author     Henrique de Moraes Holschuh <hmh@hmh.eng.br>  2010-05-16 18:40:56 -0400
committer  Henrique de Moraes Holschuh <hmh@hmh.eng.br>  2010-05-16 18:40:56 -0400
commit     b65b34895437915f411882dd40d704eb0863ffb0 (patch)
tree       3e9302ab09c9a9068fd8fd80708ef3a35b55e665 /net
parent     88cc83772a3c7756b9f2b4ba835545ad90a08409 (diff)
parent     b57f95a38233a2e73b679bea4a5453a1cc2a1cc9 (diff)

Merge mainline (v2.6.34-rc7)
Diffstat (limited to 'net')
 net/802/garp.c | 1
 net/802/p8022.c | 1
 net/802/p8023.c | 1
 net/802/psnap.c | 1
 net/802/stp.c | 1
 net/802/tr.c | 1
 net/8021q/vlan.c | 7
 net/8021q/vlan.h | 2
 net/8021q/vlan_core.c | 6
 net/8021q/vlan_dev.c | 81
 net/8021q/vlanproc.c | 2
 net/9p/client.c | 134
 net/9p/protocol.c | 75
 net/9p/protocol.h | 6
 net/9p/trans_fd.c | 1
 net/9p/trans_rdma.c | 1
 net/9p/trans_virtio.c | 143
 net/9p/util.c | 1
 net/appletalk/aarp.c | 1
 net/appletalk/atalk_proc.c | 30
 net/appletalk/ddp.c | 1
 net/atm/addr.c | 3
 net/atm/atm_misc.c | 40
 net/atm/atm_sysfs.c | 28
 net/atm/br2684.c | 91
 net/atm/clip.c | 87
 net/atm/common.c | 387
 net/atm/ioctl.c | 196
 net/atm/lec.c | 600
 net/atm/mpc.c | 541
 net/atm/mpoa_caches.c | 191
 net/atm/mpoa_proc.c | 90
 net/atm/pppoatm.c | 29
 net/atm/proc.c | 84
 net/atm/pvc.c | 43
 net/atm/raw.c | 27
 net/atm/resources.c | 419
 net/atm/signaling.c | 220
 net/atm/svc.c | 258
 net/ax25/af_ax25.c | 19
 net/ax25/ax25_dev.c | 1
 net/ax25/ax25_ds_subr.c | 1
 net/ax25/ax25_iface.c | 1
 net/ax25/ax25_in.c | 1
 net/ax25/ax25_ip.c | 1
 net/ax25/ax25_out.c | 1
 net/ax25/ax25_route.c | 1
 net/ax25/ax25_subr.c | 1
 net/ax25/ax25_uid.c | 26
 net/ax25/sysctl_net_ax25.c | 1
 net/bluetooth/af_bluetooth.c | 1
 net/bluetooth/bnep/core.c | 1
 net/bluetooth/bnep/netdev.c | 7
 net/bluetooth/bnep/sock.c | 2
 net/bluetooth/cmtp/capi.c | 37
 net/bluetooth/cmtp/sock.c | 2
 net/bluetooth/hci_core.c | 12
 net/bluetooth/hci_event.c | 1
 net/bluetooth/hci_sock.c | 4
 net/bluetooth/hci_sysfs.c | 125
 net/bluetooth/hidp/core.c | 17
 net/bluetooth/hidp/sock.c | 2
 net/bluetooth/l2cap.c | 54
 net/bluetooth/rfcomm/core.c | 40
 net/bluetooth/rfcomm/sock.c | 39
 net/bluetooth/sco.c | 39
 net/bridge/Kconfig | 14
 net/bridge/Makefile | 2
 net/bridge/br_device.c | 23
 net/bridge/br_fdb.c | 1
 net/bridge/br_forward.c | 167
 net/bridge/br_if.c | 9
 net/bridge/br_input.c | 42
 net/bridge/br_ioctl.c | 1
 net/bridge/br_multicast.c | 1309
 net/bridge/br_netfilter.c | 1
 net/bridge/br_netlink.c | 1
 net/bridge/br_private.h | 185
 net/bridge/br_stp.c | 2
 net/bridge/br_stp_bpdu.c | 1
 net/bridge/br_stp_if.c | 1
 net/bridge/br_sysfs_br.c | 281
 net/bridge/br_sysfs_if.c | 20
 net/bridge/netfilter/ebt_802_3.c | 2
 net/bridge/netfilter/ebt_arp.c | 2
 net/bridge/netfilter/ebt_arpreply.c | 2
 net/bridge/netfilter/ebt_dnat.c | 2
 net/bridge/netfilter/ebt_ip.c | 2
 net/bridge/netfilter/ebt_ip6.c | 2
 net/bridge/netfilter/ebt_limit.c | 18
 net/bridge/netfilter/ebt_log.c | 2
 net/bridge/netfilter/ebt_mark.c | 33
 net/bridge/netfilter/ebt_mark_m.c | 39
 net/bridge/netfilter/ebt_nflog.c | 2
 net/bridge/netfilter/ebt_pkttype.c | 2
 net/bridge/netfilter/ebt_redirect.c | 2
 net/bridge/netfilter/ebt_snat.c | 2
 net/bridge/netfilter/ebt_stp.c | 2
 net/bridge/netfilter/ebt_ulog.c | 3
 net/bridge/netfilter/ebt_vlan.c | 2
 net/bridge/netfilter/ebtable_broute.c | 2
 net/bridge/netfilter/ebtable_filter.c | 2
 net/bridge/netfilter/ebtable_nat.c | 2
 net/bridge/netfilter/ebtables.c | 1242
 net/can/af_can.c | 124
 net/can/af_can.h | 4
 net/can/bcm.c | 4
 net/can/proc.c | 93
 net/can/raw.c | 3
 net/compat.c | 1
 net/core/datagram.c | 1
 net/core/dev.c | 303
 net/core/dev_mcast.c | 5
 net/core/drop_monitor.c | 2
 net/core/dst.c | 1
 net/core/ethtool.c | 434
 net/core/fib_rules.c | 3
 net/core/filter.c | 9
 net/core/gen_estimator.c | 1
 net/core/iovec.c | 1
 net/core/link_watch.c | 1
 net/core/neighbour.c | 21
 net/core/net-sysfs.c | 1
 net/core/net-traces.c | 1
 net/core/netpoll.c | 179
 net/core/pktgen.c | 3
 net/core/rtnetlink.c | 134
 net/core/scm.c | 3
 net/core/sock.c | 38
 net/core/sysctl_net_core.c | 1
 net/dcb/dcbnl.c | 17
 net/dccp/ccid.c | 11
 net/dccp/ccids/ccid2.c | 1
 net/dccp/feat.c | 1
 net/dccp/input.c | 1
 net/dccp/ipv4.c | 13
 net/dccp/ipv6.c | 13
 net/dccp/minisocks.c | 3
 net/dccp/output.c | 1
 net/dccp/probe.c | 1
 net/dccp/proto.c | 24
 net/decnet/dn_dev.c | 1
 net/decnet/dn_fib.c | 1
 net/decnet/dn_neigh.c | 1
 net/decnet/dn_nsp_in.c | 1
 net/decnet/dn_nsp_out.c | 1
 net/decnet/dn_route.c | 15
 net/decnet/dn_table.c | 1
 net/decnet/netfilter/dn_rtmsg.c | 1
 net/dsa/dsa.c | 1
 net/dsa/tag_dsa.c | 1
 net/dsa/tag_edsa.c | 1
 net/dsa/tag_trailer.c | 1
 net/econet/af_econet.c | 1
 net/ethernet/eth.c | 6
 net/ethernet/pe2.c | 1
 net/ieee802154/af_ieee802154.c | 7
 net/ieee802154/dgram.c | 1
 net/ieee802154/netlink.c | 1
 net/ieee802154/nl-mac.c | 1
 net/ieee802154/nl-phy.c | 1
 net/ieee802154/raw.c | 1
 net/ieee802154/wpan-class.c | 1
 net/ipv4/af_inet.c | 52
 net/ipv4/ah4.c | 3
 net/ipv4/arp.c | 59
 net/ipv4/cipso_ipv4.c | 1
 net/ipv4/devinet.c | 33
 net/ipv4/esp4.c | 2
 net/ipv4/fib_frontend.c | 5
 net/ipv4/fib_hash.c | 1
 net/ipv4/fib_semantics.c | 81
 net/ipv4/fib_trie.c | 9
 net/ipv4/icmp.c | 3
 net/ipv4/igmp.c | 88
 net/ipv4/inet_connection_sock.c | 2
 net/ipv4/inet_diag.c | 1
 net/ipv4/inet_fragment.c | 1
 net/ipv4/inet_timewait_sock.c | 1
 net/ipv4/ip_forward.c | 1
 net/ipv4/ip_fragment.c | 40
 net/ipv4/ip_gre.c | 37
 net/ipv4/ip_input.c | 1
 net/ipv4/ip_options.c | 1
 net/ipv4/ip_output.c | 3
 net/ipv4/ip_sockglue.c | 15
 net/ipv4/ipcomp.c | 11
 net/ipv4/ipconfig.c | 58
 net/ipv4/ipip.c | 21
 net/ipv4/ipmr.c | 18
 net/ipv4/netfilter.c | 1
 net/ipv4/netfilter/arp_tables.c | 386
 net/ipv4/netfilter/arptable_filter.c | 96
 net/ipv4/netfilter/ip_queue.c | 1
 net/ipv4/netfilter/ip_tables.c | 557
 net/ipv4/netfilter/ipt_CLUSTERIP.c | 15
 net/ipv4/netfilter/ipt_REJECT.c | 1
 net/ipv4/netfilter/ipt_ULOG.c | 5
 net/ipv4/netfilter/iptable_filter.c | 125
 net/ipv4/netfilter/iptable_mangle.c | 167
 net/ipv4/netfilter/iptable_raw.c | 97
 net/ipv4/netfilter/iptable_security.c | 118
 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 3
 net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 11
 net/ipv4/netfilter/nf_defrag_ipv4.c | 19
 net/ipv4/netfilter/nf_nat_core.c | 25
 net/ipv4/netfilter/nf_nat_ftp.c | 105
 net/ipv4/netfilter/nf_nat_helper.c | 40
 net/ipv4/netfilter/nf_nat_pptp.c | 3
 net/ipv4/netfilter/nf_nat_rule.c | 42
 net/ipv4/netfilter/nf_nat_sip.c | 154
 net/ipv4/netfilter/nf_nat_snmp_basic.c | 32
 net/ipv4/netfilter/nf_nat_standalone.c | 1
 net/ipv4/proc.c | 34
 net/ipv4/raw.c | 1
 net/ipv4/route.c | 128
 net/ipv4/syncookies.c | 3
 net/ipv4/sysctl_net_ipv4.c | 15
 net/ipv4/tcp.c | 132
 net/ipv4/tcp_cong.c | 1
 net/ipv4/tcp_input.c | 16
 net/ipv4/tcp_ipv4.c | 40
 net/ipv4/tcp_minisocks.c | 3
 net/ipv4/tcp_output.c | 41
 net/ipv4/tcp_probe.c | 1
 net/ipv4/tcp_timer.c | 30
 net/ipv4/tunnel4.c | 1
 net/ipv4/udp.c | 30
 net/ipv4/udplite.c | 4
 net/ipv4/xfrm4_input.c | 1
 net/ipv4/xfrm4_mode_tunnel.c | 1
 net/ipv4/xfrm4_policy.c | 5
 net/ipv6/addrconf.c | 145
 net/ipv6/addrconf_core.c | 2
 net/ipv6/addrlabel.c | 1
 net/ipv6/af_inet6.c | 35
 net/ipv6/ah6.c | 3
 net/ipv6/anycast.c | 3
 net/ipv6/datagram.c | 1
 net/ipv6/esp6.c | 2
 net/ipv6/exthdrs.c | 3
 net/ipv6/fib6_rules.c | 15
 net/ipv6/icmp.c | 13
 net/ipv6/inet6_connection_sock.c | 1
 net/ipv6/ip6_fib.c | 53
 net/ipv6/ip6_flowlabel.c | 10
 net/ipv6/ip6_input.c | 4
 net/ipv6/ip6_output.c | 26
 net/ipv6/ip6_tunnel.c | 44
 net/ipv6/ip6mr.c | 15
 net/ipv6/ipcomp6.c | 15
 net/ipv6/ipv6_sockglue.c | 1
 net/ipv6/mcast.c | 33
 net/ipv6/mip6.c | 2
 net/ipv6/ndisc.c | 8
 net/ipv6/netfilter/ip6_queue.c | 1
 net/ipv6/netfilter/ip6_tables.c | 559
 net/ipv6/netfilter/ip6t_REJECT.c | 3
 net/ipv6/netfilter/ip6table_filter.c | 114
 net/ipv6/netfilter/ip6table_mangle.c | 142
 net/ipv6/netfilter/ip6table_raw.c | 87
 net/ipv6/netfilter/ip6table_security.c | 110
 net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 14
 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 11
 net/ipv6/netfilter/nf_conntrack_reasm.c | 16
 net/ipv6/proc.c | 39
 net/ipv6/raw.c | 5
 net/ipv6/reassembly.c | 28
 net/ipv6/route.c | 41
 net/ipv6/sit.c | 26
 net/ipv6/syncookies.c | 3
 net/ipv6/sysctl_net_ipv6.c | 5
 net/ipv6/tcp_ipv6.c | 32
 net/ipv6/tunnel6.c | 5
 net/ipv6/udp.c | 55
 net/ipv6/udplite.c | 4
 net/ipv6/xfrm6_input.c | 2
 net/ipv6/xfrm6_mode_tunnel.c | 1
 net/ipv6/xfrm6_output.c | 2
 net/ipv6/xfrm6_policy.c | 5
 net/ipv6/xfrm6_tunnel.c | 195
 net/ipx/af_ipx.c | 1
 net/ipx/ipx_proc.c | 90
 net/ipx/ipx_route.c | 1
 net/irda/af_irda.c | 1
 net/irda/discovery.c | 1
 net/irda/ircomm/ircomm_core.c | 1
 net/irda/ircomm/ircomm_lmp.c | 1
 net/irda/ircomm/ircomm_param.c | 1
 net/irda/ircomm/ircomm_tty.c | 7
 net/irda/irda_device.c | 1
 net/irda/iriap.c | 1
 net/irda/iriap_event.c | 2
 net/irda/irias_object.c | 1
 net/irda/irlan/irlan_client.c | 1
 net/irda/irlan/irlan_common.c | 29
 net/irda/irlan/irlan_eth.c | 5
 net/irda/irlan/irlan_provider.c | 1
 net/irda/irlap_event.c | 1
 net/irda/irlap_frame.c | 1
 net/irda/irnet/irnet_irda.c | 1
 net/irda/irnet/irnet_ppp.c | 1
 net/irda/irnetlink.c | 3
 net/irda/irqueue.c | 1
 net/irda/irttp.c | 1
 net/key/af_key.c | 169
 net/lapb/lapb_iface.c | 1
 net/lapb/lapb_in.c | 1
 net/lapb/lapb_out.c | 1
 net/lapb/lapb_subr.c | 1
 net/llc/af_llc.c | 65
 net/llc/llc_c_ac.c | 3
 net/llc/llc_conn.c | 147
 net/llc/llc_core.c | 53
 net/llc/llc_if.c | 1
 net/llc/llc_input.c | 1
 net/llc/llc_output.c | 45
 net/llc/llc_proc.c | 69
 net/llc/llc_sap.c | 112
 net/llc/llc_station.c | 1
 net/mac80211/Kconfig | 12
 net/mac80211/Makefile | 4
 net/mac80211/agg-rx.c | 16
 net/mac80211/agg-tx.c | 40
 net/mac80211/cfg.c | 199
 net/mac80211/debugfs.c | 127
 net/mac80211/debugfs_key.c | 3
 net/mac80211/debugfs_netdev.c | 217
 net/mac80211/debugfs_netdev.h | 9
 net/mac80211/debugfs_sta.c | 88
 net/mac80211/driver-ops.h | 169
 net/mac80211/driver-trace.h | 174
 net/mac80211/ht.c | 53
 net/mac80211/ibss.c | 126
 net/mac80211/ieee80211_i.h | 215
 net/mac80211/iface.c | 109
 net/mac80211/key.c | 11
 net/mac80211/key.h | 8
 net/mac80211/led.c | 1
 net/mac80211/main.c | 78
 net/mac80211/mesh.c | 10
 net/mac80211/mesh_hwmp.c | 25
 net/mac80211/mesh_pathtbl.c | 7
 net/mac80211/mesh_plink.c | 26
 net/mac80211/mlme.c | 1334
 net/mac80211/offchannel.c | 170
 net/mac80211/pm.c | 18
 net/mac80211/rate.c | 91
 net/mac80211/rate.h | 19
 net/mac80211/rc80211_minstrel.c | 1
 net/mac80211/rc80211_minstrel_debugfs.c | 1
 net/mac80211/rc80211_pid_algo.c | 9
 net/mac80211/rc80211_pid_debugfs.c | 1
 net/mac80211/rx.c | 479
 net/mac80211/scan.c | 250
 net/mac80211/spectmgmt.c | 4
 net/mac80211/sta_info.c | 794
 net/mac80211/sta_info.h | 68
 net/mac80211/status.c | 107
 net/mac80211/tkip.c | 47
 net/mac80211/tx.c | 382
 net/mac80211/util.c | 335
 net/mac80211/wep.c | 18
 net/mac80211/wme.c | 2
 net/mac80211/work.c | 1101
 net/mac80211/wpa.c | 59
 net/netfilter/Kconfig | 25
 net/netfilter/Makefile | 1
 net/netfilter/core.c | 1
 net/netfilter/ipvs/Kconfig | 11
 net/netfilter/ipvs/Makefile | 1
 net/netfilter/ipvs/ip_vs_app.c | 1
 net/netfilter/ipvs/ip_vs_conn.c | 43
 net/netfilter/ipvs/ip_vs_core.c | 68
 net/netfilter/ipvs/ip_vs_ctl.c | 14
 net/netfilter/ipvs/ip_vs_dh.c | 1
 net/netfilter/ipvs/ip_vs_est.c | 1
 net/netfilter/ipvs/ip_vs_ftp.c | 3
 net/netfilter/ipvs/ip_vs_lblc.c | 1
 net/netfilter/ipvs/ip_vs_lblcr.c | 45
 net/netfilter/ipvs/ip_vs_proto.c | 4
 net/netfilter/ipvs/ip_vs_proto_sctp.c | 1183
 net/netfilter/ipvs/ip_vs_sh.c | 1
 net/netfilter/ipvs/ip_vs_sync.c | 14
 net/netfilter/ipvs/ip_vs_wrr.c | 1
 net/netfilter/ipvs/ip_vs_xmit.c | 11
 net/netfilter/nf_conntrack_acct.c | 1
 net/netfilter/nf_conntrack_amanda.c | 1
 net/netfilter/nf_conntrack_core.c | 164
 net/netfilter/nf_conntrack_ecache.c | 1
 net/netfilter/nf_conntrack_expect.c | 31
 net/netfilter/nf_conntrack_extend.c | 1
 net/netfilter/nf_conntrack_ftp.c | 1
 net/netfilter/nf_conntrack_h323_main.c | 4
 net/netfilter/nf_conntrack_helper.c | 45
 net/netfilter/nf_conntrack_irc.c | 1
 net/netfilter/nf_conntrack_netlink.c | 233
 net/netfilter/nf_conntrack_pptp.c | 14
 net/netfilter/nf_conntrack_proto.c | 1
 net/netfilter/nf_conntrack_proto_dccp.c | 6
 net/netfilter/nf_conntrack_proto_gre.c | 3
 net/netfilter/nf_conntrack_proto_sctp.c | 2
 net/netfilter/nf_conntrack_proto_tcp.c | 4
 net/netfilter/nf_conntrack_proto_udp.c | 6
 net/netfilter/nf_conntrack_proto_udplite.c | 4
 net/netfilter/nf_conntrack_sane.c | 1
 net/netfilter/nf_conntrack_sip.c | 334
 net/netfilter/nf_conntrack_standalone.c | 7
 net/netfilter/nf_queue.c | 3
 net/netfilter/nfnetlink.c | 65
 net/netfilter/nfnetlink_log.c | 6
 net/netfilter/nfnetlink_queue.c | 8
 net/netfilter/x_tables.c | 81
 net/netfilter/xt_CT.c | 165
 net/netfilter/xt_LED.c | 1
 net/netfilter/xt_NFQUEUE.c | 6
 net/netfilter/xt_RATEEST.c | 8
 net/netfilter/xt_TCPMSS.c | 31
 net/netfilter/xt_connlimit.c | 28
 net/netfilter/xt_dccp.c | 1
 net/netfilter/xt_hashlimit.c | 221
 net/netfilter/xt_limit.c | 5
 net/netfilter/xt_osf.c | 4
 net/netfilter/xt_quota.c | 1
 net/netfilter/xt_recent.c | 169
 net/netfilter/xt_repldata.h | 35
 net/netfilter/xt_statistic.c | 1
 net/netfilter/xt_string.c | 1
 net/netlabel/netlabel_cipso_v4.c | 1
 net/netlabel/netlabel_domainhash.c | 30
 net/netlabel/netlabel_kapi.c | 1
 net/netlabel/netlabel_mgmt.c | 1
 net/netlabel/netlabel_unlabeled.c | 70
 net/netlabel/netlabel_user.c | 1
 net/netlink/af_netlink.c | 27
 net/netlink/genetlink.c | 5
 net/netrom/af_netrom.c | 22
 net/netrom/nr_dev.c | 1
 net/netrom/nr_in.c | 1
 net/netrom/nr_loopback.c | 1
 net/netrom/nr_out.c | 1
 net/netrom/nr_route.c | 54
 net/netrom/nr_subr.c | 1
 net/packet/Kconfig | 10
 net/packet/af_packet.c | 307
 net/phonet/af_phonet.c | 1
 net/phonet/datagram.c | 7
 net/phonet/pep-gprs.c | 4
 net/phonet/pep.c | 30
 net/phonet/pn_dev.c | 8
 net/phonet/pn_netlink.c | 4
 net/phonet/socket.c | 1
 net/rds/af_rds.c | 1
 net/rds/cong.c | 1
 net/rds/connection.c | 1
 net/rds/ib.c | 1
 net/rds/ib_cm.c | 1
 net/rds/ib_rdma.c | 1
 net/rds/ib_recv.c | 1
 net/rds/info.c | 1
 net/rds/iw.c | 1
 net/rds/iw_cm.c | 1
 net/rds/iw_rdma.c | 1
 net/rds/iw_recv.c | 1
 net/rds/loop.c | 1
 net/rds/message.c | 1
 net/rds/page.c | 1
 net/rds/rdma.c | 1
 net/rds/rdma_transport.c | 2
 net/rds/recv.c | 1
 net/rds/send.c | 1
 net/rds/tcp.c | 1
 net/rds/tcp_connect.c | 7
 net/rds/tcp_listen.c | 7
 net/rds/tcp_recv.c | 1
 net/rds/tcp_send.c | 4
 net/rfkill/core.c | 1
 net/rfkill/input.c | 8
 net/rose/af_rose.c | 23
 net/rose/rose_dev.c | 1
 net/rose/rose_link.c | 1
 net/rose/rose_loopback.c | 1
 net/rose/rose_out.c | 1
 net/rose/rose_route.c | 1
 net/rose/rose_subr.c | 1
 net/rxrpc/af_rxrpc.c | 1
 net/rxrpc/ar-accept.c | 7
 net/rxrpc/ar-ack.c | 1
 net/rxrpc/ar-call.c | 1
 net/rxrpc/ar-connection.c | 1
 net/rxrpc/ar-input.c | 1
 net/rxrpc/ar-key.c | 1
 net/rxrpc/ar-local.c | 1
 net/rxrpc/ar-output.c | 1
 net/rxrpc/ar-peer.c | 1
 net/rxrpc/ar-transport.c | 1
 net/rxrpc/rxkad.c | 1
 net/sched/Kconfig | 5
 net/sched/act_api.c | 1
 net/sched/act_ipt.c | 1
 net/sched/act_mirred.c | 1
 net/sched/act_pedit.c | 1
 net/sched/act_police.c | 1
 net/sched/act_simple.c | 1
 net/sched/cls_api.c | 1
 net/sched/cls_basic.c | 1
 net/sched/cls_cgroup.c | 37
 net/sched/cls_flow.c | 1
 net/sched/cls_fw.c | 1
 net/sched/cls_route.c | 1
 net/sched/cls_tcindex.c | 1
 net/sched/cls_u32.c | 1
 net/sched/em_meta.c | 1
 net/sched/em_nbyte.c | 1
 net/sched/em_text.c | 1
 net/sched/ematch.c | 1
 net/sched/sch_api.c | 2
 net/sched/sch_atm.c | 1
 net/sched/sch_cbq.c | 1
 net/sched/sch_drr.c | 1
 net/sched/sch_dsmark.c | 1
 net/sched/sch_fifo.c | 35
 net/sched/sch_generic.c | 1
 net/sched/sch_gred.c | 1
 net/sched/sch_htb.c | 1
 net/sched/sch_mq.c | 1
 net/sched/sch_multiq.c | 1
 net/sched/sch_netem.c | 1
 net/sched/sch_prio.c | 1
 net/sched/sch_sfq.c | 1
 net/sched/sch_teql.c | 1
 net/sctp/associola.c | 6
 net/sctp/auth.c | 1
 net/sctp/bind_addr.c | 2
 net/sctp/chunk.c | 1
 net/sctp/endpointola.c | 1
 net/sctp/input.c | 43
 net/sctp/inqueue.c | 1
 net/sctp/ipv6.c | 2
 net/sctp/output.c | 1
 net/sctp/outqueue.c | 1
 net/sctp/primitive.c | 1
 net/sctp/proc.c | 4
 net/sctp/protocol.c | 7
 net/sctp/sm_make_chunk.c | 95
 net/sctp/sm_sideeffect.c | 29
 net/sctp/sm_statefuns.c | 9
 net/sctp/socket.c | 20
 net/sctp/ssnmap.c | 1
 net/sctp/transport.c | 1
 net/sctp/tsnmap.c | 1
 net/sctp/ulpevent.c | 1
 net/sctp/ulpqueue.c | 1
 net/socket.c | 5
 net/sunrpc/addr.c | 9
 net/sunrpc/auth.c | 2
 net/sunrpc/auth_generic.c | 1
 net/sunrpc/auth_gss/auth_gss.c | 16
 net/sunrpc/auth_gss/gss_generic_token.c | 1
 net/sunrpc/auth_gss/gss_krb5_crypto.c | 1
 net/sunrpc/auth_gss/gss_krb5_seal.c | 1
 net/sunrpc/auth_gss/gss_krb5_seqnum.c | 1
 net/sunrpc/auth_gss/gss_krb5_unseal.c | 1
 net/sunrpc/auth_gss/gss_krb5_wrap.c | 1
 net/sunrpc/auth_gss/gss_spkm3_seal.c | 1
 net/sunrpc/auth_gss/svcauth_gss.c | 1
 net/sunrpc/auth_unix.c | 1
 net/sunrpc/backchannel_rqst.c | 1
 net/sunrpc/bc_svc.c | 15
 net/sunrpc/clnt.c | 1
 net/sunrpc/rpc_pipe.c | 13
 net/sunrpc/rpcb_clnt.c | 1
 net/sunrpc/socklib.c | 1
 net/sunrpc/stats.c | 1
 net/sunrpc/svc.c | 7
 net/sunrpc/svc_xprt.c | 28
 net/sunrpc/svcauth_unix.c | 50
 net/sunrpc/svcsock.c | 3
 net/sunrpc/xdr.c | 1
 net/sunrpc/xprt.c | 22
 net/sunrpc/xprtrdma/svc_rdma.c | 1
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 6
 net/sunrpc/xprtrdma/transport.c | 8
 net/sunrpc/xprtrdma/verbs.c | 1
 net/sunrpc/xprtsock.c | 29
 net/sysctl_net.c | 4
 net/tipc/Kconfig | 75
 net/tipc/bearer.c | 37
 net/tipc/bearer.h | 2
 net/tipc/core.c | 10
 net/tipc/core.h | 1
 net/tipc/eth_media.c | 1
 net/tipc/link.c | 9
 net/tipc/net.c | 25
 net/tipc/ref.c | 26
 net/tipc/socket.c | 8
 net/tipc/subscr.c | 57
 net/tipc/subscr.h | 2
 net/unix/af_unix.c | 6
 net/unix/garbage.c | 1
 net/unix/sysctl_net_unix.c | 3
 net/wimax/op-msg.c | 4
 net/wimax/op-reset.c | 3
 net/wimax/op-rfkill.c | 3
 net/wimax/op-state-get.c | 3
 net/wimax/stack.c | 4
 net/wireless/.gitignore | 1
 net/wireless/Kconfig | 13
 net/wireless/Makefile | 6
 net/wireless/chan.c | 41
 net/wireless/core.c | 60
 net/wireless/core.h | 20
 net/wireless/db.txt | 17
 net/wireless/debugfs.c | 1
 net/wireless/genregdb.awk | 118
 net/wireless/ibss.c | 1
 net/wireless/lib80211_crypt_ccmp.c | 2
 net/wireless/lib80211_crypt_tkip.c | 23
 net/wireless/mlme.c | 215
 net/wireless/nl80211.c | 867
 net/wireless/nl80211.h | 23
 net/wireless/radiotap.c | 305
 net/wireless/reg.c | 686
 net/wireless/reg.h | 29
 net/wireless/regdb.h | 7
 net/wireless/scan.c | 159
 net/wireless/sme.c | 42
 net/wireless/sysfs.c | 20
 net/wireless/util.c | 138
 net/wireless/wext-compat.c | 50
 net/wireless/wext-core.c | 1
 net/wireless/wext-priv.c | 1
 net/wireless/wext-proc.c | 4
 net/wireless/wext-sme.c | 1
 net/x25/af_x25.c | 93
 net/x25/x25_dev.c | 3
 net/x25/x25_facilities.c | 27
 net/x25/x25_forward.c | 1
 net/x25/x25_in.c | 16
 net/x25/x25_link.c | 1
 net/x25/x25_out.c | 1
 net/x25/x25_proc.c | 114
 net/x25/x25_route.c | 1
 net/x25/x25_subr.c | 1
 net/xfrm/xfrm_algo.c | 16
 net/xfrm/xfrm_input.c | 2
 net/xfrm/xfrm_ipcomp.c | 18
 net/xfrm/xfrm_output.c | 1
 net/xfrm/xfrm_policy.c | 50
 net/xfrm/xfrm_proc.c | 6
 net/xfrm/xfrm_state.c | 89
 net/xfrm/xfrm_sysctl.c | 5
 net/xfrm/xfrm_user.c | 111
 653 files changed, 20365 insertions(+), 9734 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 1dcb0660c49d..9ed7c0e7dc17 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -14,6 +14,7 @@
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/llc.h> 16#include <linux/llc.h>
17#include <linux/slab.h>
17#include <net/llc.h> 18#include <net/llc.h>
18#include <net/llc_pdu.h> 19#include <net/llc_pdu.h>
19#include <net/garp.h> 20#include <net/garp.h>
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 2530f35241cd..7f353c4f437a 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/slab.h>
21#include <net/datalink.h> 22#include <net/datalink.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/in.h> 24#include <linux/in.h>
diff --git a/net/802/p8023.c b/net/802/p8023.c
index 6ab1835041a7..1256a40da43c 100644
--- a/net/802/p8023.c
+++ b/net/802/p8023.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/slab.h>
21 22
22#include <net/datalink.h> 23#include <net/datalink.h>
23#include <net/p8022.h> 24#include <net/p8022.h>
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 6fea0750662b..21cde8fd5795 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <net/datalink.h> 18#include <net/datalink.h>
18#include <net/llc.h> 19#include <net/llc.h>
19#include <net/psnap.h> 20#include <net/psnap.h>
diff --git a/net/802/stp.c b/net/802/stp.c
index 0b7a24452d11..53c8f77f0ccd 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/llc.h> 13#include <linux/llc.h>
14#include <linux/slab.h>
14#include <net/llc.h> 15#include <net/llc.h>
15#include <net/llc_pdu.h> 16#include <net/llc_pdu.h>
16#include <net/stp.h> 17#include <net/stp.h>
diff --git a/net/802/tr.c b/net/802/tr.c
index 44acce47fcdc..1c6e596074df 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -36,6 +36,7 @@
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/sysctl.h> 38#include <linux/sysctl.h>
39#include <linux/slab.h>
39#include <net/arp.h> 40#include <net/arp.h>
40#include <net/net_namespace.h> 41#include <net/net_namespace.h>
41 42
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 33f90e7362cc..97da977c2a23 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <linux/slab.h>
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/rculist.h> 27#include <linux/rculist.h>
27#include <net/p8022.h> 28#include <net/p8022.h>
@@ -378,6 +379,8 @@ static void vlan_transfer_features(struct net_device *dev,
378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 379#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 380 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
380#endif 381#endif
382 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
383 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
381 384
382 if (old_features != vlandev->features) 385 if (old_features != vlandev->features)
383 netdev_features_change(vlandev); 386 netdev_features_change(vlandev);
@@ -663,7 +666,7 @@ out:
663 return err; 666 return err;
664} 667}
665 668
666static int vlan_init_net(struct net *net) 669static int __net_init vlan_init_net(struct net *net)
667{ 670{
668 struct vlan_net *vn = net_generic(net, vlan_net_id); 671 struct vlan_net *vn = net_generic(net, vlan_net_id);
669 int err; 672 int err;
@@ -675,7 +678,7 @@ static int vlan_init_net(struct net *net)
675 return err; 678 return err;
676} 679}
677 680
678static void vlan_exit_net(struct net *net) 681static void __net_exit vlan_exit_net(struct net *net)
679{ 682{
680 vlan_proc_cleanup(net); 683 vlan_proc_cleanup(net);
681} 684}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5685296017e9..6abdcac1b2e8 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -61,7 +61,7 @@ struct vlan_dev_info {
61 struct proc_dir_entry *dent; 61 struct proc_dir_entry *dent;
62 unsigned long cnt_inc_headroom_on_tx; 62 unsigned long cnt_inc_headroom_on_tx;
63 unsigned long cnt_encap_on_xmit; 63 unsigned long cnt_encap_on_xmit;
64 struct vlan_rx_stats *vlan_rx_stats; 64 struct vlan_rx_stats __percpu *vlan_rx_stats;
65}; 65};
66 66
67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10af..c584a0af77d3 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -11,9 +11,10 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
11 if (netpoll_rx(skb)) 11 if (netpoll_rx(skb))
12 return NET_RX_DROP; 12 return NET_RX_DROP;
13 13
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
15 goto drop; 15 goto drop;
16 16
17 skb->skb_iif = skb->dev->ifindex;
17 __vlan_hwaccel_put_tag(skb, vlan_tci); 18 __vlan_hwaccel_put_tag(skb, vlan_tci);
18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 19 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
19 20
@@ -82,9 +83,10 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
82{ 83{
83 struct sk_buff *p; 84 struct sk_buff *p;
84 85
85 if (skb_bond_should_drop(skb)) 86 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
86 goto drop; 87 goto drop;
87 88
89 skb->skb_iif = skb->dev->ifindex;
88 __vlan_hwaccel_put_tag(skb, vlan_tci); 90 __vlan_hwaccel_put_tag(skb, vlan_tci);
89 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 91 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
90 92
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c1b92cab46c7..29b6348c8d4d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h>
24#include <linux/skbuff.h> 25#include <linux/skbuff.h>
25#include <linux/netdevice.h> 26#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
@@ -263,11 +264,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
263 vhdr->h_vlan_TCI = htons(vlan_tci); 264 vhdr->h_vlan_TCI = htons(vlan_tci);
264 265
265 /* 266 /*
266 * Set the protocol type. For a packet of type ETH_P_802_3 we 267 * Set the protocol type. For a packet of type ETH_P_802_3/2 we
267 * put the length in here instead. It is up to the 802.2 268 * put the length in here instead.
268 * layer to carry protocol information.
269 */ 269 */
270 if (type != ETH_P_802_3) 270 if (type != ETH_P_802_3 && type != ETH_P_802_2)
271 vhdr->h_vlan_encapsulated_proto = htons(type); 271 vhdr->h_vlan_encapsulated_proto = htons(type);
272 else 272 else
273 vhdr->h_vlan_encapsulated_proto = htons(len); 273 vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -323,7 +323,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
323 } 323 }
324 324
325 325
326 skb->dev = vlan_dev_info(dev)->real_dev; 326 skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
327 len = skb->len; 327 len = skb->len;
328 ret = dev_queue_xmit(skb); 328 ret = dev_queue_xmit(skb);
329 329
@@ -362,6 +362,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
362 return ret; 362 return ret;
363} 363}
364 364
365static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
366{
367 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
368 const struct net_device_ops *ops = rdev->netdev_ops;
369
370 return ops->ndo_select_queue(rdev, skb);
371}
372
365static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 373static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
366{ 374{
367 /* TODO: gotta make sure the underlying layer can handle it, 375 /* TODO: gotta make sure the underlying layer can handle it,
@@ -689,7 +697,8 @@ static const struct header_ops vlan_header_ops = {
689 .parse = eth_header_parse, 697 .parse = eth_header_parse,
690}; 698};
691 699
692static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; 700static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
701 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
693 702
694static int vlan_dev_init(struct net_device *dev) 703static int vlan_dev_init(struct net_device *dev)
695{ 704{
@@ -723,11 +732,17 @@ static int vlan_dev_init(struct net_device *dev)
723 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 732 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
724 dev->header_ops = real_dev->header_ops; 733 dev->header_ops = real_dev->header_ops;
725 dev->hard_header_len = real_dev->hard_header_len; 734 dev->hard_header_len = real_dev->hard_header_len;
726 dev->netdev_ops = &vlan_netdev_accel_ops; 735 if (real_dev->netdev_ops->ndo_select_queue)
736 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
737 else
738 dev->netdev_ops = &vlan_netdev_accel_ops;
727 } else { 739 } else {
728 dev->header_ops = &vlan_header_ops; 740 dev->header_ops = &vlan_header_ops;
729 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 741 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
730 dev->netdev_ops = &vlan_netdev_ops; 742 if (real_dev->netdev_ops->ndo_select_queue)
743 dev->netdev_ops = &vlan_netdev_ops_sq;
744 else
745 dev->netdev_ops = &vlan_netdev_ops;
731 } 746 }
732 747
733 if (is_vlan_dev(real_dev)) 748 if (is_vlan_dev(real_dev))
@@ -866,6 +881,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
866#endif 881#endif
867}; 882};
868 883
884static const struct net_device_ops vlan_netdev_ops_sq = {
885 .ndo_select_queue = vlan_dev_select_queue,
886 .ndo_change_mtu = vlan_dev_change_mtu,
887 .ndo_init = vlan_dev_init,
888 .ndo_uninit = vlan_dev_uninit,
889 .ndo_open = vlan_dev_open,
890 .ndo_stop = vlan_dev_stop,
891 .ndo_start_xmit = vlan_dev_hard_start_xmit,
892 .ndo_validate_addr = eth_validate_addr,
893 .ndo_set_mac_address = vlan_dev_set_mac_address,
894 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
895 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
896 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
897 .ndo_do_ioctl = vlan_dev_ioctl,
898 .ndo_neigh_setup = vlan_dev_neigh_setup,
899 .ndo_get_stats = vlan_dev_get_stats,
900#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
901 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
902 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
903 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
904 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
905 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
906#endif
907};
908
909static const struct net_device_ops vlan_netdev_accel_ops_sq = {
910 .ndo_select_queue = vlan_dev_select_queue,
911 .ndo_change_mtu = vlan_dev_change_mtu,
912 .ndo_init = vlan_dev_init,
913 .ndo_uninit = vlan_dev_uninit,
914 .ndo_open = vlan_dev_open,
915 .ndo_stop = vlan_dev_stop,
916 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
917 .ndo_validate_addr = eth_validate_addr,
918 .ndo_set_mac_address = vlan_dev_set_mac_address,
919 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
920 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
921 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
922 .ndo_do_ioctl = vlan_dev_ioctl,
923 .ndo_neigh_setup = vlan_dev_neigh_setup,
924 .ndo_get_stats = vlan_dev_get_stats,
925#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
926 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
927 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
928 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
929 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
930 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
931#endif
932};
933
869void vlan_setup(struct net_device *dev) 934void vlan_setup(struct net_device *dev)
870{ 935{
871 ether_setup(dev); 936 ether_setup(dev);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9ec1f057c03a..afead353e215 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
140 * Create /proc/net/vlan entries 140 * Create /proc/net/vlan entries
141 */ 141 */
142 142
143int vlan_proc_init(struct net *net) 143int __net_init vlan_proc_init(struct net *net)
144{ 144{
145 struct vlan_net *vn = net_generic(net, vlan_net_id); 145 struct vlan_net *vn = net_generic(net, vlan_net_id);
146 146
diff --git a/net/9p/client.c b/net/9p/client.c
index 09d4f1e2e4a8..0aa79faa9850 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -29,6 +29,7 @@
29#include <linux/poll.h> 29#include <linux/poll.h>
30#include <linux/idr.h> 30#include <linux/idr.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
33#include <linux/uaccess.h> 34#include <linux/uaccess.h>
34#include <net/9p/9p.h> 35#include <net/9p/9p.h>
@@ -46,6 +47,7 @@ enum {
46 Opt_msize, 47 Opt_msize,
47 Opt_trans, 48 Opt_trans,
48 Opt_legacy, 49 Opt_legacy,
50 Opt_version,
49 Opt_err, 51 Opt_err,
50}; 52};
51 53
@@ -53,9 +55,43 @@ static const match_table_t tokens = {
53 {Opt_msize, "msize=%u"}, 55 {Opt_msize, "msize=%u"},
54 {Opt_legacy, "noextend"}, 56 {Opt_legacy, "noextend"},
55 {Opt_trans, "trans=%s"}, 57 {Opt_trans, "trans=%s"},
58 {Opt_version, "version=%s"},
56 {Opt_err, NULL}, 59 {Opt_err, NULL},
57}; 60};
58 61
62inline int p9_is_proto_dotl(struct p9_client *clnt)
63{
64 return (clnt->proto_version == p9_proto_2000L);
65}
66EXPORT_SYMBOL(p9_is_proto_dotl);
67
68inline int p9_is_proto_dotu(struct p9_client *clnt)
69{
70 return (clnt->proto_version == p9_proto_2000u);
71}
72EXPORT_SYMBOL(p9_is_proto_dotu);
73
74/* Interpret mount option for protocol version */
75static int get_protocol_version(const substring_t *name)
76{
77 int version = -EINVAL;
78
79 if (!strncmp("9p2000", name->from, name->to-name->from)) {
80 version = p9_proto_legacy;
81 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
82 } else if (!strncmp("9p2000.u", name->from, name->to-name->from)) {
83 version = p9_proto_2000u;
84 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
85 } else if (!strncmp("9p2000.L", name->from, name->to-name->from)) {
86 version = p9_proto_2000L;
87 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
88 } else {
89 P9_DPRINTK(P9_DEBUG_ERROR, "Unknown protocol version %s. ",
90 name->from);
91 }
92 return version;
93}
94
59static struct p9_req_t * 95static struct p9_req_t *
60p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); 96p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
61 97
@@ -75,7 +111,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
75 int option; 111 int option;
76 int ret = 0; 112 int ret = 0;
77 113
78 clnt->dotu = 1; 114 clnt->proto_version = p9_proto_2000u;
79 clnt->msize = 8192; 115 clnt->msize = 8192;
80 116
81 if (!opts) 117 if (!opts)
@@ -118,7 +154,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
118 } 154 }
119 break; 155 break;
120 case Opt_legacy: 156 case Opt_legacy:
121 clnt->dotu = 0; 157 clnt->proto_version = p9_proto_legacy;
158 break;
159 case Opt_version:
160 ret = get_protocol_version(&args[0]);
161 if (ret == -EINVAL)
162 goto free_and_return;
163 clnt->proto_version = ret;
122 break; 164 break;
123 default: 165 default:
124 continue; 166 continue;
@@ -410,14 +452,15 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
410 int ecode; 452 int ecode;
411 char *ename; 453 char *ename;
412 454
413 err = p9pdu_readf(req->rc, c->dotu, "s?d", &ename, &ecode); 455 err = p9pdu_readf(req->rc, c->proto_version, "s?d",
456 &ename, &ecode);
414 if (err) { 457 if (err) {
415 P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", 458 P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n",
416 err); 459 err);
417 return err; 460 return err;
418 } 461 }
419 462
420 if (c->dotu) 463 if (p9_is_proto_dotu(c))
421 err = -ecode; 464 err = -ecode;
422 465
423 if (!err || !IS_ERR_VALUE(err)) 466 if (!err || !IS_ERR_VALUE(err))
@@ -492,7 +535,12 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
492 535
493 P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type); 536 P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
494 537
495 if (c->status != Connected) 538 /* we allow for any status other than disconnected */
539 if (c->status == Disconnected)
540 return ERR_PTR(-EIO);
541
542 /* if status is begin_disconnected we allow only clunk request */
543 if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
496 return ERR_PTR(-EIO); 544 return ERR_PTR(-EIO);
497 545
498 if (signal_pending(current)) { 546 if (signal_pending(current)) {
@@ -515,7 +563,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
515 /* marshall the data */ 563 /* marshall the data */
516 p9pdu_prepare(req->tc, tag, type); 564 p9pdu_prepare(req->tc, tag, type);
517 va_start(ap, fmt); 565 va_start(ap, fmt);
518 err = p9pdu_vwritef(req->tc, c->dotu, fmt, ap); 566 err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
519 va_end(ap); 567 va_end(ap);
520 p9pdu_finalize(req->tc); 568 p9pdu_finalize(req->tc);
521 569
@@ -627,14 +675,31 @@ int p9_client_version(struct p9_client *c)
627 char *version; 675 char *version;
628 int msize; 676 int msize;
629 677
630 P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d extended %d\n", 678 P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
631 c->msize, c->dotu); 679 c->msize, c->proto_version);
632 req = p9_client_rpc(c, P9_TVERSION, "ds", c->msize, 680
633 c->dotu ? "9P2000.u" : "9P2000"); 681 switch (c->proto_version) {
682 case p9_proto_2000L:
683 req = p9_client_rpc(c, P9_TVERSION, "ds",
684 c->msize, "9P2000.L");
685 break;
686 case p9_proto_2000u:
687 req = p9_client_rpc(c, P9_TVERSION, "ds",
688 c->msize, "9P2000.u");
689 break;
690 case p9_proto_legacy:
691 req = p9_client_rpc(c, P9_TVERSION, "ds",
692 c->msize, "9P2000");
693 break;
694 default:
695 return -EINVAL;
696 break;
697 }
698
634 if (IS_ERR(req)) 699 if (IS_ERR(req))
635 return PTR_ERR(req); 700 return PTR_ERR(req);
636 701
637 err = p9pdu_readf(req->rc, c->dotu, "ds", &msize, &version); 702 err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
638 if (err) { 703 if (err) {
639 P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err); 704 P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
640 p9pdu_dump(1, req->rc); 705 p9pdu_dump(1, req->rc);
@@ -642,10 +707,12 @@ int p9_client_version(struct p9_client *c)
642 } 707 }
643 708
644 P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version); 709 P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
645 if (!memcmp(version, "9P2000.u", 8)) 710 if (!strncmp(version, "9P2000.L", 8))
646 c->dotu = 1; 711 c->proto_version = p9_proto_2000L;
647 else if (!memcmp(version, "9P2000", 6)) 712 else if (!strncmp(version, "9P2000.u", 8))
648 c->dotu = 0; 713 c->proto_version = p9_proto_2000u;
714 else if (!strncmp(version, "9P2000", 6))
715 c->proto_version = p9_proto_legacy;
649 else { 716 else {
650 err = -EREMOTEIO; 717 err = -EREMOTEIO;
651 goto error; 718 goto error;
@@ -700,8 +767,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
700 goto put_trans; 767 goto put_trans;
701 } 768 }
702 769
703 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", 770 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
704 clnt, clnt->trans_mod, clnt->msize, clnt->dotu); 771 clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
705 772
706 err = clnt->trans_mod->create(clnt, dev_name, options); 773 err = clnt->trans_mod->create(clnt, dev_name, options);
707 if (err) 774 if (err)
@@ -739,8 +806,10 @@ void p9_client_destroy(struct p9_client *clnt)
739 806
740 v9fs_put_trans(clnt->trans_mod); 807 v9fs_put_trans(clnt->trans_mod);
741 808
742 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) 809 list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) {
810 printk(KERN_INFO "Found fid %d not clunked\n", fid->fid);
743 p9_fid_destroy(fid); 811 p9_fid_destroy(fid);
812 }
744 813
745 if (clnt->fidpool) 814 if (clnt->fidpool)
746 p9_idpool_destroy(clnt->fidpool); 815 p9_idpool_destroy(clnt->fidpool);
@@ -758,6 +827,13 @@ void p9_client_disconnect(struct p9_client *clnt)
758} 827}
759EXPORT_SYMBOL(p9_client_disconnect); 828EXPORT_SYMBOL(p9_client_disconnect);
760 829
830void p9_client_begin_disconnect(struct p9_client *clnt)
831{
832 P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
833 clnt->status = BeginDisconnect;
834}
835EXPORT_SYMBOL(p9_client_begin_disconnect);
836
761struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, 837struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
762 char *uname, u32 n_uname, char *aname) 838 char *uname, u32 n_uname, char *aname)
763{ 839{
@@ -784,7 +860,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
784 goto error; 860 goto error;
785 } 861 }
786 862
787 err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); 863 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
788 if (err) { 864 if (err) {
789 p9pdu_dump(1, req->rc); 865 p9pdu_dump(1, req->rc);
790 p9_free_req(clnt, req); 866 p9_free_req(clnt, req);
@@ -833,7 +909,7 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname)
833 goto error; 909 goto error;
834 } 910 }
835 911
836 err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); 912 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
837 if (err) { 913 if (err) {
838 p9pdu_dump(1, req->rc); 914 p9pdu_dump(1, req->rc);
839 p9_free_req(clnt, req); 915 p9_free_req(clnt, req);
@@ -891,7 +967,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
891 goto error; 967 goto error;
892 } 968 }
893 969
894 err = p9pdu_readf(req->rc, clnt->dotu, "R", &nwqids, &wqids); 970 err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
895 if (err) { 971 if (err) {
896 p9pdu_dump(1, req->rc); 972 p9pdu_dump(1, req->rc);
897 p9_free_req(clnt, req); 973 p9_free_req(clnt, req);
@@ -952,7 +1028,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
952 goto error; 1028 goto error;
953 } 1029 }
954 1030
955 err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); 1031 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
956 if (err) { 1032 if (err) {
957 p9pdu_dump(1, req->rc); 1033 p9pdu_dump(1, req->rc);
958 goto free_and_error; 1034 goto free_and_error;
@@ -997,7 +1073,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
997 goto error; 1073 goto error;
998 } 1074 }
999 1075
1000 err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); 1076 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
1001 if (err) { 1077 if (err) {
1002 p9pdu_dump(1, req->rc); 1078 p9pdu_dump(1, req->rc);
1003 goto free_and_error; 1079 goto free_and_error;
@@ -1098,7 +1174,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1098 goto error; 1174 goto error;
1099 } 1175 }
1100 1176
1101 err = p9pdu_readf(req->rc, clnt->dotu, "D", &count, &dataptr); 1177 err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
1102 if (err) { 1178 if (err) {
1103 p9pdu_dump(1, req->rc); 1179 p9pdu_dump(1, req->rc);
1104 goto free_and_error; 1180 goto free_and_error;
@@ -1159,7 +1235,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1159 goto error; 1235 goto error;
1160 } 1236 }
1161 1237
1162 err = p9pdu_readf(req->rc, clnt->dotu, "d", &count); 1238 err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
1163 if (err) { 1239 if (err) {
1164 p9pdu_dump(1, req->rc); 1240 p9pdu_dump(1, req->rc);
1165 goto free_and_error; 1241 goto free_and_error;
@@ -1199,7 +1275,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1199 goto error; 1275 goto error;
1200 } 1276 }
1201 1277
1202 err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret); 1278 err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
1203 if (err) { 1279 if (err) {
1204 p9pdu_dump(1, req->rc); 1280 p9pdu_dump(1, req->rc);
1205 p9_free_req(clnt, req); 1281 p9_free_req(clnt, req);
@@ -1226,7 +1302,7 @@ error:
1226} 1302}
1227EXPORT_SYMBOL(p9_client_stat); 1303EXPORT_SYMBOL(p9_client_stat);
1228 1304
1229static int p9_client_statsize(struct p9_wstat *wst, int optional) 1305static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
1230{ 1306{
1231 int ret; 1307 int ret;
1232 1308
@@ -1245,7 +1321,7 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
1245 if (wst->muid) 1321 if (wst->muid)
1246 ret += strlen(wst->muid); 1322 ret += strlen(wst->muid);
1247 1323
1248 if (optional) { 1324 if (proto_version == p9_proto_2000u) {
1249 ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ 1325 ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
1250 if (wst->extension) 1326 if (wst->extension)
1251 ret += strlen(wst->extension); 1327 ret += strlen(wst->extension);
@@ -1262,7 +1338,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1262 1338
1263 err = 0; 1339 err = 0;
1264 clnt = fid->clnt; 1340 clnt = fid->clnt;
1265 wst->size = p9_client_statsize(wst, clnt->dotu); 1341 wst->size = p9_client_statsize(wst, clnt->proto_version);
1266 P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); 1342 P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
1267 P9_DPRINTK(P9_DEBUG_9P, 1343 P9_DPRINTK(P9_DEBUG_9P,
1268 " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" 1344 " sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index fc70147c771e..e7541d5b0118 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/slab.h>
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/types.h> 33#include <linux/types.h>
33#include <net/9p/9p.h> 34#include <net/9p/9p.h>
@@ -52,7 +53,7 @@
52#endif 53#endif
53 54
54static int 55static int
55p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...); 56p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
56 57
57#ifdef CONFIG_NET_9P_DEBUG 58#ifdef CONFIG_NET_9P_DEBUG
58void 59void
@@ -144,7 +145,8 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
144*/ 145*/
145 146
146static int 147static int
147p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) 148p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
149 va_list ap)
148{ 150{
149 const char *ptr; 151 const char *ptr;
150 int errcode = 0; 152 int errcode = 0;
@@ -194,7 +196,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
194 int16_t len; 196 int16_t len;
195 int size; 197 int size;
196 198
197 errcode = p9pdu_readf(pdu, optional, "w", &len); 199 errcode = p9pdu_readf(pdu, proto_version,
200 "w", &len);
198 if (errcode) 201 if (errcode)
199 break; 202 break;
200 203
@@ -217,7 +220,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
217 struct p9_qid *qid = 220 struct p9_qid *qid =
218 va_arg(ap, struct p9_qid *); 221 va_arg(ap, struct p9_qid *);
219 222
220 errcode = p9pdu_readf(pdu, optional, "bdq", 223 errcode = p9pdu_readf(pdu, proto_version, "bdq",
221 &qid->type, &qid->version, 224 &qid->type, &qid->version,
222 &qid->path); 225 &qid->path);
223 } 226 }
@@ -230,7 +233,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
230 stbuf->n_uid = stbuf->n_gid = stbuf->n_muid = 233 stbuf->n_uid = stbuf->n_gid = stbuf->n_muid =
231 -1; 234 -1;
232 errcode = 235 errcode =
233 p9pdu_readf(pdu, optional, 236 p9pdu_readf(pdu, proto_version,
234 "wwdQdddqssss?sddd", 237 "wwdQdddqssss?sddd",
235 &stbuf->size, &stbuf->type, 238 &stbuf->size, &stbuf->type,
236 &stbuf->dev, &stbuf->qid, 239 &stbuf->dev, &stbuf->qid,
@@ -250,7 +253,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
250 void **data = va_arg(ap, void **); 253 void **data = va_arg(ap, void **);
251 254
252 errcode = 255 errcode =
253 p9pdu_readf(pdu, optional, "d", count); 256 p9pdu_readf(pdu, proto_version, "d", count);
254 if (!errcode) { 257 if (!errcode) {
255 *count = 258 *count =
256 MIN(*count, 259 MIN(*count,
@@ -263,8 +266,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
263 int16_t *nwname = va_arg(ap, int16_t *); 266 int16_t *nwname = va_arg(ap, int16_t *);
264 char ***wnames = va_arg(ap, char ***); 267 char ***wnames = va_arg(ap, char ***);
265 268
266 errcode = 269 errcode = p9pdu_readf(pdu, proto_version,
267 p9pdu_readf(pdu, optional, "w", nwname); 270 "w", nwname);
268 if (!errcode) { 271 if (!errcode) {
269 *wnames = 272 *wnames =
270 kmalloc(sizeof(char *) * *nwname, 273 kmalloc(sizeof(char *) * *nwname,
@@ -278,7 +281,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
278 281
279 for (i = 0; i < *nwname; i++) { 282 for (i = 0; i < *nwname; i++) {
280 errcode = 283 errcode =
281 p9pdu_readf(pdu, optional, 284 p9pdu_readf(pdu,
285 proto_version,
282 "s", 286 "s",
283 &(*wnames)[i]); 287 &(*wnames)[i]);
284 if (errcode) 288 if (errcode)
@@ -306,7 +310,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
306 *wqids = NULL; 310 *wqids = NULL;
307 311
308 errcode = 312 errcode =
309 p9pdu_readf(pdu, optional, "w", nwqid); 313 p9pdu_readf(pdu, proto_version, "w", nwqid);
310 if (!errcode) { 314 if (!errcode) {
311 *wqids = 315 *wqids =
312 kmalloc(*nwqid * 316 kmalloc(*nwqid *
@@ -321,7 +325,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
321 325
322 for (i = 0; i < *nwqid; i++) { 326 for (i = 0; i < *nwqid; i++) {
323 errcode = 327 errcode =
324 p9pdu_readf(pdu, optional, 328 p9pdu_readf(pdu,
329 proto_version,
325 "Q", 330 "Q",
326 &(*wqids)[i]); 331 &(*wqids)[i]);
327 if (errcode) 332 if (errcode)
@@ -336,7 +341,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
336 } 341 }
337 break; 342 break;
338 case '?': 343 case '?':
339 if (!optional) 344 if (proto_version != p9_proto_2000u)
340 return 0; 345 return 0;
341 break; 346 break;
342 default: 347 default:
@@ -352,7 +357,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
352} 357}
353 358
354int 359int
355p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) 360p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
361 va_list ap)
356{ 362{
357 const char *ptr; 363 const char *ptr;
358 int errcode = 0; 364 int errcode = 0;
@@ -389,7 +395,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
389 if (sptr) 395 if (sptr)
390 len = MIN(strlen(sptr), USHORT_MAX); 396 len = MIN(strlen(sptr), USHORT_MAX);
391 397
392 errcode = p9pdu_writef(pdu, optional, "w", len); 398 errcode = p9pdu_writef(pdu, proto_version,
399 "w", len);
393 if (!errcode && pdu_write(pdu, sptr, len)) 400 if (!errcode && pdu_write(pdu, sptr, len))
394 errcode = -EFAULT; 401 errcode = -EFAULT;
395 } 402 }
@@ -398,7 +405,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
398 const struct p9_qid *qid = 405 const struct p9_qid *qid =
399 va_arg(ap, const struct p9_qid *); 406 va_arg(ap, const struct p9_qid *);
400 errcode = 407 errcode =
401 p9pdu_writef(pdu, optional, "bdq", 408 p9pdu_writef(pdu, proto_version, "bdq",
402 qid->type, qid->version, 409 qid->type, qid->version,
403 qid->path); 410 qid->path);
404 } break; 411 } break;
@@ -406,7 +413,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
406 const struct p9_wstat *stbuf = 413 const struct p9_wstat *stbuf =
407 va_arg(ap, const struct p9_wstat *); 414 va_arg(ap, const struct p9_wstat *);
408 errcode = 415 errcode =
409 p9pdu_writef(pdu, optional, 416 p9pdu_writef(pdu, proto_version,
410 "wwdQdddqssss?sddd", 417 "wwdQdddqssss?sddd",
411 stbuf->size, stbuf->type, 418 stbuf->size, stbuf->type,
412 stbuf->dev, &stbuf->qid, 419 stbuf->dev, &stbuf->qid,
@@ -421,8 +428,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
421 int32_t count = va_arg(ap, int32_t); 428 int32_t count = va_arg(ap, int32_t);
422 const void *data = va_arg(ap, const void *); 429 const void *data = va_arg(ap, const void *);
423 430
424 errcode = 431 errcode = p9pdu_writef(pdu, proto_version, "d",
425 p9pdu_writef(pdu, optional, "d", count); 432 count);
426 if (!errcode && pdu_write(pdu, data, count)) 433 if (!errcode && pdu_write(pdu, data, count))
427 errcode = -EFAULT; 434 errcode = -EFAULT;
428 } 435 }
@@ -431,8 +438,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
431 int32_t count = va_arg(ap, int32_t); 438 int32_t count = va_arg(ap, int32_t);
432 const char __user *udata = 439 const char __user *udata =
433 va_arg(ap, const void __user *); 440 va_arg(ap, const void __user *);
434 errcode = 441 errcode = p9pdu_writef(pdu, proto_version, "d",
435 p9pdu_writef(pdu, optional, "d", count); 442 count);
436 if (!errcode && pdu_write_u(pdu, udata, count)) 443 if (!errcode && pdu_write_u(pdu, udata, count))
437 errcode = -EFAULT; 444 errcode = -EFAULT;
438 } 445 }
@@ -441,14 +448,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
441 int16_t nwname = va_arg(ap, int); 448 int16_t nwname = va_arg(ap, int);
442 const char **wnames = va_arg(ap, const char **); 449 const char **wnames = va_arg(ap, const char **);
443 450
444 errcode = 451 errcode = p9pdu_writef(pdu, proto_version, "w",
445 p9pdu_writef(pdu, optional, "w", nwname); 452 nwname);
446 if (!errcode) { 453 if (!errcode) {
447 int i; 454 int i;
448 455
449 for (i = 0; i < nwname; i++) { 456 for (i = 0; i < nwname; i++) {
450 errcode = 457 errcode =
451 p9pdu_writef(pdu, optional, 458 p9pdu_writef(pdu,
459 proto_version,
452 "s", 460 "s",
453 wnames[i]); 461 wnames[i]);
454 if (errcode) 462 if (errcode)
@@ -462,14 +470,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
462 struct p9_qid *wqids = 470 struct p9_qid *wqids =
463 va_arg(ap, struct p9_qid *); 471 va_arg(ap, struct p9_qid *);
464 472
465 errcode = 473 errcode = p9pdu_writef(pdu, proto_version, "w",
466 p9pdu_writef(pdu, optional, "w", nwqid); 474 nwqid);
467 if (!errcode) { 475 if (!errcode) {
468 int i; 476 int i;
469 477
470 for (i = 0; i < nwqid; i++) { 478 for (i = 0; i < nwqid; i++) {
471 errcode = 479 errcode =
472 p9pdu_writef(pdu, optional, 480 p9pdu_writef(pdu,
481 proto_version,
473 "Q", 482 "Q",
474 &wqids[i]); 483 &wqids[i]);
475 if (errcode) 484 if (errcode)
@@ -479,7 +488,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
479 } 488 }
480 break; 489 break;
481 case '?': 490 case '?':
482 if (!optional) 491 if (proto_version != p9_proto_2000u)
483 return 0; 492 return 0;
484 break; 493 break;
485 default: 494 default:
@@ -494,32 +503,32 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
494 return errcode; 503 return errcode;
495} 504}
496 505
497int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...) 506int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
498{ 507{
499 va_list ap; 508 va_list ap;
500 int ret; 509 int ret;
501 510
502 va_start(ap, fmt); 511 va_start(ap, fmt);
503 ret = p9pdu_vreadf(pdu, optional, fmt, ap); 512 ret = p9pdu_vreadf(pdu, proto_version, fmt, ap);
504 va_end(ap); 513 va_end(ap);
505 514
506 return ret; 515 return ret;
507} 516}
508 517
509static int 518static int
510p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...) 519p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
511{ 520{
512 va_list ap; 521 va_list ap;
513 int ret; 522 int ret;
514 523
515 va_start(ap, fmt); 524 va_start(ap, fmt);
516 ret = p9pdu_vwritef(pdu, optional, fmt, ap); 525 ret = p9pdu_vwritef(pdu, proto_version, fmt, ap);
517 va_end(ap); 526 va_end(ap);
518 527
519 return ret; 528 return ret;
520} 529}
521 530
522int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu) 531int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
523{ 532{
524 struct p9_fcall fake_pdu; 533 struct p9_fcall fake_pdu;
525 int ret; 534 int ret;
@@ -529,7 +538,7 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu)
529 fake_pdu.sdata = buf; 538 fake_pdu.sdata = buf;
530 fake_pdu.offset = 0; 539 fake_pdu.offset = 0;
531 540
532 ret = p9pdu_readf(&fake_pdu, dotu, "S", st); 541 ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
533 if (ret) { 542 if (ret) {
534 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); 543 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
535 p9pdu_dump(1, &fake_pdu); 544 p9pdu_dump(1, &fake_pdu);
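The protocol.c hunks above all follow one idea: the old boolean "optional"/dotu flag becomes a proto_version value, and the '?' conversion character in the format string now gates the trailing 9P2000.u-only fields on the negotiated version rather than a bare flag. Below is a minimal userspace sketch of that gating only; it is not the kernel code, and the names walk_fmt, P9_PROTO_LEGACY and P9_PROTO_2000U are invented stand-ins for p9pdu_vreadf/p9pdu_vwritef and p9_proto_2000u.

#include <stdio.h>

enum p9_proto { P9_PROTO_LEGACY, P9_PROTO_2000U };   /* stand-ins, not kernel names */

/* Walk a 9P-style format string; on '?', keep going only for 9P2000.u. */
static int walk_fmt(const char *fmt, enum p9_proto proto_version)
{
	const char *p;

	for (p = fmt; *p; p++) {
		if (*p == '?') {
			if (proto_version != P9_PROTO_2000U)
				return 0;        /* plain 9P2000: silently skip the .u-only tail */
			continue;                /* 9P2000.u: marshal the remaining fields too */
		}
		printf("would marshal conversion '%c'\n", *p);
	}
	return 0;
}

int main(void)
{
	walk_fmt("wwdQdddqssss?sddd", P9_PROTO_LEGACY);  /* stops at '?' */
	walk_fmt("wwdQdddqssss?sddd", P9_PROTO_2000U);   /* also handles the sddd tail */
	return 0;
}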
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index ccde462e7ac5..2431c0f38d56 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -25,9 +25,9 @@
25 * 25 *
26 */ 26 */
27 27
28int 28int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
29p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap); 29 va_list ap);
30int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...); 30int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); 31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
32int p9pdu_finalize(struct p9_fcall *pdu); 32int p9pdu_finalize(struct p9_fcall *pdu);
33void p9pdu_dump(int, struct p9_fcall *); 33void p9pdu_dump(int, struct p9_fcall *);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 31d0b05582a9..98ce9bcb0e15 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -38,6 +38,7 @@
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/file.h> 39#include <linux/file.h>
40#include <linux/parser.h> 40#include <linux/parser.h>
41#include <linux/slab.h>
41#include <net/9p/9p.h> 42#include <net/9p/9p.h>
42#include <net/9p/client.h> 43#include <net/9p/client.h>
43#include <net/9p/transport.h> 44#include <net/9p/transport.h>
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 2c95a89c0f46..041101ab4aa5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -40,6 +40,7 @@
40#include <linux/file.h> 40#include <linux/file.h>
41#include <linux/parser.h> 41#include <linux/parser.h>
42#include <linux/semaphore.h> 42#include <linux/semaphore.h>
43#include <linux/slab.h>
43#include <net/9p/9p.h> 44#include <net/9p/9p.h>
44#include <net/9p/client.h> 45#include <net/9p/client.h>
45#include <net/9p/transport.h> 46#include <net/9p/transport.h>
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cb50f4ae5eef..7eb78ecc1618 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -37,6 +37,7 @@
37#include <linux/inet.h> 37#include <linux/inet.h>
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/file.h> 39#include <linux/file.h>
40#include <linux/slab.h>
40#include <net/9p/9p.h> 41#include <net/9p/9p.h>
41#include <linux/parser.h> 42#include <linux/parser.h>
42#include <net/9p/client.h> 43#include <net/9p/client.h>
@@ -49,8 +50,6 @@
49 50
50/* a single mutex to manage channel initialization and attachment */ 51/* a single mutex to manage channel initialization and attachment */
51static DEFINE_MUTEX(virtio_9p_lock); 52static DEFINE_MUTEX(virtio_9p_lock);
52/* global which tracks highest initialized channel */
53static int chan_index;
54 53
55/** 54/**
56 * struct virtio_chan - per-instance transport information 55 * struct virtio_chan - per-instance transport information
@@ -68,8 +67,7 @@ static int chan_index;
68 * 67 *
69 */ 68 */
70 69
71static struct virtio_chan { 70struct virtio_chan {
72 bool initialized;
73 bool inuse; 71 bool inuse;
74 72
75 spinlock_t lock; 73 spinlock_t lock;
@@ -80,7 +78,17 @@ static struct virtio_chan {
80 78
81 /* Scatterlist: can be too big for stack. */ 79 /* Scatterlist: can be too big for stack. */
82 struct scatterlist sg[VIRTQUEUE_NUM]; 80 struct scatterlist sg[VIRTQUEUE_NUM];
83} channels[MAX_9P_CHAN]; 81
82 int tag_len;
83 /*
84 * tag name to identify a mount Non-null terminated
85 */
86 char *tag;
87
88 struct list_head chan_list;
89};
90
91static struct list_head virtio_chan_list;
84 92
85/* How many bytes left in this page. */ 93/* How many bytes left in this page. */
86static unsigned int rest_of_page(void *data) 94static unsigned int rest_of_page(void *data)
@@ -213,30 +221,38 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
213 return 0; 221 return 0;
214} 222}
215 223
224static ssize_t p9_mount_tag_show(struct device *dev,
225 struct device_attribute *attr, char *buf)
226{
227 struct virtio_chan *chan;
228 struct virtio_device *vdev;
229
230 vdev = dev_to_virtio(dev);
231 chan = vdev->priv;
232
233 return snprintf(buf, chan->tag_len + 1, "%s", chan->tag);
234}
235
236static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
237
216/** 238/**
217 * p9_virtio_probe - probe for existence of 9P virtio channels 239 * p9_virtio_probe - probe for existence of 9P virtio channels
218 * @vdev: virtio device to probe 240 * @vdev: virtio device to probe
219 * 241 *
220 * This probes for existing virtio channels. At present only 242 * This probes for existing virtio channels.
221 * a single channel is in use, so in the future more work may need
222 * to be done here.
223 * 243 *
224 */ 244 */
225 245
226static int p9_virtio_probe(struct virtio_device *vdev) 246static int p9_virtio_probe(struct virtio_device *vdev)
227{ 247{
248 __u16 tag_len;
249 char *tag;
228 int err; 250 int err;
229 struct virtio_chan *chan; 251 struct virtio_chan *chan;
230 int index;
231 252
232 mutex_lock(&virtio_9p_lock); 253 chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
233 index = chan_index++; 254 if (!chan) {
234 chan = &channels[index]; 255 printk(KERN_ERR "9p: Failed to allocate virtio 9P channel\n");
235 mutex_unlock(&virtio_9p_lock);
236
237 if (chan_index > MAX_9P_CHAN) {
238 printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n");
239 BUG();
240 err = -ENOMEM; 256 err = -ENOMEM;
241 goto fail; 257 goto fail;
242 } 258 }
@@ -255,15 +271,37 @@ static int p9_virtio_probe(struct virtio_device *vdev)
255 sg_init_table(chan->sg, VIRTQUEUE_NUM); 271 sg_init_table(chan->sg, VIRTQUEUE_NUM);
256 272
257 chan->inuse = false; 273 chan->inuse = false;
258 chan->initialized = true; 274 if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
275 vdev->config->get(vdev,
276 offsetof(struct virtio_9p_config, tag_len),
277 &tag_len, sizeof(tag_len));
278 } else {
279 err = -EINVAL;
280 goto out_free_vq;
281 }
282 tag = kmalloc(tag_len, GFP_KERNEL);
283 if (!tag) {
284 err = -ENOMEM;
285 goto out_free_vq;
286 }
287 vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
288 tag, tag_len);
289 chan->tag = tag;
290 chan->tag_len = tag_len;
291 err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
292 if (err) {
293 kfree(tag);
294 goto out_free_vq;
295 }
296 mutex_lock(&virtio_9p_lock);
297 list_add_tail(&chan->chan_list, &virtio_chan_list);
298 mutex_unlock(&virtio_9p_lock);
259 return 0; 299 return 0;
260 300
261out_free_vq: 301out_free_vq:
262 vdev->config->del_vqs(vdev); 302 vdev->config->del_vqs(vdev);
303 kfree(chan);
263fail: 304fail:
264 mutex_lock(&virtio_9p_lock);
265 chan_index--;
266 mutex_unlock(&virtio_9p_lock);
267 return err; 305 return err;
268} 306}
269 307
@@ -280,35 +318,31 @@ fail:
280 * We use a simple reference count mechanism to ensure that only a single 318 * We use a simple reference count mechanism to ensure that only a single
281 * mount has a channel open at a time. 319 * mount has a channel open at a time.
282 * 320 *
283 * Bugs: doesn't allow identification of a specific channel
284 * to allocate, channels are allocated sequentially. This was
285 * a pragmatic decision to get things rolling, but ideally some
286 * way of identifying the channel to attach to would be nice
287 * if we are going to support multiple channels.
288 *
289 */ 321 */
290 322
291static int 323static int
292p9_virtio_create(struct p9_client *client, const char *devname, char *args) 324p9_virtio_create(struct p9_client *client, const char *devname, char *args)
293{ 325{
294 struct virtio_chan *chan = channels; 326 struct virtio_chan *chan;
295 int index = 0; 327 int ret = -ENOENT;
328 int found = 0;
296 329
297 mutex_lock(&virtio_9p_lock); 330 mutex_lock(&virtio_9p_lock);
298 while (index < MAX_9P_CHAN) { 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
299 if (chan->initialized && !chan->inuse) { 332 if (!strncmp(devname, chan->tag, chan->tag_len)) {
300 chan->inuse = true; 333 if (!chan->inuse) {
301 break; 334 chan->inuse = true;
302 } else { 335 found = 1;
303 index++; 336 break;
304 chan = &channels[index]; 337 }
338 ret = -EBUSY;
305 } 339 }
306 } 340 }
307 mutex_unlock(&virtio_9p_lock); 341 mutex_unlock(&virtio_9p_lock);
308 342
309 if (index >= MAX_9P_CHAN) { 343 if (!found) {
310 printk(KERN_ERR "9p: no channels available\n"); 344 printk(KERN_ERR "9p: no channels available\n");
311 return -ENODEV; 345 return ret;
312 } 346 }
313 347
314 client->trans = (void *)chan; 348 client->trans = (void *)chan;
@@ -329,11 +363,15 @@ static void p9_virtio_remove(struct virtio_device *vdev)
329 struct virtio_chan *chan = vdev->priv; 363 struct virtio_chan *chan = vdev->priv;
330 364
331 BUG_ON(chan->inuse); 365 BUG_ON(chan->inuse);
366 vdev->config->del_vqs(vdev);
367
368 mutex_lock(&virtio_9p_lock);
369 list_del(&chan->chan_list);
370 mutex_unlock(&virtio_9p_lock);
371 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
372 kfree(chan->tag);
373 kfree(chan);
332 374
333 if (chan->initialized) {
334 vdev->config->del_vqs(vdev);
335 chan->initialized = false;
336 }
337} 375}
338 376
339static struct virtio_device_id id_table[] = { 377static struct virtio_device_id id_table[] = {
@@ -341,13 +379,19 @@ static struct virtio_device_id id_table[] = {
341 { 0 }, 379 { 0 },
342}; 380};
343 381
382static unsigned int features[] = {
383 VIRTIO_9P_MOUNT_TAG,
384};
385
344/* The standard "struct lguest_driver": */ 386/* The standard "struct lguest_driver": */
345static struct virtio_driver p9_virtio_drv = { 387static struct virtio_driver p9_virtio_drv = {
346 .driver.name = KBUILD_MODNAME, 388 .feature_table = features,
347 .driver.owner = THIS_MODULE, 389 .feature_table_size = ARRAY_SIZE(features),
348 .id_table = id_table, 390 .driver.name = KBUILD_MODNAME,
349 .probe = p9_virtio_probe, 391 .driver.owner = THIS_MODULE,
350 .remove = p9_virtio_remove, 392 .id_table = id_table,
393 .probe = p9_virtio_probe,
394 .remove = p9_virtio_remove,
351}; 395};
352 396
353static struct p9_trans_module p9_virtio_trans = { 397static struct p9_trans_module p9_virtio_trans = {
@@ -364,10 +408,7 @@ static struct p9_trans_module p9_virtio_trans = {
364/* The standard init function */ 408/* The standard init function */
365static int __init p9_virtio_init(void) 409static int __init p9_virtio_init(void)
366{ 410{
367 int count; 411 INIT_LIST_HEAD(&virtio_chan_list);
368
369 for (count = 0; count < MAX_9P_CHAN; count++)
370 channels[count].initialized = false;
371 412
372 v9fs_register_trans(&p9_virtio_trans); 413 v9fs_register_trans(&p9_virtio_trans);
373 return register_virtio_driver(&p9_virtio_drv); 414 return register_virtio_driver(&p9_virtio_drv);
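The trans_virtio.c changes replace the fixed channels[MAX_9P_CHAN] array and chan_index counter with per-device structures kept on a mutex-protected list and selected at mount time by the tag the device advertises (read from virtio config space and also exposed through the mount_tag sysfs attribute). A rough userspace analogue of that tag lookup is sketched below; it uses pthreads and invented names (struct chan, chan_list, find_chan) rather than the kernel's list_head/mutex API, and is illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct chan {
	struct chan *next;
	const char *tag;        /* mount tag advertised by the device */
	size_t tag_len;
	int inuse;
};

static struct chan *chan_list;
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_chan(struct chan *c)
{
	pthread_mutex_lock(&chan_lock);
	c->next = chan_list;
	chan_list = c;
	pthread_mutex_unlock(&chan_lock);
}

/* Find a free channel whose tag matches devname and mark it busy. */
static struct chan *find_chan(const char *devname)
{
	struct chan *c, *found = NULL;

	pthread_mutex_lock(&chan_lock);
	for (c = chan_list; c; c = c->next) {
		if (!strncmp(devname, c->tag, c->tag_len) && !c->inuse) {
			c->inuse = 1;
			found = c;
			break;
		}
	}
	pthread_mutex_unlock(&chan_lock);
	return found;
}

int main(void)
{
	struct chan a = { .tag = "share0", .tag_len = 6 };
	struct chan b = { .tag = "share1", .tag_len = 6 };

	add_chan(&a);
	add_chan(&b);
	printf("found: %s\n", find_chan("share1") ? "yes" : "no");
	return 0;
}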
diff --git a/net/9p/util.c b/net/9p/util.c
index dc4ec05ad93d..e048701a72d2 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -30,6 +30,7 @@
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/parser.h> 31#include <linux/parser.h>
32#include <linux/idr.h> 32#include <linux/idr.h>
33#include <linux/slab.h>
33#include <net/9p/9p.h> 34#include <net/9p/9p.h>
34 35
35/** 36/**
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index f2b3b56aa779..50dce7981321 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -30,6 +30,7 @@
30 */ 30 */
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/slab.h>
33#include <net/sock.h> 34#include <net/sock.h>
34#include <net/datalink.h> 35#include <net/datalink.h>
35#include <net/psnap.h> 36#include <net/psnap.h>
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 80caad1a31a5..6ef0e761e5de 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,40 +144,16 @@ out:
144 return 0; 144 return 0;
145} 145}
146 146
147static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
148{
149 struct sock *s;
150 struct hlist_node *node;
151
152 sk_for_each(s, node, &atalk_sockets)
153 if (!pos--)
154 goto found;
155 s = NULL;
156found:
157 return s;
158}
159
160static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos) 147static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
161 __acquires(atalk_sockets_lock) 148 __acquires(atalk_sockets_lock)
162{ 149{
163 loff_t l = *pos;
164
165 read_lock_bh(&atalk_sockets_lock); 150 read_lock_bh(&atalk_sockets_lock);
166 return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN; 151 return seq_hlist_start_head(&atalk_sockets, *pos);
167} 152}
168 153
169static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 154static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
170{ 155{
171 struct sock *i; 156 return seq_hlist_next(v, &atalk_sockets, pos);
172
173 ++*pos;
174 if (v == SEQ_START_TOKEN) {
175 i = sk_head(&atalk_sockets);
176 goto out;
177 }
178 i = sk_next(v);
179out:
180 return i;
181} 157}
182 158
183static void atalk_seq_socket_stop(struct seq_file *seq, void *v) 159static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
@@ -197,7 +173,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
197 goto out; 173 goto out;
198 } 174 }
199 175
200 s = v; 176 s = sk_entry(v);
201 at = at_sk(s); 177 at = at_sk(s);
202 178
203 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X " 179 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
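The atalk_proc.c hunk drops the open-coded cursor (atalk_get_socket_idx and the hand-rolled next function) in favour of the generic seq_hlist_start_head()/seq_hlist_next()/sk_entry() helpers, which hand back SEQ_START_TOKEN at position 0 so the header line is still printed before the first socket. A minimal userspace sketch of that start/next contract follows; seq_start, seq_next and struct node are made-up names standing in for the hlist-based kernel helpers.

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)   /* same trick the kernel uses for the header row */

struct node { struct node *next; int val; };

/* pos == 0 yields the header token; pos n yields the (n-1)'th list entry. */
static void *seq_start(struct node *head, long pos)
{
	struct node *n = head;

	if (pos == 0)
		return SEQ_START_TOKEN;
	for (--pos; n && pos; pos--)
		n = n->next;
	return n;
}

static void *seq_next(void *v, struct node *head, long *pos)
{
	++*pos;
	if (v == SEQ_START_TOKEN)
		return head;
	return ((struct node *)v)->next;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	long pos = 0;
	void *v;

	for (v = seq_start(&a, pos); v; v = seq_next(v, &a, &pos)) {
		if (v == SEQ_START_TOKEN)
			puts("hdr");       /* header line, printed once */
		else
			printf("%d\n", ((struct node *)v)->val);
	}
	return 0;
}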
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 9fc4da56fb1d..7b02967fbbe7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -57,6 +57,7 @@
57#include <linux/smp_lock.h> 57#include <linux/smp_lock.h>
58#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 58#include <linux/termios.h> /* For TIOCOUTQ/INQ */
59#include <linux/compat.h> 59#include <linux/compat.h>
60#include <linux/slab.h>
60#include <net/datalink.h> 61#include <net/datalink.h>
61#include <net/psnap.h> 62#include <net/psnap.h>
62#include <net/sock.h> 63#include <net/sock.h>
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 82e85abc303d..dcda35c66f15 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,7 +4,8 @@
4 4
5#include <linux/atm.h> 5#include <linux/atm.h>
6#include <linux/atmdev.h> 6#include <linux/atmdev.h>
7#include <asm/uaccess.h> 7#include <linux/slab.h>
8#include <linux/uaccess.h>
8 9
9#include "signaling.h" 10#include "signaling.h"
10#include "addr.h" 11#include "addr.h"
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 02cc7e71efea..fc63526d8695 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -2,37 +2,35 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/atm.h> 6#include <linux/atm.h>
8#include <linux/atmdev.h> 7#include <linux/atmdev.h>
9#include <linux/skbuff.h> 8#include <linux/skbuff.h>
10#include <linux/sonet.h> 9#include <linux/sonet.h>
11#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/errno.h>
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13#include <asm/errno.h>
14
15 13
16int atm_charge(struct atm_vcc *vcc,int truesize) 14int atm_charge(struct atm_vcc *vcc, int truesize)
17{ 15{
18 atm_force_charge(vcc,truesize); 16 atm_force_charge(vcc, truesize);
19 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) 17 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
20 return 1; 18 return 1;
21 atm_return(vcc,truesize); 19 atm_return(vcc, truesize);
22 atomic_inc(&vcc->stats->rx_drop); 20 atomic_inc(&vcc->stats->rx_drop);
23 return 0; 21 return 0;
24} 22}
23EXPORT_SYMBOL(atm_charge);
25 24
26 25struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 26 gfp_t gfp_flags)
28 gfp_t gfp_flags)
29{ 27{
30 struct sock *sk = sk_atm(vcc); 28 struct sock *sk = sk_atm(vcc);
31 int guess = atm_guess_pdu2truesize(pdu_size); 29 int guess = atm_guess_pdu2truesize(pdu_size);
32 30
33 atm_force_charge(vcc,guess); 31 atm_force_charge(vcc, guess);
34 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 32 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
35 struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags); 33 struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
36 34
37 if (skb) { 35 if (skb) {
38 atomic_add(skb->truesize-guess, 36 atomic_add(skb->truesize-guess,
@@ -40,10 +38,11 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
40 return skb; 38 return skb;
41 } 39 }
42 } 40 }
43 atm_return(vcc,guess); 41 atm_return(vcc, guess);
44 atomic_inc(&vcc->stats->rx_drop); 42 atomic_inc(&vcc->stats->rx_drop);
45 return NULL; 43 return NULL;
46} 44}
45EXPORT_SYMBOL(atm_alloc_charge);
47 46
48 47
49/* 48/*
@@ -73,7 +72,6 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
73 * else * 72 * else *
74 */ 73 */
75 74
76
77int atm_pcr_goal(const struct atm_trafprm *tp) 75int atm_pcr_goal(const struct atm_trafprm *tp)
78{ 76{
79 if (tp->pcr && tp->pcr != ATM_MAX_PCR) 77 if (tp->pcr && tp->pcr != ATM_MAX_PCR)
@@ -84,26 +82,20 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
84 return -tp->max_pcr; 82 return -tp->max_pcr;
85 return 0; 83 return 0;
86} 84}
85EXPORT_SYMBOL(atm_pcr_goal);
87 86
88 87void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
90{ 88{
91#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) 89#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92 __SONET_ITEMS 90 __SONET_ITEMS
93#undef __HANDLE_ITEM 91#undef __HANDLE_ITEM
94} 92}
93EXPORT_SYMBOL(sonet_copy_stats);
95 94
96 95void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
98{ 96{
99#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) 97#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100 __SONET_ITEMS 98 __SONET_ITEMS
101#undef __HANDLE_ITEM 99#undef __HANDLE_ITEM
102} 100}
103
104
105EXPORT_SYMBOL(atm_charge);
106EXPORT_SYMBOL(atm_alloc_charge);
107EXPORT_SYMBOL(atm_pcr_goal);
108EXPORT_SYMBOL(sonet_copy_stats);
109EXPORT_SYMBOL(sonet_subtract_stats); 101EXPORT_SYMBOL(sonet_subtract_stats);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index b5674dc2083d..799c631f0fed 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -1,6 +1,7 @@
1/* ATM driver model support. */ 1/* ATM driver model support. */
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/slab.h>
4#include <linux/init.h> 5#include <linux/init.h>
5#include <linux/kobject.h> 6#include <linux/kobject.h>
6#include <linux/atmdev.h> 7#include <linux/atmdev.h>
@@ -42,13 +43,14 @@ static ssize_t show_atmaddress(struct device *cdev,
42 43
43 spin_lock_irqsave(&adev->lock, flags); 44 spin_lock_irqsave(&adev->lock, flags);
44 list_for_each_entry(aaddr, &adev->local, entry) { 45 list_for_each_entry(aaddr, &adev->local, entry) {
45 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 46 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
46 if (j == *fmt) { 47 if (j == *fmt) {
47 pos += sprintf(pos, "."); 48 pos += sprintf(pos, ".");
48 ++fmt; 49 ++fmt;
49 j = 0; 50 j = 0;
50 } 51 }
51 pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]); 52 pos += sprintf(pos, "%02x",
53 aaddr->addr.sas_addr.prv[i]);
52 } 54 }
53 pos += sprintf(pos, "\n"); 55 pos += sprintf(pos, "\n");
54 } 56 }
@@ -78,17 +80,17 @@ static ssize_t show_link_rate(struct device *cdev,
78 80
79 /* show the link rate, not the data rate */ 81 /* show the link rate, not the data rate */
80 switch (adev->link_rate) { 82 switch (adev->link_rate) {
81 case ATM_OC3_PCR: 83 case ATM_OC3_PCR:
82 link_rate = 155520000; 84 link_rate = 155520000;
83 break; 85 break;
84 case ATM_OC12_PCR: 86 case ATM_OC12_PCR:
85 link_rate = 622080000; 87 link_rate = 622080000;
86 break; 88 break;
87 case ATM_25_PCR: 89 case ATM_25_PCR:
88 link_rate = 25600000; 90 link_rate = 25600000;
89 break; 91 break;
90 default: 92 default:
91 link_rate = adev->link_rate * 8 * 53; 93 link_rate = adev->link_rate * 8 * 53;
92 } 94 }
93 pos += sprintf(pos, "%d\n", link_rate); 95 pos += sprintf(pos, "%d\n", link_rate);
94 96
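The show_link_rate default case above converts a peak cell rate into bits per second with link_rate * 8 * 53, since an ATM cell is 53 bytes. A tiny sketch of that arithmetic, using an approximate OC-3 cell rate as the sample input (the exact ATM_OC3_PCR constant lives in the kernel headers, not here):

#include <stdio.h>

/* ATM cells are 53 bytes, so cells/s * 53 * 8 gives bits per second,
 * which is the same arithmetic as the sysfs default case above. */
static int link_rate_bps(int cells_per_sec)
{
	return cells_per_sec * 8 * 53;
}

int main(void)
{
	/* roughly the OC-3 payload cell rate; yields about 149.76 Mb/s */
	printf("%d\n", link_rate_bps(353207));
	return 0;
}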
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index c9230c398697..d6c7ceaf13e9 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -6,6 +6,8 @@
6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory 6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/init.h> 12#include <linux/init.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +17,8 @@
15#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/slab.h>
19#include <net/arp.h> 22#include <net/arp.h>
20#include <linux/atm.h> 23#include <linux/atm.h>
21#include <linux/atmdev.h> 24#include <linux/atmdev.h>
@@ -26,20 +29,14 @@
26 29
27#include "common.h" 30#include "common.h"
28 31
29#ifdef SKB_DEBUG
30static void skb_debug(const struct sk_buff *skb) 32static void skb_debug(const struct sk_buff *skb)
31{ 33{
34#ifdef SKB_DEBUG
32#define NUM2PRINT 50 35#define NUM2PRINT 50
33 char buf[NUM2PRINT * 3 + 1]; /* 3 chars per byte */ 36 print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET,
34 int i = 0; 37 16, 1, skb->data, min(NUM2PRINT, skb->len), true);
35 for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
36 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
37 }
38 printk(KERN_DEBUG "br2684: skb: %s\n", buf);
39}
40#else
41#define skb_debug(skb) do {} while (0)
42#endif 38#endif
39}
43 40
44#define BR2684_ETHERTYPE_LEN 2 41#define BR2684_ETHERTYPE_LEN 2
45#define BR2684_PAD_LEN 2 42#define BR2684_PAD_LEN 2
@@ -68,7 +65,7 @@ struct br2684_vcc {
68 struct atm_vcc *atmvcc; 65 struct atm_vcc *atmvcc;
69 struct net_device *device; 66 struct net_device *device;
70 /* keep old push, pop functions for chaining */ 67 /* keep old push, pop functions for chaining */
71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb); 68 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
72 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 69 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
73 enum br2684_encaps encaps; 70 enum br2684_encaps encaps;
74 struct list_head brvccs; 71 struct list_head brvccs;
@@ -148,7 +145,7 @@ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
148 struct br2684_vcc *brvcc = BR2684_VCC(vcc); 145 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
149 struct net_device *net_dev = skb->dev; 146 struct net_device *net_dev = skb->dev;
150 147
151 pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev); 148 pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
152 brvcc->old_pop(vcc, skb); 149 brvcc->old_pop(vcc, skb);
153 150
154 if (!net_dev) 151 if (!net_dev)
@@ -244,7 +241,7 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
244 struct br2684_dev *brdev = BRPRIV(dev); 241 struct br2684_dev *brdev = BRPRIV(dev);
245 struct br2684_vcc *brvcc; 242 struct br2684_vcc *brvcc;
246 243
247 pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb)); 244 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
248 read_lock(&devs_lock); 245 read_lock(&devs_lock);
249 brvcc = pick_outgoing_vcc(skb, brdev); 246 brvcc = pick_outgoing_vcc(skb, brdev);
250 if (brvcc == NULL) { 247 if (brvcc == NULL) {
@@ -300,7 +297,8 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
300 struct br2684_dev *brdev; 297 struct br2684_dev *brdev;
301 read_lock(&devs_lock); 298 read_lock(&devs_lock);
302 brdev = BRPRIV(br2684_find_dev(&fs.ifspec)); 299 brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
303 if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */ 300 if (brdev == NULL || list_empty(&brdev->brvccs) ||
301 brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
304 brvcc = NULL; 302 brvcc = NULL;
305 else 303 else
306 brvcc = list_entry_brvcc(brdev->brvccs.next); 304 brvcc = list_entry_brvcc(brdev->brvccs.next);
@@ -352,7 +350,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
352 struct net_device *net_dev = brvcc->device; 350 struct net_device *net_dev = brvcc->device;
353 struct br2684_dev *brdev = BRPRIV(net_dev); 351 struct br2684_dev *brdev = BRPRIV(net_dev);
354 352
355 pr_debug("br2684_push\n"); 353 pr_debug("\n");
356 354
357 if (unlikely(skb == NULL)) { 355 if (unlikely(skb == NULL)) {
358 /* skb==NULL means VCC is being destroyed */ 356 /* skb==NULL means VCC is being destroyed */
@@ -376,29 +374,25 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
376 __skb_trim(skb, skb->len - 4); 374 __skb_trim(skb, skb->len - 4);
377 375
378 /* accept packets that have "ipv[46]" in the snap header */ 376 /* accept packets that have "ipv[46]" in the snap header */
379 if ((skb->len >= (sizeof(llc_oui_ipv4))) 377 if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
380 && 378 (memcmp(skb->data, llc_oui_ipv4,
381 (memcmp 379 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
382 (skb->data, llc_oui_ipv4, 380 if (memcmp(skb->data + 6, ethertype_ipv6,
383 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) { 381 sizeof(ethertype_ipv6)) == 0)
384 if (memcmp
385 (skb->data + 6, ethertype_ipv6,
386 sizeof(ethertype_ipv6)) == 0)
387 skb->protocol = htons(ETH_P_IPV6); 382 skb->protocol = htons(ETH_P_IPV6);
388 else if (memcmp 383 else if (memcmp(skb->data + 6, ethertype_ipv4,
389 (skb->data + 6, ethertype_ipv4, 384 sizeof(ethertype_ipv4)) == 0)
390 sizeof(ethertype_ipv4)) == 0)
391 skb->protocol = htons(ETH_P_IP); 385 skb->protocol = htons(ETH_P_IP);
392 else 386 else
393 goto error; 387 goto error;
394 skb_pull(skb, sizeof(llc_oui_ipv4)); 388 skb_pull(skb, sizeof(llc_oui_ipv4));
395 skb_reset_network_header(skb); 389 skb_reset_network_header(skb);
396 skb->pkt_type = PACKET_HOST; 390 skb->pkt_type = PACKET_HOST;
397 /* 391 /*
398 * Let us waste some time for checking the encapsulation. 392 * Let us waste some time for checking the encapsulation.
399 * Note, that only 7 char is checked so frames with a valid FCS 393 * Note, that only 7 char is checked so frames with a valid FCS
400 * are also accepted (but FCS is not checked of course). 394 * are also accepted (but FCS is not checked of course).
401 */ 395 */
402 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) && 396 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
403 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { 397 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
404 skb_pull(skb, sizeof(llc_oui_pid_pad)); 398 skb_pull(skb, sizeof(llc_oui_pid_pad));
@@ -479,8 +473,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
479 write_lock_irq(&devs_lock); 473 write_lock_irq(&devs_lock);
480 net_dev = br2684_find_dev(&be.ifspec); 474 net_dev = br2684_find_dev(&be.ifspec);
481 if (net_dev == NULL) { 475 if (net_dev == NULL) {
482 printk(KERN_ERR 476 pr_err("tried to attach to non-existant device\n");
483 "br2684: tried to attach to non-existant device\n");
484 err = -ENXIO; 477 err = -ENXIO;
485 goto error; 478 goto error;
486 } 479 }
@@ -494,17 +487,16 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
494 err = -EEXIST; 487 err = -EEXIST;
495 goto error; 488 goto error;
496 } 489 }
497 if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO || 490 if (be.fcs_in != BR2684_FCSIN_NO ||
498 be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps != 491 be.fcs_out != BR2684_FCSOUT_NO ||
499 BR2684_ENCAPS_VC 492 be.fcs_auto || be.has_vpiid || be.send_padding ||
500 && be.encaps != 493 (be.encaps != BR2684_ENCAPS_VC &&
501 BR2684_ENCAPS_LLC) 494 be.encaps != BR2684_ENCAPS_LLC) ||
502 || be.min_size != 0) { 495 be.min_size != 0) {
503 err = -EINVAL; 496 err = -EINVAL;
504 goto error; 497 goto error;
505 } 498 }
506 pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, 499 pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
507 be.encaps, brvcc);
508 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { 500 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
509 unsigned char *esi = atmvcc->dev->esi; 501 unsigned char *esi = atmvcc->dev->esi;
510 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) 502 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
@@ -541,7 +533,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
541 } 533 }
542 __module_get(THIS_MODULE); 534 __module_get(THIS_MODULE);
543 return 0; 535 return 0;
544 error: 536
537error:
545 write_unlock_irq(&devs_lock); 538 write_unlock_irq(&devs_lock);
546 kfree(brvcc); 539 kfree(brvcc);
547 return err; 540 return err;
@@ -587,7 +580,7 @@ static void br2684_setup_routed(struct net_device *netdev)
587 INIT_LIST_HEAD(&brdev->brvccs); 580 INIT_LIST_HEAD(&brdev->brvccs);
588} 581}
589 582
590static int br2684_create(void __user * arg) 583static int br2684_create(void __user *arg)
591{ 584{
592 int err; 585 int err;
593 struct net_device *netdev; 586 struct net_device *netdev;
@@ -595,11 +588,10 @@ static int br2684_create(void __user * arg)
595 struct atm_newif_br2684 ni; 588 struct atm_newif_br2684 ni;
596 enum br2684_payload payload; 589 enum br2684_payload payload;
597 590
598 pr_debug("br2684_create\n"); 591 pr_debug("\n");
599 592
600 if (copy_from_user(&ni, arg, sizeof ni)) { 593 if (copy_from_user(&ni, arg, sizeof ni))
601 return -EFAULT; 594 return -EFAULT;
602 }
603 595
604 if (ni.media & BR2684_FLAG_ROUTED) 596 if (ni.media & BR2684_FLAG_ROUTED)
605 payload = p_routed; 597 payload = p_routed;
@@ -607,9 +599,8 @@ static int br2684_create(void __user * arg)
607 payload = p_bridged; 599 payload = p_bridged;
608 ni.media &= 0xffff; /* strip flags */ 600 ni.media &= 0xffff; /* strip flags */
609 601
610 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) { 602 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
611 return -EINVAL; 603 return -EINVAL;
612 }
613 604
614 netdev = alloc_netdev(sizeof(struct br2684_dev), 605 netdev = alloc_netdev(sizeof(struct br2684_dev),
615 ni.ifname[0] ? ni.ifname : "nas%d", 606 ni.ifname[0] ? ni.ifname : "nas%d",
@@ -624,7 +615,7 @@ static int br2684_create(void __user * arg)
624 /* open, stop, do_ioctl ? */ 615 /* open, stop, do_ioctl ? */
625 err = register_netdev(netdev); 616 err = register_netdev(netdev);
626 if (err < 0) { 617 if (err < 0) {
627 printk(KERN_ERR "br2684_create: register_netdev failed\n"); 618 pr_err("register_netdev failed\n");
628 free_netdev(netdev); 619 free_netdev(netdev);
629 return err; 620 return err;
630 } 621 }
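The br2684.c hunks (and the clip.c and common.c ones that follow) rely on defining pr_fmt() at the top of the file so every pr_debug()/pr_err() call is automatically prefixed with the module name and the calling function; that is why call sites such as pr_debug("br2684_push\n") shrink to pr_debug("\n"). The userspace sketch below shows the same macro composition, assuming a GNU-style ##__VA_ARGS__ and substituting fprintf for printk; the message strings are illustrative.

#include <stdio.h>

/* Defined before the printing helpers, mirroring how printk.h picks up pr_fmt. */
#define pr_fmt(fmt) "br2684" ":%s: " fmt, __func__

#define pr_err(fmt, ...)   fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)

static void regvcc_demo(void)
{
	/* The "br2684:regvcc_demo: " prefix comes from the macro, not the caller. */
	pr_err("no such device\n");
	pr_debug("vcc=%p\n", (void *)0);
}

int main(void)
{
	regvcc_demo();
	return 0;
}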
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64629c354343..313aba11316b 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,6 +2,8 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6
5#include <linux/string.h> 7#include <linux/string.h>
6#include <linux/errno.h> 8#include <linux/errno.h>
7#include <linux/kernel.h> /* for UINT_MAX */ 9#include <linux/kernel.h> /* for UINT_MAX */
@@ -28,12 +30,13 @@
28#include <linux/seq_file.h> 30#include <linux/seq_file.h>
29#include <linux/rcupdate.h> 31#include <linux/rcupdate.h>
30#include <linux/jhash.h> 32#include <linux/jhash.h>
33#include <linux/slab.h>
31#include <net/route.h> /* for struct rtable and routing */ 34#include <net/route.h> /* for struct rtable and routing */
32#include <net/icmp.h> /* icmp_send */ 35#include <net/icmp.h> /* icmp_send */
33#include <asm/param.h> /* for HZ */ 36#include <linux/param.h> /* for HZ */
37#include <linux/uaccess.h>
34#include <asm/byteorder.h> /* for htons etc. */ 38#include <asm/byteorder.h> /* for htons etc. */
35#include <asm/system.h> /* save/restore_flags */ 39#include <asm/system.h> /* save/restore_flags */
36#include <asm/uaccess.h>
37#include <asm/atomic.h> 40#include <asm/atomic.h>
38 41
39#include "common.h" 42#include "common.h"
@@ -51,13 +54,13 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
51 struct atmarp_ctrl *ctrl; 54 struct atmarp_ctrl *ctrl;
52 struct sk_buff *skb; 55 struct sk_buff *skb;
53 56
54 pr_debug("to_atmarpd(%d)\n", type); 57 pr_debug("(%d)\n", type);
55 if (!atmarpd) 58 if (!atmarpd)
56 return -EUNATCH; 59 return -EUNATCH;
57 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); 60 skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
58 if (!skb) 61 if (!skb)
59 return -ENOMEM; 62 return -ENOMEM;
60 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl)); 63 ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
61 ctrl->type = type; 64 ctrl->type = type;
62 ctrl->itf_num = itf; 65 ctrl->itf_num = itf;
63 ctrl->ip = ip; 66 ctrl->ip = ip;
@@ -71,8 +74,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
71 74
72static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) 75static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
73{ 76{
74 pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, 77 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
75 entry->neigh);
76 clip_vcc->entry = entry; 78 clip_vcc->entry = entry;
77 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ 79 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
78 clip_vcc->next = entry->vccs; 80 clip_vcc->next = entry->vccs;
@@ -86,7 +88,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
86 struct clip_vcc **walk; 88 struct clip_vcc **walk;
87 89
88 if (!entry) { 90 if (!entry) {
89 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); 91 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
90 return; 92 return;
91 } 93 }
92 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ 94 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -106,13 +108,11 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
106 error = neigh_update(entry->neigh, NULL, NUD_NONE, 108 error = neigh_update(entry->neigh, NULL, NUD_NONE,
107 NEIGH_UPDATE_F_ADMIN); 109 NEIGH_UPDATE_F_ADMIN);
108 if (error) 110 if (error)
109 printk(KERN_CRIT "unlink_clip_vcc: " 111 pr_crit("neigh_update failed with %d\n", error);
110 "neigh_update failed with %d\n", error);
111 goto out; 112 goto out;
112 } 113 }
113 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 114 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
114 "0x%p)\n", entry, clip_vcc); 115out:
115 out:
116 netif_tx_unlock_bh(entry->neigh->dev); 116 netif_tx_unlock_bh(entry->neigh->dev);
117} 117}
118 118
@@ -127,7 +127,7 @@ static int neigh_check_cb(struct neighbour *n)
127 127
128 if (cv->idle_timeout && time_after(jiffies, exp)) { 128 if (cv->idle_timeout && time_after(jiffies, exp)) {
129 pr_debug("releasing vcc %p->%p of entry %p\n", 129 pr_debug("releasing vcc %p->%p of entry %p\n",
130 cv, cv->vcc, entry); 130 cv, cv->vcc, entry);
131 vcc_release_async(cv->vcc, -ETIMEDOUT); 131 vcc_release_async(cv->vcc, -ETIMEDOUT);
132 } 132 }
133 } 133 }
@@ -139,7 +139,7 @@ static int neigh_check_cb(struct neighbour *n)
139 struct sk_buff *skb; 139 struct sk_buff *skb;
140 140
141 pr_debug("destruction postponed with ref %d\n", 141 pr_debug("destruction postponed with ref %d\n",
142 atomic_read(&n->refcnt)); 142 atomic_read(&n->refcnt));
143 143
144 while ((skb = skb_dequeue(&n->arp_queue)) != NULL) 144 while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
145 dev_kfree_skb(skb); 145 dev_kfree_skb(skb);
@@ -163,7 +163,7 @@ static int clip_arp_rcv(struct sk_buff *skb)
163{ 163{
164 struct atm_vcc *vcc; 164 struct atm_vcc *vcc;
165 165
166 pr_debug("clip_arp_rcv\n"); 166 pr_debug("\n");
167 vcc = ATM_SKB(skb)->vcc; 167 vcc = ATM_SKB(skb)->vcc;
168 if (!vcc || !atm_charge(vcc, skb->truesize)) { 168 if (!vcc || !atm_charge(vcc, skb->truesize)) {
169 dev_kfree_skb_any(skb); 169 dev_kfree_skb_any(skb);
@@ -188,7 +188,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
188{ 188{
189 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 189 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
190 190
191 pr_debug("clip push\n"); 191 pr_debug("\n");
192 if (!skb) { 192 if (!skb) {
193 pr_debug("removing VCC %p\n", clip_vcc); 193 pr_debug("removing VCC %p\n", clip_vcc);
194 if (clip_vcc->entry) 194 if (clip_vcc->entry)
@@ -206,12 +206,12 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
206 } 206 }
207 ATM_SKB(skb)->vcc = vcc; 207 ATM_SKB(skb)->vcc = vcc;
208 skb_reset_mac_header(skb); 208 skb_reset_mac_header(skb);
209 if (!clip_vcc->encap 209 if (!clip_vcc->encap ||
210 || skb->len < RFC1483LLC_LEN 210 skb->len < RFC1483LLC_LEN ||
211 || memcmp(skb->data, llc_oui, sizeof (llc_oui))) 211 memcmp(skb->data, llc_oui, sizeof(llc_oui)))
212 skb->protocol = htons(ETH_P_IP); 212 skb->protocol = htons(ETH_P_IP);
213 else { 213 else {
214 skb->protocol = ((__be16 *) skb->data)[3]; 214 skb->protocol = ((__be16 *)skb->data)[3];
215 skb_pull(skb, RFC1483LLC_LEN); 215 skb_pull(skb, RFC1483LLC_LEN);
216 if (skb->protocol == htons(ETH_P_ARP)) { 216 if (skb->protocol == htons(ETH_P_ARP)) {
217 skb->dev->stats.rx_packets++; 217 skb->dev->stats.rx_packets++;
@@ -239,7 +239,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
239 int old; 239 int old;
240 unsigned long flags; 240 unsigned long flags;
241 241
242 pr_debug("clip_pop(vcc %p)\n", vcc); 242 pr_debug("(vcc %p)\n", vcc);
243 clip_vcc->old_pop(vcc, skb); 243 clip_vcc->old_pop(vcc, skb);
244 /* skb->dev == NULL in outbound ARP packets */ 244 /* skb->dev == NULL in outbound ARP packets */
245 if (!dev) 245 if (!dev)
@@ -255,7 +255,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
255 255
256static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) 256static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
257{ 257{
258 pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); 258 pr_debug("(neigh %p, skb %p)\n", neigh, skb);
259 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); 259 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
260} 260}
261 261
@@ -284,7 +284,7 @@ static int clip_constructor(struct neighbour *neigh)
284 struct in_device *in_dev; 284 struct in_device *in_dev;
285 struct neigh_parms *parms; 285 struct neigh_parms *parms;
286 286
287 pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry); 287 pr_debug("(neigh %p, entry %p)\n", neigh, entry);
288 neigh->type = inet_addr_type(&init_net, entry->ip); 288 neigh->type = inet_addr_type(&init_net, entry->ip);
289 if (neigh->type != RTN_UNICAST) 289 if (neigh->type != RTN_UNICAST)
290 return -EINVAL; 290 return -EINVAL;
@@ -369,9 +369,9 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
369 int old; 369 int old;
370 unsigned long flags; 370 unsigned long flags;
371 371
372 pr_debug("clip_start_xmit (skb %p)\n", skb); 372 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 373 if (!skb_dst(skb)) {
374 printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n"); 374 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 375 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 376 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 377 return NETDEV_TX_OK;
@@ -385,7 +385,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
385 return 0; 385 return 0;
386 } 386 }
387#endif 387#endif
388 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); 388 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 389 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 390 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 391 return NETDEV_TX_OK;
@@ -421,7 +421,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
421 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); 421 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
422 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 422 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
423 if (old) { 423 if (old) {
424 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 424 pr_warning("XOFF->XOFF transition\n");
425 return NETDEV_TX_OK; 425 return NETDEV_TX_OK;
426 } 426 }
427 dev->stats.tx_packets++; 427 dev->stats.tx_packets++;
@@ -456,7 +456,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
456 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); 456 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
457 if (!clip_vcc) 457 if (!clip_vcc)
458 return -ENOMEM; 458 return -ENOMEM;
459 pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); 459 pr_debug("%p vcc %p\n", clip_vcc, vcc);
460 clip_vcc->vcc = vcc; 460 clip_vcc->vcc = vcc;
461 vcc->user_back = clip_vcc; 461 vcc->user_back = clip_vcc;
462 set_bit(ATM_VF_IS_CLIP, &vcc->flags); 462 set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -506,16 +506,16 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
506 struct rtable *rt; 506 struct rtable *rt;
507 507
508 if (vcc->push != clip_push) { 508 if (vcc->push != clip_push) {
509 printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n"); 509 pr_warning("non-CLIP VCC\n");
510 return -EBADF; 510 return -EBADF;
511 } 511 }
512 clip_vcc = CLIP_VCC(vcc); 512 clip_vcc = CLIP_VCC(vcc);
513 if (!ip) { 513 if (!ip) {
514 if (!clip_vcc->entry) { 514 if (!clip_vcc->entry) {
515 printk(KERN_ERR "hiding hidden ATMARP entry\n"); 515 pr_err("hiding hidden ATMARP entry\n");
516 return 0; 516 return 0;
517 } 517 }
518 pr_debug("setentry: remove\n"); 518 pr_debug("remove\n");
519 unlink_clip_vcc(clip_vcc); 519 unlink_clip_vcc(clip_vcc);
520 return 0; 520 return 0;
521 } 521 }
@@ -529,9 +529,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
529 entry = NEIGH2ENTRY(neigh); 529 entry = NEIGH2ENTRY(neigh);
530 if (entry != clip_vcc->entry) { 530 if (entry != clip_vcc->entry) {
531 if (!clip_vcc->entry) 531 if (!clip_vcc->entry)
532 pr_debug("setentry: add\n"); 532 pr_debug("add\n");
533 else { 533 else {
534 pr_debug("setentry: update\n"); 534 pr_debug("update\n");
535 unlink_clip_vcc(clip_vcc); 535 unlink_clip_vcc(clip_vcc);
536 } 536 }
537 link_vcc(clip_vcc, entry); 537 link_vcc(clip_vcc, entry);
@@ -614,16 +614,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
614 614
615 switch (event) { 615 switch (event) {
616 case NETDEV_UP: 616 case NETDEV_UP:
617 pr_debug("clip_device_event NETDEV_UP\n"); 617 pr_debug("NETDEV_UP\n");
618 to_atmarpd(act_up, PRIV(dev)->number, 0); 618 to_atmarpd(act_up, PRIV(dev)->number, 0);
619 break; 619 break;
620 case NETDEV_GOING_DOWN: 620 case NETDEV_GOING_DOWN:
621 pr_debug("clip_device_event NETDEV_DOWN\n"); 621 pr_debug("NETDEV_DOWN\n");
622 to_atmarpd(act_down, PRIV(dev)->number, 0); 622 to_atmarpd(act_down, PRIV(dev)->number, 0);
623 break; 623 break;
624 case NETDEV_CHANGE: 624 case NETDEV_CHANGE:
625 case NETDEV_CHANGEMTU: 625 case NETDEV_CHANGEMTU:
626 pr_debug("clip_device_event NETDEV_CHANGE*\n"); 626 pr_debug("NETDEV_CHANGE*\n");
627 to_atmarpd(act_change, PRIV(dev)->number, 0); 627 to_atmarpd(act_change, PRIV(dev)->number, 0);
628 break; 628 break;
629 } 629 }
@@ -645,7 +645,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
645 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev); 645 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
646} 646}
647 647
648
649static struct notifier_block clip_dev_notifier = { 648static struct notifier_block clip_dev_notifier = {
650 .notifier_call = clip_device_event, 649 .notifier_call = clip_device_event,
651}; 650};
@@ -660,7 +659,7 @@ static struct notifier_block clip_inet_notifier = {
660 659
661static void atmarpd_close(struct atm_vcc *vcc) 660static void atmarpd_close(struct atm_vcc *vcc)
662{ 661{
663 pr_debug("atmarpd_close\n"); 662 pr_debug("\n");
664 663
665 rtnl_lock(); 664 rtnl_lock();
666 atmarpd = NULL; 665 atmarpd = NULL;
@@ -671,7 +670,6 @@ static void atmarpd_close(struct atm_vcc *vcc)
671 module_put(THIS_MODULE); 670 module_put(THIS_MODULE);
672} 671}
673 672
674
675static struct atmdev_ops atmarpd_dev_ops = { 673static struct atmdev_ops atmarpd_dev_ops = {
676 .close = atmarpd_close 674 .close = atmarpd_close
677}; 675};
@@ -693,11 +691,11 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
693 return -EADDRINUSE; 691 return -EADDRINUSE;
694 } 692 }
695 693
696 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); 694 mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
697 695
698 atmarpd = vcc; 696 atmarpd = vcc;
699 set_bit(ATM_VF_META,&vcc->flags); 697 set_bit(ATM_VF_META, &vcc->flags);
700 set_bit(ATM_VF_READY,&vcc->flags); 698 set_bit(ATM_VF_READY, &vcc->flags);
701 /* allow replies and avoid getting closed if signaling dies */ 699 /* allow replies and avoid getting closed if signaling dies */
702 vcc->dev = &atmarpd_dev; 700 vcc->dev = &atmarpd_dev;
703 vcc_insert_socket(sk_atm(vcc)); 701 vcc_insert_socket(sk_atm(vcc));
@@ -950,8 +948,7 @@ static int __init atm_clip_init(void)
950 948
951 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 949 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
952 if (!p) { 950 if (!p) {
953 printk(KERN_ERR "Unable to initialize " 951 pr_err("Unable to initialize /proc/net/atm/arp\n");
954 "/proc/net/atm/arp\n");
955 atm_clip_exit_noproc(); 952 atm_clip_exit_noproc();
956 return -ENOMEM; 953 return -ENOMEM;
957 } 954 }
diff --git a/net/atm/common.c b/net/atm/common.c
index d61e051e0a3f..97ed94aa0cbc 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/kmod.h> 8#include <linux/kmod.h>
@@ -17,12 +18,12 @@
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/slab.h>
20#include <net/sock.h> /* struct sock */ 22#include <net/sock.h> /* struct sock */
23#include <linux/uaccess.h>
24#include <linux/poll.h>
21 25
22#include <asm/uaccess.h>
23#include <asm/atomic.h> 26#include <asm/atomic.h>
24#include <asm/poll.h>
25
26 27
27#include "resources.h" /* atm_find_dev */ 28#include "resources.h" /* atm_find_dev */
28#include "common.h" /* prototypes */ 29#include "common.h" /* prototypes */
@@ -31,13 +32,15 @@
31#include "signaling.h" /* for WAITING and sigd_attach */ 32#include "signaling.h" /* for WAITING and sigd_attach */
32 33
33struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; 34struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
35EXPORT_SYMBOL(vcc_hash);
36
34DEFINE_RWLOCK(vcc_sklist_lock); 37DEFINE_RWLOCK(vcc_sklist_lock);
38EXPORT_SYMBOL(vcc_sklist_lock);
35 39
36static void __vcc_insert_socket(struct sock *sk) 40static void __vcc_insert_socket(struct sock *sk)
37{ 41{
38 struct atm_vcc *vcc = atm_sk(sk); 42 struct atm_vcc *vcc = atm_sk(sk);
39 struct hlist_head *head = &vcc_hash[vcc->vci & 43 struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
40 (VCC_HTABLE_SIZE - 1)];
41 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); 44 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
42 sk_add_node(sk, head); 45 sk_add_node(sk, head);
43} 46}
@@ -48,6 +51,7 @@ void vcc_insert_socket(struct sock *sk)
48 __vcc_insert_socket(sk); 51 __vcc_insert_socket(sk);
49 write_unlock_irq(&vcc_sklist_lock); 52 write_unlock_irq(&vcc_sklist_lock);
50} 53}
54EXPORT_SYMBOL(vcc_insert_socket);
51 55
52static void vcc_remove_socket(struct sock *sk) 56static void vcc_remove_socket(struct sock *sk)
53{ 57{
@@ -56,37 +60,32 @@ static void vcc_remove_socket(struct sock *sk)
56 write_unlock_irq(&vcc_sklist_lock); 60 write_unlock_irq(&vcc_sklist_lock);
57} 61}
58 62
59 63static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
60static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
61{ 64{
62 struct sk_buff *skb; 65 struct sk_buff *skb;
63 struct sock *sk = sk_atm(vcc); 66 struct sock *sk = sk_atm(vcc);
64 67
65 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { 68 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
66 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 69 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
67 sk_wmem_alloc_get(sk), size, 70 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
68 sk->sk_sndbuf);
69 return NULL; 71 return NULL;
70 } 72 }
71 while (!(skb = alloc_skb(size, GFP_KERNEL))) 73 while (!(skb = alloc_skb(size, GFP_KERNEL)))
72 schedule(); 74 schedule();
73 pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); 75 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
74 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 76 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
75 return skb; 77 return skb;
76} 78}
77 79
78
79EXPORT_SYMBOL(vcc_hash);
80EXPORT_SYMBOL(vcc_sklist_lock);
81EXPORT_SYMBOL(vcc_insert_socket);
82
83static void vcc_sock_destruct(struct sock *sk) 80static void vcc_sock_destruct(struct sock *sk)
84{ 81{
85 if (atomic_read(&sk->sk_rmem_alloc)) 82 if (atomic_read(&sk->sk_rmem_alloc))
86 printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc)); 83 printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
84 __func__, atomic_read(&sk->sk_rmem_alloc));
87 85
88 if (atomic_read(&sk->sk_wmem_alloc)) 86 if (atomic_read(&sk->sk_wmem_alloc))
89 printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc)); 87 printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
88 __func__, atomic_read(&sk->sk_wmem_alloc));
90} 89}
91 90
92static void vcc_def_wakeup(struct sock *sk) 91static void vcc_def_wakeup(struct sock *sk)
@@ -142,8 +141,8 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
142 141
143 vcc = atm_sk(sk); 142 vcc = atm_sk(sk);
144 vcc->dev = NULL; 143 vcc->dev = NULL;
145 memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); 144 memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
146 memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); 145 memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
147 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ 146 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
148 atomic_set(&sk->sk_wmem_alloc, 1); 147 atomic_set(&sk->sk_wmem_alloc, 1);
149 atomic_set(&sk->sk_rmem_alloc, 0); 148 atomic_set(&sk->sk_rmem_alloc, 0);
@@ -156,7 +155,6 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
156 return 0; 155 return 0;
157} 156}
158 157
159
160static void vcc_destroy_socket(struct sock *sk) 158static void vcc_destroy_socket(struct sock *sk)
161{ 159{
162 struct atm_vcc *vcc = atm_sk(sk); 160 struct atm_vcc *vcc = atm_sk(sk);
@@ -171,7 +169,7 @@ static void vcc_destroy_socket(struct sock *sk)
171 vcc->push(vcc, NULL); /* atmarpd has no push */ 169 vcc->push(vcc, NULL); /* atmarpd has no push */
172 170
173 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 171 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
174 atm_return(vcc,skb->truesize); 172 atm_return(vcc, skb->truesize);
175 kfree_skb(skb); 173 kfree_skb(skb);
176 } 174 }
177 175
@@ -182,7 +180,6 @@ static void vcc_destroy_socket(struct sock *sk)
182 vcc_remove_socket(sk); 180 vcc_remove_socket(sk);
183} 181}
184 182
185
186int vcc_release(struct socket *sock) 183int vcc_release(struct socket *sock)
187{ 184{
188 struct sock *sk = sock->sk; 185 struct sock *sk = sock->sk;
@@ -197,7 +194,6 @@ int vcc_release(struct socket *sock)
197 return 0; 194 return 0;
198} 195}
199 196
200
201void vcc_release_async(struct atm_vcc *vcc, int reply) 197void vcc_release_async(struct atm_vcc *vcc, int reply)
202{ 198{
203 struct sock *sk = sk_atm(vcc); 199 struct sock *sk = sk_atm(vcc);
@@ -208,8 +204,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
208 clear_bit(ATM_VF_WAITING, &vcc->flags); 204 clear_bit(ATM_VF_WAITING, &vcc->flags);
209 sk->sk_state_change(sk); 205 sk->sk_state_change(sk);
210} 206}
211
212
213EXPORT_SYMBOL(vcc_release_async); 207EXPORT_SYMBOL(vcc_release_async);
214 208
215 209
@@ -235,37 +229,37 @@ void atm_dev_release_vccs(struct atm_dev *dev)
235 write_unlock_irq(&vcc_sklist_lock); 229 write_unlock_irq(&vcc_sklist_lock);
236} 230}
237 231
238 232static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
239static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
240{ 233{
241 int max_sdu; 234 int max_sdu;
242 235
243 if (!tp->traffic_class) return 0; 236 if (!tp->traffic_class)
237 return 0;
244 switch (aal) { 238 switch (aal) {
245 case ATM_AAL0: 239 case ATM_AAL0:
246 max_sdu = ATM_CELL_SIZE-1; 240 max_sdu = ATM_CELL_SIZE-1;
247 break; 241 break;
248 case ATM_AAL34: 242 case ATM_AAL34:
249 max_sdu = ATM_MAX_AAL34_PDU; 243 max_sdu = ATM_MAX_AAL34_PDU;
250 break; 244 break;
251 default: 245 default:
252 printk(KERN_WARNING "ATM: AAL problems ... " 246 pr_warning("AAL problems ... (%d)\n", aal);
253 "(%d)\n",aal); 247 /* fall through */
254 /* fall through */ 248 case ATM_AAL5:
255 case ATM_AAL5: 249 max_sdu = ATM_MAX_AAL5_PDU;
256 max_sdu = ATM_MAX_AAL5_PDU;
257 } 250 }
258 if (!tp->max_sdu) tp->max_sdu = max_sdu; 251 if (!tp->max_sdu)
259 else if (tp->max_sdu > max_sdu) return -EINVAL; 252 tp->max_sdu = max_sdu;
260 if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; 253 else if (tp->max_sdu > max_sdu)
254 return -EINVAL;
255 if (!tp->max_cdv)
256 tp->max_cdv = ATM_MAX_CDV;
261 return 0; 257 return 0;
262} 258}
263 259
264
265static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 260static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
266{ 261{
267 struct hlist_head *head = &vcc_hash[vci & 262 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
268 (VCC_HTABLE_SIZE - 1)];
269 struct hlist_node *node; 263 struct hlist_node *node;
270 struct sock *s; 264 struct sock *s;
271 struct atm_vcc *walk; 265 struct atm_vcc *walk;
@@ -289,7 +283,6 @@ static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
289 return 0; 283 return 0;
290} 284}
291 285
292
293static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) 286static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
294{ 287{
295 static short p; /* poor man's per-device cache */ 288 static short p; /* poor man's per-device cache */
@@ -327,14 +320,13 @@ static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
327 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && 320 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
328 *vpi == ATM_VPI_ANY) { 321 *vpi == ATM_VPI_ANY) {
329 p++; 322 p++;
330 if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; 323 if (p >= 1 << vcc->dev->ci_range.vpi_bits)
324 p = 0;
331 } 325 }
332 } 326 } while (old_p != p || old_c != c);
333 while (old_p != p || old_c != c);
334 return -EADDRINUSE; 327 return -EADDRINUSE;
335} 328}
336 329
337
338static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, 330static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
339 int vci) 331 int vci)
340{ 332{
@@ -362,37 +354,46 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
362 __vcc_insert_socket(sk); 354 __vcc_insert_socket(sk);
363 write_unlock_irq(&vcc_sklist_lock); 355 write_unlock_irq(&vcc_sklist_lock);
364 switch (vcc->qos.aal) { 356 switch (vcc->qos.aal) {
365 case ATM_AAL0: 357 case ATM_AAL0:
366 error = atm_init_aal0(vcc); 358 error = atm_init_aal0(vcc);
367 vcc->stats = &dev->stats.aal0; 359 vcc->stats = &dev->stats.aal0;
368 break; 360 break;
369 case ATM_AAL34: 361 case ATM_AAL34:
370 error = atm_init_aal34(vcc); 362 error = atm_init_aal34(vcc);
371 vcc->stats = &dev->stats.aal34; 363 vcc->stats = &dev->stats.aal34;
372 break; 364 break;
373 case ATM_NO_AAL: 365 case ATM_NO_AAL:
374 /* ATM_AAL5 is also used in the "0 for default" case */ 366 /* ATM_AAL5 is also used in the "0 for default" case */
375 vcc->qos.aal = ATM_AAL5; 367 vcc->qos.aal = ATM_AAL5;
376 /* fall through */ 368 /* fall through */
377 case ATM_AAL5: 369 case ATM_AAL5:
378 error = atm_init_aal5(vcc); 370 error = atm_init_aal5(vcc);
379 vcc->stats = &dev->stats.aal5; 371 vcc->stats = &dev->stats.aal5;
380 break; 372 break;
381 default: 373 default:
382 error = -EPROTOTYPE; 374 error = -EPROTOTYPE;
383 } 375 }
384 if (!error) error = adjust_tp(&vcc->qos.txtp,vcc->qos.aal); 376 if (!error)
385 if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal); 377 error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
378 if (!error)
379 error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
386 if (error) 380 if (error)
387 goto fail; 381 goto fail;
388 pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); 382 pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
389 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, 383 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
390 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu); 384 vcc->qos.txtp.traffic_class,
391 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, 385 vcc->qos.txtp.min_pcr,
392 vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu); 386 vcc->qos.txtp.max_pcr,
387 vcc->qos.txtp.max_sdu);
388 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
389 vcc->qos.rxtp.traffic_class,
390 vcc->qos.rxtp.min_pcr,
391 vcc->qos.rxtp.max_pcr,
392 vcc->qos.rxtp.max_sdu);
393 393
394 if (dev->ops->open) { 394 if (dev->ops->open) {
395 if ((error = dev->ops->open(vcc))) 395 error = dev->ops->open(vcc);
396 if (error)
396 goto fail; 397 goto fail;
397 } 398 }
398 return 0; 399 return 0;
@@ -406,14 +407,13 @@ fail_module_put:
406 return error; 407 return error;
407} 408}
408 409
409
410int vcc_connect(struct socket *sock, int itf, short vpi, int vci) 410int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
411{ 411{
412 struct atm_dev *dev; 412 struct atm_dev *dev;
413 struct atm_vcc *vcc = ATM_SD(sock); 413 struct atm_vcc *vcc = ATM_SD(sock);
414 int error; 414 int error;
415 415
416 pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci); 416 pr_debug("(vpi %d, vci %d)\n", vpi, vci);
417 if (sock->state == SS_CONNECTED) 417 if (sock->state == SS_CONNECTED)
418 return -EISCONN; 418 return -EISCONN;
419 if (sock->state != SS_UNCONNECTED) 419 if (sock->state != SS_UNCONNECTED)
@@ -422,30 +422,33 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
422 return -EINVAL; 422 return -EINVAL;
423 423
424 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC) 424 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
425 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 425 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
426 else 426 else
427 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) 427 if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
428 return -EINVAL; 428 return -EINVAL;
429 pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " 429 pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
430 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", 430 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
431 vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr, 431 vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
432 vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu, 432 vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
433 vcc->qos.rxtp.traffic_class,vcc->qos.rxtp.min_pcr, 433 vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
434 vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu, 434 vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
435 vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" : 435 vcc->qos.aal == ATM_AAL5 ? "" :
436 " ??? code ",vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal); 436 vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
437 vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) 438 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
438 return -EBADFD; 439 return -EBADFD;
439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || 440 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) 441 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
441 return -EINVAL; 442 return -EINVAL;
442 if (likely(itf != ATM_ITF_ANY)) { 443 if (likely(itf != ATM_ITF_ANY)) {
443 dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf); 444 dev = try_then_request_module(atm_dev_lookup(itf),
445 "atm-device-%d", itf);
444 } else { 446 } else {
445 dev = NULL; 447 dev = NULL;
446 mutex_lock(&atm_dev_mutex); 448 mutex_lock(&atm_dev_mutex);
447 if (!list_empty(&atm_devs)) { 449 if (!list_empty(&atm_devs)) {
448 dev = list_entry(atm_devs.next, struct atm_dev, dev_list); 450 dev = list_entry(atm_devs.next,
451 struct atm_dev, dev_list);
449 atm_dev_hold(dev); 452 atm_dev_hold(dev);
450 } 453 }
451 mutex_unlock(&atm_dev_mutex); 454 mutex_unlock(&atm_dev_mutex);
@@ -458,13 +461,12 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
458 return error; 461 return error;
459 } 462 }
460 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) 463 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
461 set_bit(ATM_VF_PARTIAL,&vcc->flags); 464 set_bit(ATM_VF_PARTIAL, &vcc->flags);
462 if (test_bit(ATM_VF_READY,&ATM_SD(sock)->flags)) 465 if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
463 sock->state = SS_CONNECTED; 466 sock->state = SS_CONNECTED;
464 return 0; 467 return 0;
465} 468}
466 469
467
468int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 470int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
469 size_t size, int flags) 471 size_t size, int flags)
470{ 472{
@@ -478,8 +480,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
478 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ 480 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
479 return -EOPNOTSUPP; 481 return -EOPNOTSUPP;
480 vcc = ATM_SD(sock); 482 vcc = ATM_SD(sock);
481 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 483 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
482 test_bit(ATM_VF_CLOSE,&vcc->flags) || 484 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
483 !test_bit(ATM_VF_READY, &vcc->flags)) 485 !test_bit(ATM_VF_READY, &vcc->flags))
484 return 0; 486 return 0;
485 487
@@ -497,13 +499,12 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
497 if (error) 499 if (error)
498 return error; 500 return error;
499 sock_recv_ts_and_drops(msg, sk, skb); 501 sock_recv_ts_and_drops(msg, sk, skb);
500 pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); 502 pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
501 atm_return(vcc, skb->truesize); 503 atm_return(vcc, skb->truesize);
502 skb_free_datagram(sk, skb); 504 skb_free_datagram(sk, skb);
503 return copied; 505 return copied;
504} 506}
505 507
506
507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 508int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
508 size_t total_len) 509 size_t total_len)
509{ 510{
@@ -511,7 +512,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
511 DEFINE_WAIT(wait); 512 DEFINE_WAIT(wait);
512 struct atm_vcc *vcc; 513 struct atm_vcc *vcc;
513 struct sk_buff *skb; 514 struct sk_buff *skb;
514 int eff,error; 515 int eff, error;
515 const void __user *buff; 516 const void __user *buff;
516 int size; 517 int size;
517 518
@@ -550,7 +551,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
550 eff = (size+3) & ~3; /* align to word boundary */ 551 eff = (size+3) & ~3; /* align to word boundary */
551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 552 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
552 error = 0; 553 error = 0;
553 while (!(skb = alloc_tx(vcc,eff))) { 554 while (!(skb = alloc_tx(vcc, eff))) {
554 if (m->msg_flags & MSG_DONTWAIT) { 555 if (m->msg_flags & MSG_DONTWAIT) {
555 error = -EAGAIN; 556 error = -EAGAIN;
556 break; 557 break;
@@ -560,9 +561,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
560 error = -ERESTARTSYS; 561 error = -ERESTARTSYS;
561 break; 562 break;
562 } 563 }
563 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 564 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
564 test_bit(ATM_VF_CLOSE,&vcc->flags) || 565 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
565 !test_bit(ATM_VF_READY,&vcc->flags)) { 566 !test_bit(ATM_VF_READY, &vcc->flags)) {
566 error = -EPIPE; 567 error = -EPIPE;
567 send_sig(SIGPIPE, current, 0); 568 send_sig(SIGPIPE, current, 0);
568 break; 569 break;
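
The vcc_sendmsg() hunks above only reformat the wait loop around alloc_tx(); the prepare_to_wait()/finish_wait() structure itself is unchanged. A minimal sketch of that sleep/wake idiom, using an illustrative wait queue and condition rather than the socket's own sk_sleep queue, looks roughly like this:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* illustrative wait queue */
static int demo_ready;				/* illustrative condition */

static int demo_wait_for_ready(void)
{
	DEFINE_WAIT(wait);
	int error = 0;

	for (;;) {
		/* mark the task sleeping before testing the condition */
		prepare_to_wait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
		if (demo_ready)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;	/* as in the hunk above */
			break;
		}
		schedule();			/* actually sleep until woken */
	}
	finish_wait(&demo_wq, &wait);
	return error;
}

A writer on the other side sets the condition and calls wake_up() on the same queue, which is how the vcc wakeup callbacks unblock this loop for the socket's own queue.
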
@@ -574,20 +575,20 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
574 goto out; 575 goto out;
575 skb->dev = NULL; /* for paths shared with net_device interfaces */ 576 skb->dev = NULL; /* for paths shared with net_device interfaces */
576 ATM_SKB(skb)->atm_options = vcc->atm_options; 577 ATM_SKB(skb)->atm_options = vcc->atm_options;
577 if (copy_from_user(skb_put(skb,size),buff,size)) { 578 if (copy_from_user(skb_put(skb, size), buff, size)) {
578 kfree_skb(skb); 579 kfree_skb(skb);
579 error = -EFAULT; 580 error = -EFAULT;
580 goto out; 581 goto out;
581 } 582 }
582 if (eff != size) memset(skb->data+size,0,eff-size); 583 if (eff != size)
583 error = vcc->dev->ops->send(vcc,skb); 584 memset(skb->data + size, 0, eff-size);
585 error = vcc->dev->ops->send(vcc, skb);
584 error = error ? error : size; 586 error = error ? error : size;
585out: 587out:
586 release_sock(sk); 588 release_sock(sk);
587 return error; 589 return error;
588} 590}
589 591
590
591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) 592unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
592{ 593{
593 struct sock *sk = sock->sk; 594 struct sock *sk = sock->sk;
@@ -623,8 +624,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
623 return mask; 624 return mask;
624} 625}
625 626
626 627static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
627static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
628{ 628{
629 int error; 629 int error;
630 630
@@ -636,25 +636,31 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
636 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class || 636 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
637 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class) 637 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
638 return -EINVAL; 638 return -EINVAL;
639 error = adjust_tp(&qos->txtp,qos->aal); 639 error = adjust_tp(&qos->txtp, qos->aal);
640 if (!error) error = adjust_tp(&qos->rxtp,qos->aal); 640 if (!error)
641 if (error) return error; 641 error = adjust_tp(&qos->rxtp, qos->aal);
642 if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP; 642 if (error)
643 return error;
644 if (!vcc->dev->ops->change_qos)
645 return -EOPNOTSUPP;
643 if (sk_atm(vcc)->sk_family == AF_ATMPVC) 646 if (sk_atm(vcc)->sk_family == AF_ATMPVC)
644 return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET); 647 return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
645 return svc_change_qos(vcc,qos); 648 return svc_change_qos(vcc, qos);
646} 649}
647 650
648
649static int check_tp(const struct atm_trafprm *tp) 651static int check_tp(const struct atm_trafprm *tp)
650{ 652{
651 /* @@@ Should be merged with adjust_tp */ 653 /* @@@ Should be merged with adjust_tp */
652 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; 654 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
655 return 0;
653 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr && 656 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
654 !tp->max_pcr) return -EINVAL; 657 !tp->max_pcr)
655 if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL; 658 return -EINVAL;
659 if (tp->min_pcr == ATM_MAX_PCR)
660 return -EINVAL;
656 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && 661 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
657 tp->min_pcr > tp->max_pcr) return -EINVAL; 662 tp->min_pcr > tp->max_pcr)
663 return -EINVAL;
658 /* 664 /*
659 * We allow pcr to be outside [min_pcr,max_pcr], because later 665 * We allow pcr to be outside [min_pcr,max_pcr], because later
660 * adjustment may still push it in the valid range. 666 * adjustment may still push it in the valid range.
@@ -662,7 +668,6 @@ static int check_tp(const struct atm_trafprm *tp)
662 return 0; 668 return 0;
663} 669}
664 670
665
666static int check_qos(const struct atm_qos *qos) 671static int check_qos(const struct atm_qos *qos)
667{ 672{
668 int error; 673 int error;
@@ -672,9 +677,11 @@ static int check_qos(const struct atm_qos *qos)
672 if (qos->txtp.traffic_class != qos->rxtp.traffic_class && 677 if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
673 qos->txtp.traffic_class && qos->rxtp.traffic_class && 678 qos->txtp.traffic_class && qos->rxtp.traffic_class &&
674 qos->txtp.traffic_class != ATM_ANYCLASS && 679 qos->txtp.traffic_class != ATM_ANYCLASS &&
675 qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL; 680 qos->rxtp.traffic_class != ATM_ANYCLASS)
681 return -EINVAL;
676 error = check_tp(&qos->txtp); 682 error = check_tp(&qos->txtp);
677 if (error) return error; 683 if (error)
684 return error;
678 return check_tp(&qos->rxtp); 685 return check_tp(&qos->rxtp);
679} 686}
680 687
@@ -690,37 +697,41 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
690 697
691 vcc = ATM_SD(sock); 698 vcc = ATM_SD(sock);
692 switch (optname) { 699 switch (optname) {
693 case SO_ATMQOS: 700 case SO_ATMQOS:
694 { 701 {
695 struct atm_qos qos; 702 struct atm_qos qos;
696 703
697 if (copy_from_user(&qos,optval,sizeof(qos))) 704 if (copy_from_user(&qos, optval, sizeof(qos)))
698 return -EFAULT; 705 return -EFAULT;
699 error = check_qos(&qos); 706 error = check_qos(&qos);
700 if (error) return error; 707 if (error)
701 if (sock->state == SS_CONNECTED) 708 return error;
702 return atm_change_qos(vcc,&qos); 709 if (sock->state == SS_CONNECTED)
703 if (sock->state != SS_UNCONNECTED) 710 return atm_change_qos(vcc, &qos);
704 return -EBADFD; 711 if (sock->state != SS_UNCONNECTED)
705 vcc->qos = qos; 712 return -EBADFD;
706 set_bit(ATM_VF_HASQOS,&vcc->flags); 713 vcc->qos = qos;
707 return 0; 714 set_bit(ATM_VF_HASQOS, &vcc->flags);
708 } 715 return 0;
709 case SO_SETCLP:
710 if (get_user(value,(unsigned long __user *)optval))
711 return -EFAULT;
712 if (value) vcc->atm_options |= ATM_ATMOPT_CLP;
713 else vcc->atm_options &= ~ATM_ATMOPT_CLP;
714 return 0;
715 default:
716 if (level == SOL_SOCKET) return -EINVAL;
717 break;
718 } 716 }
719 if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; 717 case SO_SETCLP:
720 return vcc->dev->ops->setsockopt(vcc,level,optname,optval,optlen); 718 if (get_user(value, (unsigned long __user *)optval))
719 return -EFAULT;
720 if (value)
721 vcc->atm_options |= ATM_ATMOPT_CLP;
722 else
723 vcc->atm_options &= ~ATM_ATMOPT_CLP;
724 return 0;
725 default:
726 if (level == SOL_SOCKET)
727 return -EINVAL;
728 break;
729 }
730 if (!vcc->dev || !vcc->dev->ops->setsockopt)
731 return -EINVAL;
732 return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
721} 733}
722 734
723
724int vcc_getsockopt(struct socket *sock, int level, int optname, 735int vcc_getsockopt(struct socket *sock, int level, int optname,
725 char __user *optval, int __user *optlen) 736 char __user *optval, int __user *optlen)
726{ 737{
@@ -734,33 +745,33 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
734 745
735 vcc = ATM_SD(sock); 746 vcc = ATM_SD(sock);
736 switch (optname) { 747 switch (optname) {
737 case SO_ATMQOS: 748 case SO_ATMQOS:
738 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) 749 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
739 return -EINVAL; 750 return -EINVAL;
740 return copy_to_user(optval,&vcc->qos,sizeof(vcc->qos)) ? 751 return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
741 -EFAULT : 0; 752 ? -EFAULT : 0;
742 case SO_SETCLP: 753 case SO_SETCLP:
743 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 754 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
744 0,(unsigned long __user *)optval) ? -EFAULT : 0; 755 (unsigned long __user *)optval) ? -EFAULT : 0;
745 case SO_ATMPVC: 756 case SO_ATMPVC:
746 { 757 {
747 struct sockaddr_atmpvc pvc; 758 struct sockaddr_atmpvc pvc;
748 759
749 if (!vcc->dev || 760 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
750 !test_bit(ATM_VF_ADDR,&vcc->flags)) 761 return -ENOTCONN;
751 return -ENOTCONN; 762 pvc.sap_family = AF_ATMPVC;
752 pvc.sap_family = AF_ATMPVC; 763 pvc.sap_addr.itf = vcc->dev->number;
753 pvc.sap_addr.itf = vcc->dev->number; 764 pvc.sap_addr.vpi = vcc->vpi;
754 pvc.sap_addr.vpi = vcc->vpi; 765 pvc.sap_addr.vci = vcc->vci;
755 pvc.sap_addr.vci = vcc->vci; 766 return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
756 return copy_to_user(optval,&pvc,sizeof(pvc)) ? 767 }
757 -EFAULT : 0; 768 default:
758 } 769 if (level == SOL_SOCKET)
759 default: 770 return -EINVAL;
760 if (level == SOL_SOCKET) return -EINVAL;
761 break; 771 break;
762 } 772 }
763 if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; 773 if (!vcc->dev || !vcc->dev->ops->getsockopt)
774 return -EINVAL;
764 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); 775 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
765} 776}
766 777
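
The vcc_setsockopt()/vcc_getsockopt() hunks above are largely re-indentation: case labels move to the same column as the switch keyword and the single-line "if (x) return y;" forms are split over two lines, per the kernel coding style. A small illustrative function (not taken from the patch) showing the target layout:

#include <linux/errno.h>

static int demo_check(int optname)
{
	return optname < 0 ? -EINVAL : 0;	/* illustrative validity test */
}

static int demo_get_flag(int optname, unsigned long options)
{
	int error = demo_check(optname);

	if (error)				/* test on its own line */
		return error;
	switch (optname) {
	case 0:					/* case label aligns with "switch" */
		return (options & 0x1) ? 1 : 0;
	case 1:
		return (options & 0x2) ? 1 : 0;
	default:
		break;
	}
	return -EINVAL;
}
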
@@ -768,23 +779,27 @@ static int __init atm_init(void)
768{ 779{
769 int error; 780 int error;
770 781
771 if ((error = proto_register(&vcc_proto, 0)) < 0) 782 error = proto_register(&vcc_proto, 0);
783 if (error < 0)
772 goto out; 784 goto out;
773 785 error = atmpvc_init();
774 if ((error = atmpvc_init()) < 0) { 786 if (error < 0) {
775 printk(KERN_ERR "atmpvc_init() failed with %d\n", error); 787 pr_err("atmpvc_init() failed with %d\n", error);
776 goto out_unregister_vcc_proto; 788 goto out_unregister_vcc_proto;
777 } 789 }
778 if ((error = atmsvc_init()) < 0) { 790 error = atmsvc_init();
779 printk(KERN_ERR "atmsvc_init() failed with %d\n", error); 791 if (error < 0) {
792 pr_err("atmsvc_init() failed with %d\n", error);
780 goto out_atmpvc_exit; 793 goto out_atmpvc_exit;
781 } 794 }
782 if ((error = atm_proc_init()) < 0) { 795 error = atm_proc_init();
783 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 796 if (error < 0) {
797 pr_err("atm_proc_init() failed with %d\n", error);
784 goto out_atmsvc_exit; 798 goto out_atmsvc_exit;
785 } 799 }
786 if ((error = atm_sysfs_init()) < 0) { 800 error = atm_sysfs_init();
787 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error); 801 if (error < 0) {
802 pr_err("atm_sysfs_init() failed with %d\n", error);
788 goto out_atmproc_exit; 803 goto out_atmproc_exit;
789 } 804 }
790out: 805out:
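
The atm_init() hunk above replaces the old "if ((error = fn()) < 0)" idiom with an assignment followed by a separate test, and moves the failure messages to pr_err(). A minimal sketch of the same pattern, with hypothetical demo_* functions standing in for atmpvc_init() and friends so the snippet is self-contained:

#include <linux/kernel.h>
#include <linux/errno.h>

/* illustrative stand-ins, not functions from this patch */
static int demo_register(void)  { return 0; }
static int demo_proc_init(void) { return 0; }
static void demo_unregister(void) { }

static int demo_init(void)
{
	int error;

	error = demo_register();	/* assignment on its own line... */
	if (error < 0)			/* ...then the test */
		return error;

	error = demo_proc_init();
	if (error < 0) {
		pr_err("demo_proc_init() failed with %d\n", error);
		demo_unregister();	/* unwind the earlier step */
		return error;
	}
	return 0;
}
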
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 2ea40995dced..62dc8bfe6fe7 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -3,6 +3,7 @@
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4/* 2003 John Levon <levon@movementarian.org> */ 4/* 2003 John Levon <levon@movementarian.org> */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6 7
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/kmod.h> 9#include <linux/kmod.h>
@@ -36,6 +37,7 @@ void register_atm_ioctl(struct atm_ioctl *ioctl)
36 list_add_tail(&ioctl->list, &ioctl_list); 37 list_add_tail(&ioctl->list, &ioctl_list);
37 mutex_unlock(&ioctl_mutex); 38 mutex_unlock(&ioctl_mutex);
38} 39}
40EXPORT_SYMBOL(register_atm_ioctl);
39 41
40void deregister_atm_ioctl(struct atm_ioctl *ioctl) 42void deregister_atm_ioctl(struct atm_ioctl *ioctl)
41{ 43{
@@ -43,129 +45,128 @@ void deregister_atm_ioctl(struct atm_ioctl *ioctl)
43 list_del(&ioctl->list); 45 list_del(&ioctl->list);
44 mutex_unlock(&ioctl_mutex); 46 mutex_unlock(&ioctl_mutex);
45} 47}
46
47EXPORT_SYMBOL(register_atm_ioctl);
48EXPORT_SYMBOL(deregister_atm_ioctl); 48EXPORT_SYMBOL(deregister_atm_ioctl);
49 49
50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg, int compat) 50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
51 unsigned long arg, int compat)
51{ 52{
52 struct sock *sk = sock->sk; 53 struct sock *sk = sock->sk;
53 struct atm_vcc *vcc; 54 struct atm_vcc *vcc;
54 int error; 55 int error;
55 struct list_head * pos; 56 struct list_head *pos;
56 void __user *argp = (void __user *)arg; 57 void __user *argp = (void __user *)arg;
57 58
58 vcc = ATM_SD(sock); 59 vcc = ATM_SD(sock);
59 switch (cmd) { 60 switch (cmd) {
60 case SIOCOUTQ: 61 case SIOCOUTQ:
61 if (sock->state != SS_CONNECTED || 62 if (sock->state != SS_CONNECTED ||
62 !test_bit(ATM_VF_READY, &vcc->flags)) { 63 !test_bit(ATM_VF_READY, &vcc->flags)) {
63 error = -EINVAL; 64 error = -EINVAL;
64 goto done; 65 goto done;
65 } 66 }
66 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), 67 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
67 (int __user *) argp) ? -EFAULT : 0; 68 (int __user *)argp) ? -EFAULT : 0;
69 goto done;
70 case SIOCINQ:
71 {
72 struct sk_buff *skb;
73
74 if (sock->state != SS_CONNECTED) {
75 error = -EINVAL;
68 goto done; 76 goto done;
69 case SIOCINQ: 77 }
70 { 78 skb = skb_peek(&sk->sk_receive_queue);
71 struct sk_buff *skb; 79 error = put_user(skb ? skb->len : 0,
72 80 (int __user *)argp) ? -EFAULT : 0;
73 if (sock->state != SS_CONNECTED) { 81 goto done;
74 error = -EINVAL; 82 }
75 goto done; 83 case SIOCGSTAMP: /* borrowed from IP */
76 }
77 skb = skb_peek(&sk->sk_receive_queue);
78 error = put_user(skb ? skb->len : 0,
79 (int __user *)argp) ? -EFAULT : 0;
80 goto done;
81 }
82 case SIOCGSTAMP: /* borrowed from IP */
83#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
84 if (compat) 85 if (compat)
85 error = compat_sock_get_timestamp(sk, argp); 86 error = compat_sock_get_timestamp(sk, argp);
86 else 87 else
87#endif 88#endif
88 error = sock_get_timestamp(sk, argp); 89 error = sock_get_timestamp(sk, argp);
89 goto done; 90 goto done;
90 case SIOCGSTAMPNS: /* borrowed from IP */ 91 case SIOCGSTAMPNS: /* borrowed from IP */
91#ifdef CONFIG_COMPAT 92#ifdef CONFIG_COMPAT
92 if (compat) 93 if (compat)
93 error = compat_sock_get_timestampns(sk, argp); 94 error = compat_sock_get_timestampns(sk, argp);
94 else 95 else
95#endif 96#endif
96 error = sock_get_timestampns(sk, argp); 97 error = sock_get_timestampns(sk, argp);
98 goto done;
99 case ATM_SETSC:
100 if (net_ratelimit())
101 pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
102 current->comm, task_pid_nr(current));
103 error = 0;
104 goto done;
105 case ATMSIGD_CTRL:
106 if (!capable(CAP_NET_ADMIN)) {
107 error = -EPERM;
97 goto done; 108 goto done;
98 case ATM_SETSC: 109 }
99 if (net_ratelimit()) 110 /*
100 printk(KERN_WARNING "ATM_SETSC is obsolete; used by %s:%d\n", 111 * The user/kernel protocol for exchanging signalling
101 current->comm, task_pid_nr(current)); 112 * info uses kernel pointers as opaque references,
102 error = 0; 113 * so the holder of the file descriptor can scribble
114 * on the kernel... so we should make sure that we
115 * have the same privileges that /proc/kcore needs
116 */
117 if (!capable(CAP_SYS_RAWIO)) {
118 error = -EPERM;
103 goto done; 119 goto done;
104 case ATMSIGD_CTRL: 120 }
105 if (!capable(CAP_NET_ADMIN)) {
106 error = -EPERM;
107 goto done;
108 }
109 /*
110 * The user/kernel protocol for exchanging signalling
111 * info uses kernel pointers as opaque references,
112 * so the holder of the file descriptor can scribble
113 * on the kernel... so we should make sure that we
114 * have the same privileges that /proc/kcore needs
115 */
116 if (!capable(CAP_SYS_RAWIO)) {
117 error = -EPERM;
118 goto done;
119 }
120#ifdef CONFIG_COMPAT 121#ifdef CONFIG_COMPAT
121 /* WTF? I don't even want to _think_ about making this 122 /* WTF? I don't even want to _think_ about making this
122 work for 32-bit userspace. TBH I don't really want 123 work for 32-bit userspace. TBH I don't really want
123 to think about it at all. dwmw2. */ 124 to think about it at all. dwmw2. */
124 if (compat) { 125 if (compat) {
125 if (net_ratelimit()) 126 if (net_ratelimit())
126 printk(KERN_WARNING "32-bit task cannot be atmsigd\n"); 127 pr_warning("32-bit task cannot be atmsigd\n");
127 error = -EINVAL; 128 error = -EINVAL;
128 goto done; 129 goto done;
129 } 130 }
130#endif 131#endif
131 error = sigd_attach(vcc); 132 error = sigd_attach(vcc);
132 if (!error) 133 if (!error)
133 sock->state = SS_CONNECTED; 134 sock->state = SS_CONNECTED;
135 goto done;
136 case ATM_SETBACKEND:
137 case ATM_NEWBACKENDIF:
138 {
139 atm_backend_t backend;
140 error = get_user(backend, (atm_backend_t __user *)argp);
141 if (error)
134 goto done; 142 goto done;
135 case ATM_SETBACKEND: 143 switch (backend) {
136 case ATM_NEWBACKENDIF: 144 case ATM_BACKEND_PPP:
137 { 145 request_module("pppoatm");
138 atm_backend_t backend;
139 error = get_user(backend, (atm_backend_t __user *) argp);
140 if (error)
141 goto done;
142 switch (backend) {
143 case ATM_BACKEND_PPP:
144 request_module("pppoatm");
145 break;
146 case ATM_BACKEND_BR2684:
147 request_module("br2684");
148 break;
149 }
150 }
151 break;
152 case ATMMPC_CTRL:
153 case ATMMPC_DATA:
154 request_module("mpoa");
155 break;
156 case ATMARPD_CTRL:
157 request_module("clip");
158 break; 146 break;
159 case ATMLEC_CTRL: 147 case ATM_BACKEND_BR2684:
160 request_module("lec"); 148 request_module("br2684");
161 break; 149 break;
150 }
151 break;
152 }
153 case ATMMPC_CTRL:
154 case ATMMPC_DATA:
155 request_module("mpoa");
156 break;
157 case ATMARPD_CTRL:
158 request_module("clip");
159 break;
160 case ATMLEC_CTRL:
161 request_module("lec");
162 break;
162 } 163 }
163 164
164 error = -ENOIOCTLCMD; 165 error = -ENOIOCTLCMD;
165 166
166 mutex_lock(&ioctl_mutex); 167 mutex_lock(&ioctl_mutex);
167 list_for_each(pos, &ioctl_list) { 168 list_for_each(pos, &ioctl_list) {
168 struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list); 169 struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
169 if (try_module_get(ic->owner)) { 170 if (try_module_get(ic->owner)) {
170 error = ic->ioctl(sock, cmd, arg); 171 error = ic->ioctl(sock, cmd, arg);
171 module_put(ic->owner); 172 module_put(ic->owner);
@@ -184,7 +185,6 @@ done:
184 return error; 185 return error;
185} 186}
186 187
187
188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
189{ 189{
190 return do_vcc_ioctl(sock, cmd, arg, 0); 190 return do_vcc_ioctl(sock, cmd, arg, 0);
@@ -287,8 +287,8 @@ static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
287 sioc = compat_alloc_user_space(sizeof(*sioc)); 287 sioc = compat_alloc_user_space(sizeof(*sioc));
288 sioc32 = compat_ptr(arg); 288 sioc32 = compat_ptr(arg);
289 289
290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) 290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
291 || get_user(data, &sioc32->arg)) 291 get_user(data, &sioc32->arg))
292 return -EFAULT; 292 return -EFAULT;
293 datap = compat_ptr(data); 293 datap = compat_ptr(data);
294 if (put_user(datap, &sioc->arg)) 294 if (put_user(datap, &sioc->arg))
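
The pr_fmt() definition added at the top of this file (lec.c below gains the same one) makes every pr_info()/pr_warning()/pr_debug() call carry a module:function prefix automatically, which is why hand-written prefixes such as "lec:" are being dropped from the format strings. A sketch of how the macro composes; the function and message are illustrative, and the exact KBUILD_MODNAME value depends on how the object is built:

/* must come before the first include so it overrides the default pr_fmt */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>

static void demo_report(int error)
{
	/*
	 * Expands to roughly:
	 *   printk(KERN_INFO KBUILD_MODNAME ":%s: " "failed with %d\n",
	 *          __func__, error);
	 * e.g. "atm:demo_report: failed with -5" when built into atm.
	 */
	pr_info("failed with %d\n", error);
}
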
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 42749b7b917c..feeaf5718472 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -4,6 +4,9 @@
4 * Marko Kiiskila <mkiiskila@yahoo.com> 4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
8
9#include <linux/slab.h>
7#include <linux/kernel.h> 10#include <linux/kernel.h>
8#include <linux/bitops.h> 11#include <linux/bitops.h>
9#include <linux/capability.h> 12#include <linux/capability.h>
@@ -16,7 +19,7 @@
16#include <linux/skbuff.h> 19#include <linux/skbuff.h>
17#include <linux/ip.h> 20#include <linux/ip.h>
18#include <asm/byteorder.h> 21#include <asm/byteorder.h>
19#include <asm/uaccess.h> 22#include <linux/uaccess.h>
20#include <net/arp.h> 23#include <net/arp.h>
21#include <net/dst.h> 24#include <net/dst.h>
22#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
@@ -85,17 +88,19 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
85 int is_rdesc, 88 int is_rdesc,
86 struct lec_arp_table **ret_entry); 89 struct lec_arp_table **ret_entry);
87static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, 90static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
88 const unsigned char *atm_addr, unsigned long remoteflag, 91 const unsigned char *atm_addr,
92 unsigned long remoteflag,
89 unsigned int targetless_le_arp); 93 unsigned int targetless_le_arp);
90static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); 94static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
91static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); 95static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
92static void lec_set_flush_tran_id(struct lec_priv *priv, 96static void lec_set_flush_tran_id(struct lec_priv *priv,
93 const unsigned char *atm_addr, 97 const unsigned char *atm_addr,
94 unsigned long tran_id); 98 unsigned long tran_id);
95static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, 99static void lec_vcc_added(struct lec_priv *priv,
100 const struct atmlec_ioc *ioc_data,
96 struct atm_vcc *vcc, 101 struct atm_vcc *vcc,
97 void (*old_push) (struct atm_vcc *vcc, 102 void (*old_push)(struct atm_vcc *vcc,
98 struct sk_buff *skb)); 103 struct sk_buff *skb));
99static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); 104static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
100 105
101/* must be done under lec_arp_lock */ 106/* must be done under lec_arp_lock */
@@ -110,7 +115,6 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
110 kfree(entry); 115 kfree(entry);
111} 116}
112 117
113
114static struct lane2_ops lane2_ops = { 118static struct lane2_ops lane2_ops = {
115 lane2_resolve, /* resolve, spec 3.1.3 */ 119 lane2_resolve, /* resolve, spec 3.1.3 */
116 lane2_associate_req, /* associate_req, spec 3.1.4 */ 120 lane2_associate_req, /* associate_req, spec 3.1.4 */
@@ -148,7 +152,8 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
148 mesg = (struct atmlec_msg *)skb2->data; 152 mesg = (struct atmlec_msg *)skb2->data;
149 mesg->type = l_topology_change; 153 mesg->type = l_topology_change;
150 buff += 4; 154 buff += 4;
151 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ 155 mesg->content.normal.flag = *buff & 0x01;
156 /* 0x01 is topology change */
152 157
153 priv = netdev_priv(dev); 158 priv = netdev_priv(dev);
154 atm_force_charge(priv->lecd, skb2->truesize); 159 atm_force_charge(priv->lecd, skb2->truesize);
@@ -242,7 +247,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
242 247
243static void lec_tx_timeout(struct net_device *dev) 248static void lec_tx_timeout(struct net_device *dev)
244{ 249{
245 printk(KERN_INFO "%s: tx timeout\n", dev->name); 250 pr_info("%s\n", dev->name);
246 dev->trans_start = jiffies; 251 dev->trans_start = jiffies;
247 netif_wake_queue(dev); 252 netif_wake_queue(dev);
248} 253}
@@ -261,14 +266,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
261 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ 266 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
262#endif 267#endif
263 int is_rdesc; 268 int is_rdesc;
264#if DUMP_PACKETS > 0
265 char buf[300];
266 int i = 0;
267#endif /* DUMP_PACKETS >0 */
268 269
269 pr_debug("lec_start_xmit called\n"); 270 pr_debug("called\n");
270 if (!priv->lecd) { 271 if (!priv->lecd) {
271 printk("%s:No lecd attached\n", dev->name); 272 pr_info("%s:No lecd attached\n", dev->name);
272 dev->stats.tx_errors++; 273 dev->stats.tx_errors++;
273 netif_stop_queue(dev); 274 netif_stop_queue(dev);
274 kfree_skb(skb); 275 kfree_skb(skb);
@@ -276,8 +277,8 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
276 } 277 }
277 278
278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 279 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), 280 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
280 (long)skb_end_pointer(skb)); 281 (long)skb_end_pointer(skb));
281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 282#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) 283 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
283 lec_handle_bridge(skb, dev); 284 lec_handle_bridge(skb, dev);
@@ -285,8 +286,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
285 286
286 /* Make sure we have room for lec_id */ 287 /* Make sure we have room for lec_id */
287 if (skb_headroom(skb) < 2) { 288 if (skb_headroom(skb) < 2) {
288 289 pr_debug("reallocating skb\n");
289 pr_debug("lec_start_xmit: reallocating skb\n");
290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
291 kfree_skb(skb); 291 kfree_skb(skb);
292 if (skb2 == NULL) 292 if (skb2 == NULL)
@@ -313,23 +313,17 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
313 } 313 }
314#endif 314#endif
315 315
316#if DUMP_PACKETS > 0
317 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
318 skb->len, priv->lecid);
319#if DUMP_PACKETS >= 2 316#if DUMP_PACKETS >= 2
320 for (i = 0; i < skb->len && i < 99; i++) { 317#define MAX_DUMP_SKB 99
321 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
322 }
323#elif DUMP_PACKETS >= 1 318#elif DUMP_PACKETS >= 1
324 for (i = 0; i < skb->len && i < 30; i++) { 319#define MAX_DUMP_SKB 30
325 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 320#endif
326 } 321#if DUMP_PACKETS >= 1
322 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
323 dev->name, skb->len, priv->lecid);
324 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
325 skb->data, min(skb->len, MAX_DUMP_SKB), true);
327#endif /* DUMP_PACKETS >= 1 */ 326#endif /* DUMP_PACKETS >= 1 */
328 if (i == skb->len)
329 printk("%s\n", buf);
330 else
331 printk("%s...\n", buf);
332#endif /* DUMP_PACKETS > 0 */
333 327
334 /* Minimum ethernet-frame size */ 328 /* Minimum ethernet-frame size */
335#ifdef CONFIG_TR 329#ifdef CONFIG_TR
@@ -367,31 +361,28 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
367#endif 361#endif
368 entry = NULL; 362 entry = NULL;
369 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 363 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
370 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, 364 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
371 vcc, vcc ? vcc->flags : 0, entry); 365 dev->name, vcc, vcc ? vcc->flags : 0, entry);
372 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { 366 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
373 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { 367 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
374 pr_debug("%s:lec_start_xmit: queuing packet, ", 368 pr_debug("%s:queuing packet, MAC address %pM\n",
375 dev->name); 369 dev->name, lec_h->h_dest);
376 pr_debug("MAC address %pM\n", lec_h->h_dest);
377 skb_queue_tail(&entry->tx_wait, skb); 370 skb_queue_tail(&entry->tx_wait, skb);
378 } else { 371 } else {
379 pr_debug 372 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
380 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", 373 dev->name, lec_h->h_dest);
381 dev->name);
382 pr_debug("MAC address %pM\n", lec_h->h_dest);
383 dev->stats.tx_dropped++; 374 dev->stats.tx_dropped++;
384 dev_kfree_skb(skb); 375 dev_kfree_skb(skb);
385 } 376 }
386 goto out; 377 goto out;
387 } 378 }
388#if DUMP_PACKETS > 0 379#if DUMP_PACKETS > 0
389 printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); 380 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
381 dev->name, vcc->vpi, vcc->vci);
390#endif /* DUMP_PACKETS > 0 */ 382#endif /* DUMP_PACKETS > 0 */
391 383
392 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 384 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
393 pr_debug("lec.c: emptying tx queue, "); 385 pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
394 pr_debug("MAC address %pM\n", lec_h->h_dest);
395 lec_send(vcc, skb2); 386 lec_send(vcc, skb2);
396 } 387 }
397 388
@@ -444,14 +435,12 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
444 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); 435 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
445 switch (mesg->type) { 436 switch (mesg->type) {
446 case l_set_mac_addr: 437 case l_set_mac_addr:
447 for (i = 0; i < 6; i++) { 438 for (i = 0; i < 6; i++)
448 dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; 439 dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
449 }
450 break; 440 break;
451 case l_del_mac_addr: 441 case l_del_mac_addr:
452 for (i = 0; i < 6; i++) { 442 for (i = 0; i < 6; i++)
453 dev->dev_addr[i] = 0; 443 dev->dev_addr[i] = 0;
454 }
455 break; 444 break;
456 case l_addr_delete: 445 case l_addr_delete:
457 lec_addr_delete(priv, mesg->content.normal.atm_addr, 446 lec_addr_delete(priv, mesg->content.normal.atm_addr,
@@ -477,10 +466,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
477 mesg->content.normal.atm_addr, 466 mesg->content.normal.atm_addr,
478 mesg->content.normal.flag, 467 mesg->content.normal.flag,
479 mesg->content.normal.targetless_le_arp); 468 mesg->content.normal.targetless_le_arp);
480 pr_debug("lec: in l_arp_update\n"); 469 pr_debug("in l_arp_update\n");
481 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ 470 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
482 pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n", 471 pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
483 mesg->sizeoftlvs); 472 mesg->sizeoftlvs);
484 lane2_associate_ind(dev, mesg->content.normal.mac_addr, 473 lane2_associate_ind(dev, mesg->content.normal.mac_addr,
485 tmp, mesg->sizeoftlvs); 474 tmp, mesg->sizeoftlvs);
486 } 475 }
@@ -499,13 +488,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
499 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ); 488 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
500 priv->path_switching_delay = 489 priv->path_switching_delay =
501 (mesg->content.config.path_switching_delay * HZ); 490 (mesg->content.config.path_switching_delay * HZ);
502 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */ 491 priv->lane_version = mesg->content.config.lane_version;
492 /* LANE2 */
503 priv->lane2_ops = NULL; 493 priv->lane2_ops = NULL;
504 if (priv->lane_version > 1) 494 if (priv->lane_version > 1)
505 priv->lane2_ops = &lane2_ops; 495 priv->lane2_ops = &lane2_ops;
506 if (dev_set_mtu(dev, mesg->content.config.mtu)) 496 if (dev_set_mtu(dev, mesg->content.config.mtu))
507 printk("%s: change_mtu to %d failed\n", dev->name, 497 pr_info("%s: change_mtu to %d failed\n",
508 mesg->content.config.mtu); 498 dev->name, mesg->content.config.mtu);
509 priv->is_proxy = mesg->content.config.is_proxy; 499 priv->is_proxy = mesg->content.config.is_proxy;
510 break; 500 break;
511 case l_flush_tran_id: 501 case l_flush_tran_id:
@@ -518,40 +508,35 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
518 break; 508 break;
519 case l_should_bridge: 509 case l_should_bridge:
520#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 510#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
521 { 511 {
522 pr_debug("%s: bridge zeppelin asks about %pM\n", 512 pr_debug("%s: bridge zeppelin asks about %pM\n",
523 dev->name, mesg->content.proxy.mac_addr); 513 dev->name, mesg->content.proxy.mac_addr);
524 514
525 if (br_fdb_test_addr_hook == NULL) 515 if (br_fdb_test_addr_hook == NULL)
526 break; 516 break;
527 517
528 if (br_fdb_test_addr_hook(dev, 518 if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
529 mesg->content.proxy.mac_addr)) { 519 /* hit from bridge table, send LE_ARP_RESPONSE */
530 /* hit from bridge table, send LE_ARP_RESPONSE */ 520 struct sk_buff *skb2;
531 struct sk_buff *skb2; 521 struct sock *sk;
532 struct sock *sk; 522
533 523 pr_debug("%s: entry found, responding to zeppelin\n",
534 pr_debug 524 dev->name);
535 ("%s: entry found, responding to zeppelin\n", 525 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
536 dev->name); 526 if (skb2 == NULL)
537 skb2 = 527 break;
538 alloc_skb(sizeof(struct atmlec_msg), 528 skb2->len = sizeof(struct atmlec_msg);
539 GFP_ATOMIC); 529 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
540 if (skb2 == NULL) 530 atm_force_charge(priv->lecd, skb2->truesize);
541 break; 531 sk = sk_atm(priv->lecd);
542 skb2->len = sizeof(struct atmlec_msg); 532 skb_queue_tail(&sk->sk_receive_queue, skb2);
543 skb_copy_to_linear_data(skb2, mesg, 533 sk->sk_data_ready(sk, skb2->len);
544 sizeof(*mesg));
545 atm_force_charge(priv->lecd, skb2->truesize);
546 sk = sk_atm(priv->lecd);
547 skb_queue_tail(&sk->sk_receive_queue, skb2);
548 sk->sk_data_ready(sk, skb2->len);
549 }
550 } 534 }
535 }
551#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 536#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
552 break; 537 break;
553 default: 538 default:
554 printk("%s: Unknown message type %d\n", dev->name, mesg->type); 539 pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
555 dev_kfree_skb(skb); 540 dev_kfree_skb(skb);
556 return -EINVAL; 541 return -EINVAL;
557 } 542 }
@@ -572,14 +557,13 @@ static void lec_atm_close(struct atm_vcc *vcc)
572 lec_arp_destroy(priv); 557 lec_arp_destroy(priv);
573 558
574 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 559 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
575 printk("%s lec_atm_close: closing with messages pending\n", 560 pr_info("%s closing with messages pending\n", dev->name);
576 dev->name); 561 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
577 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
578 atm_return(vcc, skb->truesize); 562 atm_return(vcc, skb->truesize);
579 dev_kfree_skb(skb); 563 dev_kfree_skb(skb);
580 } 564 }
581 565
582 printk("%s: Shut down!\n", dev->name); 566 pr_info("%s: Shut down!\n", dev->name);
583 module_put(THIS_MODULE); 567 module_put(THIS_MODULE);
584} 568}
585 569
@@ -608,9 +592,8 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
608 struct sk_buff *skb; 592 struct sk_buff *skb;
609 struct atmlec_msg *mesg; 593 struct atmlec_msg *mesg;
610 594
611 if (!priv || !priv->lecd) { 595 if (!priv || !priv->lecd)
612 return -1; 596 return -1;
613 }
614 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 597 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
615 if (!skb) 598 if (!skb)
616 return -1; 599 return -1;
@@ -633,7 +616,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
633 sk->sk_data_ready(sk, skb->len); 616 sk->sk_data_ready(sk, skb->len);
634 617
635 if (data != NULL) { 618 if (data != NULL) {
636 pr_debug("lec: about to send %d bytes of data\n", data->len); 619 pr_debug("about to send %d bytes of data\n", data->len);
637 atm_force_charge(priv->lecd, data->truesize); 620 atm_force_charge(priv->lecd, data->truesize);
638 skb_queue_tail(&sk->sk_receive_queue, data); 621 skb_queue_tail(&sk->sk_receive_queue, data);
639 sk->sk_data_ready(sk, skb->len); 622 sk->sk_data_ready(sk, skb->len);
@@ -691,36 +674,28 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
691 struct net_device *dev = (struct net_device *)vcc->proto_data; 674 struct net_device *dev = (struct net_device *)vcc->proto_data;
692 struct lec_priv *priv = netdev_priv(dev); 675 struct lec_priv *priv = netdev_priv(dev);
693 676
694#if DUMP_PACKETS >0 677#if DUMP_PACKETS > 0
695 int i = 0; 678 printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
696 char buf[300]; 679 dev->name, vcc->vpi, vcc->vci);
697
698 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
699 vcc->vpi, vcc->vci);
700#endif 680#endif
701 if (!skb) { 681 if (!skb) {
702 pr_debug("%s: null skb\n", dev->name); 682 pr_debug("%s: null skb\n", dev->name);
703 lec_vcc_close(priv, vcc); 683 lec_vcc_close(priv, vcc);
704 return; 684 return;
705 } 685 }
706#if DUMP_PACKETS > 0
707 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
708 skb->len, priv->lecid);
709#if DUMP_PACKETS >= 2 686#if DUMP_PACKETS >= 2
710 for (i = 0; i < skb->len && i < 99; i++) { 687#define MAX_SKB_DUMP 99
711 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
712 }
713#elif DUMP_PACKETS >= 1 688#elif DUMP_PACKETS >= 1
714 for (i = 0; i < skb->len && i < 30; i++) { 689#define MAX_SKB_DUMP 30
715 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 690#endif
716 } 691#if DUMP_PACKETS > 0
717#endif /* DUMP_PACKETS >= 1 */ 692 printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
718 if (i == skb->len) 693 dev->name, skb->len, priv->lecid);
719 printk("%s\n", buf); 694 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
720 else 695 skb->data, min(MAX_SKB_DUMP, skb->len), true);
721 printk("%s...\n", buf);
722#endif /* DUMP_PACKETS > 0 */ 696#endif /* DUMP_PACKETS > 0 */
723 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ 697 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
698 /* Control frame, to daemon */
724 struct sock *sk = sk_atm(vcc); 699 struct sock *sk = sk_atm(vcc);
725 700
726 pr_debug("%s: To daemon\n", dev->name); 701 pr_debug("%s: To daemon\n", dev->name);
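
The lec_start_xmit() and lec_push() hunks replace the hand-rolled sprintf() hex loops with a single print_hex_dump() call over a bounded part of the frame; the third argument selects the row prefix from the kernel's DUMP_PREFIX_* constants (DUMP_PREFIX_OFFSET prints the byte offset of each row). A sketch of an equivalent call outside the driver, with an illustrative buffer and a 64-byte cap standing in for MAX_DUMP_SKB/MAX_SKB_DUMP:

#include <linux/kernel.h>

static void demo_dump_frame(const unsigned char *data, unsigned int len)
{
	/* KERN_DEBUG level, empty prefix string, offset column,
	 * 16 bytes per row in 1-byte groups, ASCII column enabled */
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
		       data, min(len, 64U), true);
}
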
@@ -778,9 +753,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
778 dev_kfree_skb(skb); 753 dev_kfree_skb(skb);
779 return; 754 return;
780 } 755 }
781 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 756 if (!hlist_empty(&priv->lec_arp_empty_ones))
782 lec_arp_check_empties(priv, vcc, skb); 757 lec_arp_check_empties(priv, vcc, skb);
783 }
784 skb_pull(skb, 2); /* skip lec_id */ 758 skb_pull(skb, 2); /* skip lec_id */
785#ifdef CONFIG_TR 759#ifdef CONFIG_TR
786 if (priv->is_trdev) 760 if (priv->is_trdev)
@@ -801,7 +775,7 @@ static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
801 struct net_device *dev = skb->dev; 775 struct net_device *dev = skb->dev;
802 776
803 if (vpriv == NULL) { 777 if (vpriv == NULL) {
804 printk("lec_pop(): vpriv = NULL!?!?!?\n"); 778 pr_info("vpriv = NULL!?!?!?\n");
805 return; 779 return;
806 } 780 }
807 781
@@ -822,15 +796,13 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
822 796
823 /* Lecd must be up in this case */ 797 /* Lecd must be up in this case */
824 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 798 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
825 if (bytes_left != 0) { 799 if (bytes_left != 0)
826 printk 800 pr_info("copy from user failed for %d bytes\n", bytes_left);
827 ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
828 bytes_left);
829 }
830 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 801 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
831 !dev_lec[ioc_data.dev_num]) 802 !dev_lec[ioc_data.dev_num])
832 return -EINVAL; 803 return -EINVAL;
833 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 804 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
805 if (!vpriv)
834 return -ENOMEM; 806 return -ENOMEM;
835 vpriv->xoff = 0; 807 vpriv->xoff = 0;
836 vpriv->old_pop = vcc->pop; 808 vpriv->old_pop = vcc->pop;
@@ -921,9 +893,8 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
921 priv->flush_timeout = (4 * HZ); 893 priv->flush_timeout = (4 * HZ);
922 priv->path_switching_delay = (6 * HZ); 894 priv->path_switching_delay = (6 * HZ);
923 895
924 if (dev_lec[i]->flags & IFF_UP) { 896 if (dev_lec[i]->flags & IFF_UP)
925 netif_start_queue(dev_lec[i]); 897 netif_start_queue(dev_lec[i]);
926 }
927 __module_get(THIS_MODULE); 898 __module_get(THIS_MODULE);
928 return i; 899 return i;
929} 900}
@@ -1125,7 +1096,9 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1125 else { 1096 else {
1126 struct lec_state *state = seq->private; 1097 struct lec_state *state = seq->private;
1127 struct net_device *dev = state->dev; 1098 struct net_device *dev = state->dev;
1128 struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); 1099 struct lec_arp_table *entry = hlist_entry(state->node,
1100 struct lec_arp_table,
1101 next);
1129 1102
1130 seq_printf(seq, "%s ", dev->name); 1103 seq_printf(seq, "%s ", dev->name);
1131 lec_info(seq, entry); 1104 lec_info(seq, entry);
@@ -1199,13 +1172,13 @@ static int __init lane_module_init(void)
1199 1172
1200 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1173 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
1201 if (!p) { 1174 if (!p) {
1202 printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); 1175 pr_err("Unable to initialize /proc/net/atm/lec\n");
1203 return -ENOMEM; 1176 return -ENOMEM;
1204 } 1177 }
1205#endif 1178#endif
1206 1179
1207 register_atm_ioctl(&lane_ioctl_ops); 1180 register_atm_ioctl(&lane_ioctl_ops);
1208 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1181 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
1209 return 0; 1182 return 0;
1210} 1183}
1211 1184
@@ -1294,13 +1267,13 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1294 struct lec_priv *priv = netdev_priv(dev); 1267 struct lec_priv *priv = netdev_priv(dev);
1295 1268
1296 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1269 if (compare_ether_addr(lan_dst, dev->dev_addr))
1297 return (0); /* not our mac address */ 1270 return 0; /* not our mac address */
1298 1271
1299 kfree(priv->tlvs); /* NULL if there was no previous association */ 1272 kfree(priv->tlvs); /* NULL if there was no previous association */
1300 1273
1301 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); 1274 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
1302 if (priv->tlvs == NULL) 1275 if (priv->tlvs == NULL)
1303 return (0); 1276 return 0;
1304 priv->sizeoftlvs = sizeoftlvs; 1277 priv->sizeoftlvs = sizeoftlvs;
1305 1278
1306 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); 1279 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
@@ -1310,12 +1283,12 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1310 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); 1283 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
1311 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); 1284 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
1312 if (retval != 0) 1285 if (retval != 0)
1313 printk("lec.c: lane2_associate_req() failed\n"); 1286 pr_info("lec.c: lane2_associate_req() failed\n");
1314 /* 1287 /*
1315 * If the previous association has changed we must 1288 * If the previous association has changed we must
1316 * somehow notify other LANE entities about the change 1289 * somehow notify other LANE entities about the change
1317 */ 1290 */
1318 return (1); 1291 return 1;
1319} 1292}
1320 1293
1321/* 1294/*
@@ -1348,12 +1321,12 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1348 entry->sizeoftlvs = sizeoftlvs; 1321 entry->sizeoftlvs = sizeoftlvs;
1349#endif 1322#endif
1350#if 0 1323#if 0
1351 printk("lec.c: lane2_associate_ind()\n"); 1324 pr_info("\n");
1352 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); 1325 pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
1353 while (i < sizeoftlvs) 1326 while (i < sizeoftlvs)
1354 printk("%02x ", tlvs[i++]); 1327 pr_cont("%02x ", tlvs[i++]);
1355 1328
1356 printk("\n"); 1329 pr_cont("\n");
1357#endif 1330#endif
1358 1331
1359 /* tell MPOA about the TLVs we saw */ 1332 /* tell MPOA about the TLVs we saw */
@@ -1373,15 +1346,15 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1373 1346
1374#include <linux/types.h> 1347#include <linux/types.h>
1375#include <linux/timer.h> 1348#include <linux/timer.h>
1376#include <asm/param.h> 1349#include <linux/param.h>
1377#include <asm/atomic.h> 1350#include <asm/atomic.h>
1378#include <linux/inetdevice.h> 1351#include <linux/inetdevice.h>
1379#include <net/route.h> 1352#include <net/route.h>
1380 1353
1381#if 0 1354#if 0
1382#define pr_debug(format,args...) 1355#define pr_debug(format, args...)
1383/* 1356/*
1384#define pr_debug printk 1357 #define pr_debug printk
1385*/ 1358*/
1386#endif 1359#endif
1387#define DEBUG_ARP_TABLE 0 1360#define DEBUG_ARP_TABLE 0
@@ -1395,7 +1368,7 @@ static void lec_arp_expire_arp(unsigned long data);
1395 * Arp table funcs 1368 * Arp table funcs
1396 */ 1369 */
1397 1370
1398#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE -1)) 1371#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
1399 1372
1400/* 1373/*
1401 * Initialization of arp-cache 1374 * Initialization of arp-cache
@@ -1404,9 +1377,8 @@ static void lec_arp_init(struct lec_priv *priv)
1404{ 1377{
1405 unsigned short i; 1378 unsigned short i;
1406 1379
1407 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1380 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
1408 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1381 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1409 }
1410 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1382 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1411 INIT_HLIST_HEAD(&priv->lec_no_forward); 1383 INIT_HLIST_HEAD(&priv->lec_no_forward);
1412 INIT_HLIST_HEAD(&priv->mcast_fwds); 1384 INIT_HLIST_HEAD(&priv->mcast_fwds);
@@ -1450,10 +1422,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1450 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; 1422 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
1451 hlist_add_head(&entry->next, tmp); 1423 hlist_add_head(&entry->next, tmp);
1452 1424
1453 pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1425 pr_debug("Added entry:%pM\n", entry->mac_addr);
1454 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
1455 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
1456 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
1457} 1426}
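
The hunk above replaces six "%2.2x" conversions and the "0xff &" masking with the kernel's %pM printk extension, which expands a 6-byte MAC address into the usual colon-separated notation. Userspace printf has no %pM; a rough stand-in, with invented names, looks like:

#include <stdio.h>

#define ETH_ALEN 6

/* Format a MAC address the way the kernel's %pM does: "aa:bb:cc:dd:ee:ff". */
static void format_mac(char *buf, size_t len, const unsigned char *mac)
{
    snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
             mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

int main(void)
{
    const unsigned char mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };
    char buf[18];               /* 17 characters + terminating NUL */

    format_mac(buf, sizeof(buf), mac);
    printf("Added entry:%s\n", buf);
    return 0;
}
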
1458 1427
1459/* 1428/*
@@ -1466,20 +1435,23 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1466 struct lec_arp_table *entry; 1435 struct lec_arp_table *entry;
1467 int i, remove_vcc = 1; 1436 int i, remove_vcc = 1;
1468 1437
1469 if (!to_remove) { 1438 if (!to_remove)
1470 return -1; 1439 return -1;
1471 }
1472 1440
1473 hlist_del(&to_remove->next); 1441 hlist_del(&to_remove->next);
1474 del_timer(&to_remove->timer); 1442 del_timer(&to_remove->timer);
1475 1443
1476 /* If this is the only MAC connected to this VCC, also tear down the VCC */ 1444 /*
1445 * If this is the only MAC connected to this VCC,
1446 * also tear down the VCC
1447 */
1477 if (to_remove->status >= ESI_FLUSH_PENDING) { 1448 if (to_remove->status >= ESI_FLUSH_PENDING) {
1478 /* 1449 /*
1479 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1450 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1480 */ 1451 */
1481 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1452 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1482 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 1453 hlist_for_each_entry(entry, node,
1454 &priv->lec_arp_tables[i], next) {
1483 if (memcmp(to_remove->atm_addr, 1455 if (memcmp(to_remove->atm_addr,
1484 entry->atm_addr, ATM_ESA_LEN) == 0) { 1456 entry->atm_addr, ATM_ESA_LEN) == 0) {
1485 remove_vcc = 0; 1457 remove_vcc = 0;
@@ -1492,10 +1464,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1492 } 1464 }
1493 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */ 1465 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1494 1466
1495 pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1467 pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
1496 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
1497 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
1498 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
1499 return 0; 1468 return 0;
1500} 1469}
1501 1470
@@ -1513,9 +1482,8 @@ static const char *get_status_string(unsigned char st)
1513 return "ESI_FLUSH_PENDING"; 1482 return "ESI_FLUSH_PENDING";
1514 case ESI_FORWARD_DIRECT: 1483 case ESI_FORWARD_DIRECT:
1515 return "ESI_FORWARD_DIRECT"; 1484 return "ESI_FORWARD_DIRECT";
1516 default:
1517 return "<UNKNOWN>";
1518 } 1485 }
1486 return "<UNKNOWN>";
1519} 1487}
1520 1488
1521static void dump_arp_table(struct lec_priv *priv) 1489static void dump_arp_table(struct lec_priv *priv)
@@ -1525,18 +1493,15 @@ static void dump_arp_table(struct lec_priv *priv)
1525 char buf[256]; 1493 char buf[256];
1526 int i, j, offset; 1494 int i, j, offset;
1527 1495
1528 printk("Dump %p:\n", priv); 1496 pr_info("Dump %p:\n", priv);
1529 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1497 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1530 hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { 1498 hlist_for_each_entry(rulla, node,
1499 &priv->lec_arp_tables[i], next) {
1531 offset = 0; 1500 offset = 0;
1532 offset += sprintf(buf, "%d: %p\n", i, rulla); 1501 offset += sprintf(buf, "%d: %p\n", i, rulla);
1533 offset += sprintf(buf + offset, "Mac:"); 1502 offset += sprintf(buf + offset, "Mac: %pM",
1534 for (j = 0; j < ETH_ALEN; j++) { 1503 rulla->mac_addr);
1535 offset += sprintf(buf + offset, 1504 offset += sprintf(buf + offset, " Atm:");
1536 "%2.2x ",
1537 rulla->mac_addr[j] & 0xff);
1538 }
1539 offset += sprintf(buf + offset, "Atm:");
1540 for (j = 0; j < ATM_ESA_LEN; j++) { 1505 for (j = 0; j < ATM_ESA_LEN; j++) {
1541 offset += sprintf(buf + offset, 1506 offset += sprintf(buf + offset,
1542 "%2.2x ", 1507 "%2.2x ",
@@ -1556,20 +1521,16 @@ static void dump_arp_table(struct lec_priv *priv)
1556 "Flags:%x, Packets_flooded:%x, Status: %s ", 1521 "Flags:%x, Packets_flooded:%x, Status: %s ",
1557 rulla->flags, rulla->packets_flooded, 1522 rulla->flags, rulla->packets_flooded,
1558 get_status_string(rulla->status)); 1523 get_status_string(rulla->status));
1559 printk("%s\n", buf); 1524 pr_info("%s\n", buf);
1560 } 1525 }
1561 } 1526 }
1562 1527
1563 if (!hlist_empty(&priv->lec_no_forward)) 1528 if (!hlist_empty(&priv->lec_no_forward))
1564 printk("No forward\n"); 1529 pr_info("No forward\n");
1565 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1530 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
1566 offset = 0; 1531 offset = 0;
1567 offset += sprintf(buf + offset, "Mac:"); 1532 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1568 for (j = 0; j < ETH_ALEN; j++) { 1533 offset += sprintf(buf + offset, " Atm:");
1569 offset += sprintf(buf + offset, "%2.2x ",
1570 rulla->mac_addr[j] & 0xff);
1571 }
1572 offset += sprintf(buf + offset, "Atm:");
1573 for (j = 0; j < ATM_ESA_LEN; j++) { 1534 for (j = 0; j < ATM_ESA_LEN; j++) {
1574 offset += sprintf(buf + offset, "%2.2x ", 1535 offset += sprintf(buf + offset, "%2.2x ",
1575 rulla->atm_addr[j] & 0xff); 1536 rulla->atm_addr[j] & 0xff);
@@ -1586,19 +1547,15 @@ static void dump_arp_table(struct lec_priv *priv)
1586 "Flags:%x, Packets_flooded:%x, Status: %s ", 1547 "Flags:%x, Packets_flooded:%x, Status: %s ",
1587 rulla->flags, rulla->packets_flooded, 1548 rulla->flags, rulla->packets_flooded,
1588 get_status_string(rulla->status)); 1549 get_status_string(rulla->status));
1589 printk("%s\n", buf); 1550 pr_info("%s\n", buf);
1590 } 1551 }
1591 1552
1592 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1553 if (!hlist_empty(&priv->lec_arp_empty_ones))
1593 printk("Empty ones\n"); 1554 pr_info("Empty ones\n");
1594 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1555 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
1595 offset = 0; 1556 offset = 0;
1596 offset += sprintf(buf + offset, "Mac:"); 1557 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1597 for (j = 0; j < ETH_ALEN; j++) { 1558 offset += sprintf(buf + offset, " Atm:");
1598 offset += sprintf(buf + offset, "%2.2x ",
1599 rulla->mac_addr[j] & 0xff);
1600 }
1601 offset += sprintf(buf + offset, "Atm:");
1602 for (j = 0; j < ATM_ESA_LEN; j++) { 1559 for (j = 0; j < ATM_ESA_LEN; j++) {
1603 offset += sprintf(buf + offset, "%2.2x ", 1560 offset += sprintf(buf + offset, "%2.2x ",
1604 rulla->atm_addr[j] & 0xff); 1561 rulla->atm_addr[j] & 0xff);
@@ -1615,19 +1572,15 @@ static void dump_arp_table(struct lec_priv *priv)
1615 "Flags:%x, Packets_flooded:%x, Status: %s ", 1572 "Flags:%x, Packets_flooded:%x, Status: %s ",
1616 rulla->flags, rulla->packets_flooded, 1573 rulla->flags, rulla->packets_flooded,
1617 get_status_string(rulla->status)); 1574 get_status_string(rulla->status));
1618 printk("%s", buf); 1575 pr_info("%s", buf);
1619 } 1576 }
1620 1577
1621 if (!hlist_empty(&priv->mcast_fwds)) 1578 if (!hlist_empty(&priv->mcast_fwds))
1622 printk("Multicast Forward VCCs\n"); 1579 pr_info("Multicast Forward VCCs\n");
1623 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1580 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
1624 offset = 0; 1581 offset = 0;
1625 offset += sprintf(buf + offset, "Mac:"); 1582 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1626 for (j = 0; j < ETH_ALEN; j++) { 1583 offset += sprintf(buf + offset, " Atm:");
1627 offset += sprintf(buf + offset, "%2.2x ",
1628 rulla->mac_addr[j] & 0xff);
1629 }
1630 offset += sprintf(buf + offset, "Atm:");
1631 for (j = 0; j < ATM_ESA_LEN; j++) { 1584 for (j = 0; j < ATM_ESA_LEN; j++) {
1632 offset += sprintf(buf + offset, "%2.2x ", 1585 offset += sprintf(buf + offset, "%2.2x ",
1633 rulla->atm_addr[j] & 0xff); 1586 rulla->atm_addr[j] & 0xff);
@@ -1644,7 +1597,7 @@ static void dump_arp_table(struct lec_priv *priv)
1644 "Flags:%x, Packets_flooded:%x, Status: %s ", 1597 "Flags:%x, Packets_flooded:%x, Status: %s ",
1645 rulla->flags, rulla->packets_flooded, 1598 rulla->flags, rulla->packets_flooded,
1646 get_status_string(rulla->status)); 1599 get_status_string(rulla->status));
1647 printk("%s\n", buf); 1600 pr_info("%s\n", buf);
1648 } 1601 }
1649 1602
1650} 1603}
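
dump_arp_table() above builds each report line in a local buffer by repeatedly calling sprintf(buf + offset, ...) and adding the return value (the number of characters written) to offset, then emits the whole line with one pr_info(); the %pM conversion shrinks that accumulation considerably. A compact userspace sketch of the same accumulate-then-print pattern, using snprintf for bounds safety (buffer size and field values are invented):

#include <stdio.h>

int main(void)
{
    const unsigned char atm_addr[4] = { 0x47, 0x00, 0x79, 0x01 }; /* shortened for the example */
    char buf[256];
    int offset = 0;
    size_t j;

    /* Each call appends at buf + offset; the return value advances offset. */
    offset += snprintf(buf + offset, sizeof(buf) - offset, "Mac: %s", "00:16:3e:12:34:56");
    offset += snprintf(buf + offset, sizeof(buf) - offset, " Atm:");
    for (j = 0; j < sizeof(atm_addr); j++)
        offset += snprintf(buf + offset, sizeof(buf) - offset,
                           "%2.2x ", atm_addr[j]);

    printf("%s\n", buf);        /* one line of output per entry, like pr_info() */
    return 0;
}
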
@@ -1670,14 +1623,16 @@ static void lec_arp_destroy(struct lec_priv *priv)
1670 1623
1671 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1624 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1672 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1625 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1673 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1626 hlist_for_each_entry_safe(entry, node, next,
1627 &priv->lec_arp_tables[i], next) {
1674 lec_arp_remove(priv, entry); 1628 lec_arp_remove(priv, entry);
1675 lec_arp_put(entry); 1629 lec_arp_put(entry);
1676 } 1630 }
1677 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1631 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1678 } 1632 }
1679 1633
1680 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1634 hlist_for_each_entry_safe(entry, node, next,
1635 &priv->lec_arp_empty_ones, next) {
1681 del_timer_sync(&entry->timer); 1636 del_timer_sync(&entry->timer);
1682 lec_arp_clear_vccs(entry); 1637 lec_arp_clear_vccs(entry);
1683 hlist_del(&entry->next); 1638 hlist_del(&entry->next);
@@ -1685,7 +1640,8 @@ static void lec_arp_destroy(struct lec_priv *priv)
1685 } 1640 }
1686 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1641 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1687 1642
1688 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 1643 hlist_for_each_entry_safe(entry, node, next,
1644 &priv->lec_no_forward, next) {
1689 del_timer_sync(&entry->timer); 1645 del_timer_sync(&entry->timer);
1690 lec_arp_clear_vccs(entry); 1646 lec_arp_clear_vccs(entry);
1691 hlist_del(&entry->next); 1647 hlist_del(&entry->next);
@@ -1714,15 +1670,12 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1714 struct hlist_head *head; 1670 struct hlist_head *head;
1715 struct lec_arp_table *entry; 1671 struct lec_arp_table *entry;
1716 1672
1717 pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1673 pr_debug("%pM\n", mac_addr);
1718 mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
1719 mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
1720 1674
1721 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1675 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1722 hlist_for_each_entry(entry, node, head, next) { 1676 hlist_for_each_entry(entry, node, head, next) {
1723 if (!compare_ether_addr(mac_addr, entry->mac_addr)) { 1677 if (!compare_ether_addr(mac_addr, entry->mac_addr))
1724 return entry; 1678 return entry;
1725 }
1726 } 1679 }
1727 return NULL; 1680 return NULL;
1728} 1681}
@@ -1734,7 +1687,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
1734 1687
1735 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1688 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1736 if (!to_return) { 1689 if (!to_return) {
1737 printk("LEC: Arp entry kmalloc failed\n"); 1690 pr_info("LEC: Arp entry kmalloc failed\n");
1738 return NULL; 1691 return NULL;
1739 } 1692 }
1740 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1693 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
@@ -1755,7 +1708,7 @@ static void lec_arp_expire_arp(unsigned long data)
1755 1708
1756 entry = (struct lec_arp_table *)data; 1709 entry = (struct lec_arp_table *)data;
1757 1710
1758 pr_debug("lec_arp_expire_arp\n"); 1711 pr_debug("\n");
1759 if (entry->status == ESI_ARP_PENDING) { 1712 if (entry->status == ESI_ARP_PENDING) {
1760 if (entry->no_tries <= entry->priv->max_retry_count) { 1713 if (entry->no_tries <= entry->priv->max_retry_count) {
1761 if (entry->is_rdesc) 1714 if (entry->is_rdesc)
@@ -1779,10 +1732,10 @@ static void lec_arp_expire_vcc(unsigned long data)
1779 1732
1780 del_timer(&to_remove->timer); 1733 del_timer(&to_remove->timer);
1781 1734
1782 pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", 1735 pr_debug("%p %p: vpi:%d vci:%d\n",
1783 to_remove, priv, 1736 to_remove, priv,
1784 to_remove->vcc ? to_remove->recv_vcc->vpi : 0, 1737 to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
1785 to_remove->vcc ? to_remove->recv_vcc->vci : 0); 1738 to_remove->vcc ? to_remove->recv_vcc->vci : 0);
1786 1739
1787 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1740 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1788 hlist_del(&to_remove->next); 1741 hlist_del(&to_remove->next);
@@ -1792,6 +1745,50 @@ static void lec_arp_expire_vcc(unsigned long data)
1792 lec_arp_put(to_remove); 1745 lec_arp_put(to_remove);
1793} 1746}
1794 1747
1748static bool __lec_arp_check_expire(struct lec_arp_table *entry,
1749 unsigned long now,
1750 struct lec_priv *priv)
1751{
1752 unsigned long time_to_check;
1753
1754 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
1755 time_to_check = priv->forward_delay_time;
1756 else
1757 time_to_check = priv->aging_time;
1758
1759 pr_debug("About to expire: %lx - %lx > %lx\n",
1760 now, entry->last_used, time_to_check);
1761 if (time_after(now, entry->last_used + time_to_check) &&
1762 !(entry->flags & LEC_PERMANENT_FLAG) &&
1763 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
1764 /* Remove entry */
1765 pr_debug("Entry timed out\n");
1766 lec_arp_remove(priv, entry);
1767 lec_arp_put(entry);
1768 } else {
1769 /* Something else */
1770 if ((entry->status == ESI_VC_PENDING ||
1771 entry->status == ESI_ARP_PENDING) &&
1772 time_after_eq(now, entry->timestamp +
1773 priv->max_unknown_frame_time)) {
1774 entry->timestamp = jiffies;
1775 entry->packets_flooded = 0;
1776 if (entry->status == ESI_VC_PENDING)
1777 send_to_lecd(priv, l_svc_setup,
1778 entry->mac_addr,
1779 entry->atm_addr,
1780 NULL);
1781 }
1782 if (entry->status == ESI_FLUSH_PENDING &&
1783 time_after_eq(now, entry->timestamp +
1784 priv->path_switching_delay)) {
1785 lec_arp_hold(entry);
1786 return true;
1787 }
1788 }
1789
1790 return false;
1791}
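
The helper added above compares jiffies-based timestamps with time_after()/time_after_eq(), which stay correct across counter wrap-around because the test is done on a signed difference of unsigned values. A stand-alone sketch of the idiom; the macro below mirrors the definition in include/linux/jiffies.h, and the tick values are invented:

#include <stdio.h>

/* Wrap-safe "is a later than b?", as in the kernel's time_after(). */
#define time_after(a, b)    ((long)((b) - (a)) < 0)

int main(void)
{
    /* Pretend the counter wrapped: "now" is numerically smaller than "last_used". */
    unsigned long last_used = (unsigned long)-10;   /* just before the wrap */
    unsigned long aging_time = 100;
    unsigned long now = 200;                        /* 210 ticks later, after the wrap */

    if (time_after(now, last_used + aging_time))
        printf("entry timed out\n");
    else
        printf("entry still fresh\n");
    return 0;
}
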
1795/* 1792/*
1796 * Expire entries. 1793 * Expire entries.
1797 * 1. Re-set timer 1794 * 1. Re-set timer
@@ -1816,62 +1813,28 @@ static void lec_arp_check_expire(struct work_struct *work)
1816 struct hlist_node *node, *next; 1813 struct hlist_node *node, *next;
1817 struct lec_arp_table *entry; 1814 struct lec_arp_table *entry;
1818 unsigned long now; 1815 unsigned long now;
1819 unsigned long time_to_check;
1820 int i; 1816 int i;
1821 1817
1822 pr_debug("lec_arp_check_expire %p\n", priv); 1818 pr_debug("%p\n", priv);
1823 now = jiffies; 1819 now = jiffies;
1824restart: 1820restart:
1825 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1821 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1826 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1822 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1827 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1823 hlist_for_each_entry_safe(entry, node, next,
1828 if ((entry->flags) & LEC_REMOTE_FLAG && 1824 &priv->lec_arp_tables[i], next) {
1829 priv->topology_change) 1825 if (__lec_arp_check_expire(entry, now, priv)) {
1830 time_to_check = priv->forward_delay_time; 1826 struct sk_buff *skb;
1831 else 1827 struct atm_vcc *vcc = entry->vcc;
1832 time_to_check = priv->aging_time; 1828
1833 1829 spin_unlock_irqrestore(&priv->lec_arp_lock,
1834 pr_debug("About to expire: %lx - %lx > %lx\n", 1830 flags);
1835 now, entry->last_used, time_to_check); 1831 while ((skb = skb_dequeue(&entry->tx_wait)))
1836 if (time_after(now, entry->last_used + time_to_check) 1832 lec_send(vcc, skb);
1837 && !(entry->flags & LEC_PERMANENT_FLAG) 1833 entry->last_used = jiffies;
1838 && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ 1834 entry->status = ESI_FORWARD_DIRECT;
1839 /* Remove entry */
1840 pr_debug("LEC:Entry timed out\n");
1841 lec_arp_remove(priv, entry);
1842 lec_arp_put(entry); 1835 lec_arp_put(entry);
1843 } else { 1836
1844 /* Something else */ 1837 goto restart;
1845 if ((entry->status == ESI_VC_PENDING ||
1846 entry->status == ESI_ARP_PENDING)
1847 && time_after_eq(now,
1848 entry->timestamp +
1849 priv->
1850 max_unknown_frame_time)) {
1851 entry->timestamp = jiffies;
1852 entry->packets_flooded = 0;
1853 if (entry->status == ESI_VC_PENDING)
1854 send_to_lecd(priv, l_svc_setup,
1855 entry->mac_addr,
1856 entry->atm_addr,
1857 NULL);
1858 }
1859 if (entry->status == ESI_FLUSH_PENDING
1860 &&
1861 time_after_eq(now, entry->timestamp +
1862 priv->path_switching_delay)) {
1863 struct sk_buff *skb;
1864 struct atm_vcc *vcc = entry->vcc;
1865
1866 lec_arp_hold(entry);
1867 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1868 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1869 lec_send(vcc, skb);
1870 entry->last_used = jiffies;
1871 entry->status = ESI_FORWARD_DIRECT;
1872 lec_arp_put(entry);
1873 goto restart;
1874 }
1875 } 1838 }
1876 } 1839 }
1877 } 1840 }
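
The rewritten loop above follows a common pattern: entries are scanned under priv->lec_arp_lock, but when an ESI_FLUSH_PENDING entry has to have its tx_wait queue drained, the code takes a reference with lec_arp_hold(), drops the spinlock, sends the queued skbs with lec_send(), releases the reference, and restarts the whole scan because the table may have changed while the lock was not held. A much-simplified pthread sketch of the same unlock-work-restart shape (the list, flag and refcount handling are illustrative; the kernel uses an atomic refcount):

#include <pthread.h>
#include <stdio.h>

struct entry {
    struct entry *next;
    int needs_flush;
    int refcnt;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table_head;

static void flush_entry(struct entry *e)    /* stands in for draining tx_wait */
{
    e->needs_flush = 0;
    printf("flushed %p\n", (void *)e);
}

static void check_expire(void)
{
    struct entry *e;

restart:
    pthread_mutex_lock(&table_lock);
    for (e = table_head; e; e = e->next) {
        if (e->needs_flush) {
            e->refcnt++;                        /* like lec_arp_hold() */
            pthread_mutex_unlock(&table_lock);
            flush_entry(e);                     /* done without the lock held */
            e->refcnt--;                        /* like lec_arp_put(); real code uses an atomic refcount */
            goto restart;                       /* list may have changed meanwhile */
        }
    }
    pthread_mutex_unlock(&table_lock);
}

int main(void)
{
    struct entry a = { NULL, 1, 1 }, b = { &a, 0, 1 };

    table_head = &b;
    check_expire();
    return 0;
}
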
@@ -1885,7 +1848,8 @@ restart:
1885 * 1848 *
1886 */ 1849 */
1887static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, 1850static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1888 const unsigned char *mac_to_find, int is_rdesc, 1851 const unsigned char *mac_to_find,
1852 int is_rdesc,
1889 struct lec_arp_table **ret_entry) 1853 struct lec_arp_table **ret_entry)
1890{ 1854{
1891 unsigned long flags; 1855 unsigned long flags;
@@ -1921,9 +1885,8 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1921 * If the LE_ARP cache entry is still pending, reset count to 0 1885 * If the LE_ARP cache entry is still pending, reset count to 0
1922 * so another LE_ARP request can be made for this frame. 1886 * so another LE_ARP request can be made for this frame.
1923 */ 1887 */
1924 if (entry->status == ESI_ARP_PENDING) { 1888 if (entry->status == ESI_ARP_PENDING)
1925 entry->no_tries = 0; 1889 entry->no_tries = 0;
1926 }
1927 /* 1890 /*
1928 * Data direct VC not yet set up, check to see if the unknown 1891 * Data direct VC not yet set up, check to see if the unknown
1929 * frame count is greater than the limit. If the limit has 1892 * frame count is greater than the limit. If the limit has
@@ -1934,7 +1897,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1934 entry->packets_flooded < 1897 entry->packets_flooded <
1935 priv->maximum_unknown_frame_count) { 1898 priv->maximum_unknown_frame_count) {
1936 entry->packets_flooded++; 1899 entry->packets_flooded++;
1937 pr_debug("LEC_ARP: Flooding..\n"); 1900 pr_debug("Flooding..\n");
1938 found = priv->mcast_vcc; 1901 found = priv->mcast_vcc;
1939 goto out; 1902 goto out;
1940 } 1903 }
@@ -1945,13 +1908,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1945 */ 1908 */
1946 lec_arp_hold(entry); 1909 lec_arp_hold(entry);
1947 *ret_entry = entry; 1910 *ret_entry = entry;
1948 pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status, 1911 pr_debug("entry->status %d entry->vcc %p\n", entry->status,
1949 entry->vcc); 1912 entry->vcc);
1950 found = NULL; 1913 found = NULL;
1951 } else { 1914 } else {
1952 /* No matching entry was found */ 1915 /* No matching entry was found */
1953 entry = make_entry(priv, mac_to_find); 1916 entry = make_entry(priv, mac_to_find);
1954 pr_debug("LEC_ARP: Making entry\n"); 1917 pr_debug("Making entry\n");
1955 if (!entry) { 1918 if (!entry) {
1956 found = priv->mcast_vcc; 1919 found = priv->mcast_vcc;
1957 goto out; 1920 goto out;
@@ -1988,13 +1951,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1988 struct lec_arp_table *entry; 1951 struct lec_arp_table *entry;
1989 int i; 1952 int i;
1990 1953
1991 pr_debug("lec_addr_delete\n"); 1954 pr_debug("\n");
1992 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1955 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1993 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1956 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1994 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1957 hlist_for_each_entry_safe(entry, node, next,
1995 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) 1958 &priv->lec_arp_tables[i], next) {
1996 && (permanent || 1959 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1997 !(entry->flags & LEC_PERMANENT_FLAG))) { 1960 (permanent ||
1961 !(entry->flags & LEC_PERMANENT_FLAG))) {
1998 lec_arp_remove(priv, entry); 1962 lec_arp_remove(priv, entry);
1999 lec_arp_put(entry); 1963 lec_arp_put(entry);
2000 } 1964 }
@@ -2019,10 +1983,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2019 struct lec_arp_table *entry, *tmp; 1983 struct lec_arp_table *entry, *tmp;
2020 int i; 1984 int i;
2021 1985
2022 pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " "); 1986 pr_debug("%smac:%pM\n",
2023 pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 1987 (targetless_le_arp) ? "targetless " : "", mac_addr);
2024 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2025 mac_addr[4], mac_addr[5]);
2026 1988
2027 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1989 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2028 entry = lec_arp_find(priv, mac_addr); 1990 entry = lec_arp_find(priv, mac_addr);
@@ -2032,7 +1994,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2032 * we have no entry in the cache. 7.1.30 1994 * we have no entry in the cache. 7.1.30
2033 */ 1995 */
2034 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1996 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
2035 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1997 hlist_for_each_entry_safe(entry, node, next,
1998 &priv->lec_arp_empty_ones, next) {
2036 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1999 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
2037 hlist_del(&entry->next); 2000 hlist_del(&entry->next);
2038 del_timer(&entry->timer); 2001 del_timer(&entry->timer);
@@ -2076,7 +2039,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2076 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 2039 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
2077 del_timer(&entry->timer); 2040 del_timer(&entry->timer);
2078 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2041 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2079 hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) { 2042 hlist_for_each_entry(tmp, node,
2043 &priv->lec_arp_tables[i], next) {
2080 if (entry != tmp && 2044 if (entry != tmp &&
2081 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 2045 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
2082 /* Vcc to this host exists */ 2046 /* Vcc to this host exists */
@@ -2121,14 +2085,13 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2121 int i, found_entry = 0; 2085 int i, found_entry = 0;
2122 2086
2123 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2087 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2088 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2124 if (ioc_data->receive == 2) { 2089 if (ioc_data->receive == 2) {
2125 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2126
2127 pr_debug("LEC_ARP: Attaching mcast forward\n"); 2090 pr_debug("LEC_ARP: Attaching mcast forward\n");
2128#if 0 2091#if 0
2129 entry = lec_arp_find(priv, bus_mac); 2092 entry = lec_arp_find(priv, bus_mac);
2130 if (!entry) { 2093 if (!entry) {
2131 printk("LEC_ARP: Multicast entry not found!\n"); 2094 pr_info("LEC_ARP: Multicast entry not found!\n");
2132 goto out; 2095 goto out;
2133 } 2096 }
2134 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2097 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
@@ -2149,19 +2112,17 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2149 * Vcc which we don't want to make default vcc, 2112 * Vcc which we don't want to make default vcc,
2150 * attach it anyway. 2113 * attach it anyway.
2151 */ 2114 */
2152 pr_debug 2115 pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2153 ("LEC_ARP:Attaching data direct, not default: " 2116 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2154 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2117 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2155 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2118 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2156 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2119 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2157 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2120 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2158 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2121 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2159 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2122 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2160 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2123 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2161 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2124 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2162 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2125 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2163 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2164 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2165 entry = make_entry(priv, bus_mac); 2126 entry = make_entry(priv, bus_mac);
2166 if (entry == NULL) 2127 if (entry == NULL)
2167 goto out; 2128 goto out;
@@ -2177,29 +2138,28 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2177 dump_arp_table(priv); 2138 dump_arp_table(priv);
2178 goto out; 2139 goto out;
2179 } 2140 }
2180 pr_debug 2141 pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2181 ("LEC_ARP:Attaching data direct, default: " 2142 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2182 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2143 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2183 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2144 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2184 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2145 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2185 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2146 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2186 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2147 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2187 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2148 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2188 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2149 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2189 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2150 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2190 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2151 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2191 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2192 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2193 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2152 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2194 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2153 hlist_for_each_entry(entry, node,
2154 &priv->lec_arp_tables[i], next) {
2195 if (memcmp 2155 if (memcmp
2196 (ioc_data->atm_addr, entry->atm_addr, 2156 (ioc_data->atm_addr, entry->atm_addr,
2197 ATM_ESA_LEN) == 0) { 2157 ATM_ESA_LEN) == 0) {
2198 pr_debug("LEC_ARP: Attaching data direct\n"); 2158 pr_debug("LEC_ARP: Attaching data direct\n");
2199 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 2159 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
2200 entry->vcc ? entry->vcc->vci : 0, 2160 entry->vcc ? entry->vcc->vci : 0,
2201 entry->recv_vcc ? entry->recv_vcc-> 2161 entry->recv_vcc ? entry->recv_vcc->
2202 vci : 0); 2162 vci : 0);
2203 found_entry = 1; 2163 found_entry = 1;
2204 del_timer(&entry->timer); 2164 del_timer(&entry->timer);
2205 entry->vcc = vcc; 2165 entry->vcc = vcc;
@@ -2271,19 +2231,21 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2271 struct lec_arp_table *entry; 2231 struct lec_arp_table *entry;
2272 int i; 2232 int i;
2273 2233
2274 pr_debug("LEC:lec_flush_complete %lx\n", tran_id); 2234 pr_debug("%lx\n", tran_id);
2275restart: 2235restart:
2276 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2236 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2277 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2237 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2278 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2238 hlist_for_each_entry(entry, node,
2279 if (entry->flush_tran_id == tran_id 2239 &priv->lec_arp_tables[i], next) {
2280 && entry->status == ESI_FLUSH_PENDING) { 2240 if (entry->flush_tran_id == tran_id &&
2241 entry->status == ESI_FLUSH_PENDING) {
2281 struct sk_buff *skb; 2242 struct sk_buff *skb;
2282 struct atm_vcc *vcc = entry->vcc; 2243 struct atm_vcc *vcc = entry->vcc;
2283 2244
2284 lec_arp_hold(entry); 2245 lec_arp_hold(entry);
2285 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2246 spin_unlock_irqrestore(&priv->lec_arp_lock,
2286 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 2247 flags);
2248 while ((skb = skb_dequeue(&entry->tx_wait)))
2287 lec_send(vcc, skb); 2249 lec_send(vcc, skb);
2288 entry->last_used = jiffies; 2250 entry->last_used = jiffies;
2289 entry->status = ESI_FORWARD_DIRECT; 2251 entry->status = ESI_FORWARD_DIRECT;
@@ -2308,11 +2270,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2308 2270
2309 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2271 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2310 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2272 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2311 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2273 hlist_for_each_entry(entry, node,
2274 &priv->lec_arp_tables[i], next) {
2312 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2275 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2313 entry->flush_tran_id = tran_id; 2276 entry->flush_tran_id = tran_id;
2314 pr_debug("Set flush transaction id to %lx for %p\n", 2277 pr_debug("Set flush transaction id to %lx for %p\n",
2315 tran_id, entry); 2278 tran_id, entry);
2316 } 2279 }
2317 } 2280 }
2318 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2281 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2328,7 +2291,8 @@ static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2328 struct lec_vcc_priv *vpriv; 2291 struct lec_vcc_priv *vpriv;
2329 int err = 0; 2292 int err = 0;
2330 2293
2331 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 2294 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
2295 if (!vpriv)
2332 return -ENOMEM; 2296 return -ENOMEM;
2333 vpriv->xoff = 0; 2297 vpriv->xoff = 0;
2334 vpriv->old_pop = vcc->pop; 2298 vpriv->old_pop = vcc->pop;
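
The change above is purely stylistic: kernel style discourages assignments inside an if condition, so the kmalloc() call moves onto its own line and a plain NULL test follows. The same shape in a small userspace sketch with invented names:

#include <stdlib.h>

struct lec_vcc_priv { int xoff; };

static int mcast_make(void)
{
    struct lec_vcc_priv *vpriv;

    /* Before: if (!(vpriv = malloc(sizeof(*vpriv)))) return -1; */
    vpriv = malloc(sizeof(*vpriv));
    if (!vpriv)
        return -1;      /* -ENOMEM in the kernel version */

    vpriv->xoff = 0;
    free(vpriv);
    return 0;
}

int main(void) { return mcast_make(); }
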
@@ -2368,18 +2332,19 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2368 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2332 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2369 2333
2370 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2334 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2371 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 2335 hlist_for_each_entry_safe(entry, node, next,
2336 &priv->lec_arp_tables[i], next) {
2372 if (vcc == entry->vcc) { 2337 if (vcc == entry->vcc) {
2373 lec_arp_remove(priv, entry); 2338 lec_arp_remove(priv, entry);
2374 lec_arp_put(entry); 2339 lec_arp_put(entry);
2375 if (priv->mcast_vcc == vcc) { 2340 if (priv->mcast_vcc == vcc)
2376 priv->mcast_vcc = NULL; 2341 priv->mcast_vcc = NULL;
2377 }
2378 } 2342 }
2379 } 2343 }
2380 } 2344 }
2381 2345
2382 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2346 hlist_for_each_entry_safe(entry, node, next,
2347 &priv->lec_arp_empty_ones, next) {
2383 if (entry->vcc == vcc) { 2348 if (entry->vcc == vcc) {
2384 lec_arp_clear_vccs(entry); 2349 lec_arp_clear_vccs(entry);
2385 del_timer(&entry->timer); 2350 del_timer(&entry->timer);
@@ -2388,7 +2353,8 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2388 } 2353 }
2389 } 2354 }
2390 2355
2391 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 2356 hlist_for_each_entry_safe(entry, node, next,
2357 &priv->lec_no_forward, next) {
2392 if (entry->recv_vcc == vcc) { 2358 if (entry->recv_vcc == vcc) {
2393 lec_arp_clear_vccs(entry); 2359 lec_arp_clear_vccs(entry);
2394 del_timer(&entry->timer); 2360 del_timer(&entry->timer);
@@ -2429,14 +2395,16 @@ lec_arp_check_empties(struct lec_priv *priv,
2429 src = hdr->h_source; 2395 src = hdr->h_source;
2430 2396
2431 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2397 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2432 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2398 hlist_for_each_entry_safe(entry, node, next,
2399 &priv->lec_arp_empty_ones, next) {
2433 if (vcc == entry->vcc) { 2400 if (vcc == entry->vcc) {
2434 del_timer(&entry->timer); 2401 del_timer(&entry->timer);
2435 memcpy(entry->mac_addr, src, ETH_ALEN); 2402 memcpy(entry->mac_addr, src, ETH_ALEN);
2436 entry->status = ESI_FORWARD_DIRECT; 2403 entry->status = ESI_FORWARD_DIRECT;
2437 entry->last_used = jiffies; 2404 entry->last_used = jiffies;
2438 /* We might have got an entry */ 2405 /* We might have got an entry */
2439 if ((tmp = lec_arp_find(priv, src))) { 2406 tmp = lec_arp_find(priv, src);
2407 if (tmp) {
2440 lec_arp_remove(priv, tmp); 2408 lec_arp_remove(priv, tmp);
2441 lec_arp_put(tmp); 2409 lec_arp_put(tmp);
2442 } 2410 }
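
Several of the hunks above (lec_arp_destroy, lec_vcc_close, lec_arp_check_empties) walk hash chains with hlist_for_each_entry_safe(), which caches the next pointer before the loop body runs so the body may unlink and free the current entry. A minimal singly-linked-list analogue showing why the next pointer must be saved first (types and the "dead" predicate are invented):

#include <stdlib.h>

struct node {
    struct node *next;
    int dead;
};

/* Remove and free every node marked dead; head is passed by reference. */
static void prune(struct node **head)
{
    struct node **pprev = head;
    struct node *n, *next;

    for (n = *head; n; n = next) {
        next = n->next;         /* saved first, like the _safe iterator */
        if (n->dead) {
            *pprev = next;      /* unlink, like hlist_del() */
            free(n);
        } else {
            pprev = &n->next;
        }
    }
}

int main(void)
{
    struct node *head = NULL, *n;
    int i;

    for (i = 0; i < 4; i++) {
        n = malloc(sizeof(*n));
        n->dead = i & 1;        /* mark every other node */
        n->next = head;
        head = n;
    }
    prune(&head);
    while (head) {              /* free the survivors */
        n = head->next;
        free(head);
        head = n;
    }
    return 0;
}
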
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 38a6cb0863f0..436f2e177657 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1,5 +1,8 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/slab.h>
3#include <linux/timer.h> 6#include <linux/timer.h>
4#include <linux/init.h> 7#include <linux/init.h>
5#include <linux/bitops.h> 8#include <linux/bitops.h>
@@ -13,8 +16,8 @@
13#include <net/sock.h> 16#include <net/sock.h>
14#include <linux/skbuff.h> 17#include <linux/skbuff.h>
15#include <linux/ip.h> 18#include <linux/ip.h>
19#include <linux/uaccess.h>
16#include <asm/byteorder.h> 20#include <asm/byteorder.h>
17#include <asm/uaccess.h>
18#include <net/checksum.h> /* for ip_fast_csum() */ 21#include <net/checksum.h> /* for ip_fast_csum() */
19#include <net/arp.h> 22#include <net/arp.h>
20#include <net/dst.h> 23#include <net/dst.h>
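
The first mpc.c hunk adds a pr_fmt() definition before the includes, which is why later pr_info()/pr_debug() calls in this file can drop their hand-written "mpoa: ... function:" prefixes: the pr_* macros in printk.h paste pr_fmt() into every format string, and this particular definition also passes __func__ as the first argument. A userspace analogue of the trick (LOG_PREFIX and log_info() are invented names):

#include <stdio.h>

/* Must be defined before the "logging header" would be included. */
#define LOG_PREFIX "mpoa:%s: "

/* Every call site gets the module name and function name for free. */
#define log_info(fmt, ...) \
    printf(LOG_PREFIX fmt, __func__, ##__VA_ARGS__)

static void mpc_vcc_close(const char *devname)
{
    log_info("(%s) close for unknown MPC\n", devname);
}

int main(void)
{
    mpc_vcc_close("eth0");   /* prints "mpoa:mpc_vcc_close: (eth0) close for unknown MPC" */
    return 0;
}
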
@@ -36,31 +39,47 @@
36 */ 39 */
37 40
38#if 0 41#if 0
39#define dprintk printk /* debug */ 42#define dprintk(format, args...) \
43 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
44#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
40#else 45#else
41#define dprintk(format,args...) 46#define dprintk(format, args...) \
47 do { if (0) \
48 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
49 } while (0)
50#define dprintk_cont(format, args...) \
51 do { if (0) printk(KERN_CONT format, ##args); } while (0)
42#endif 52#endif
43 53
44#if 0 54#if 0
45#define ddprintk printk /* more debug */ 55#define ddprintk(format, args...) \
56 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
57#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
46#else 58#else
47#define ddprintk(format,args...) 59#define ddprintk(format, args...) \
60 do { if (0) \
61 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
62 } while (0)
63#define ddprintk_cont(format, args...) \
64 do { if (0) printk(KERN_CONT format, ##args); } while (0)
48#endif 65#endif
49 66
50
51
52#define MPOA_TAG_LEN 4 67#define MPOA_TAG_LEN 4
53 68
54/* mpc_daemon -> kernel */ 69/* mpc_daemon -> kernel */
55static void MPOA_trigger_rcvd (struct k_message *msg, struct mpoa_client *mpc); 70static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
56static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc); 71static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
57static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 72static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
58static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 73static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
59static void mps_death(struct k_message *msg, struct mpoa_client *mpc); 74static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
60static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action); 75static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
61static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc); 76 int action);
62static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 77static void MPOA_cache_impos_rcvd(struct k_message *msg,
63static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 78 struct mpoa_client *mpc);
79static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
80 struct mpoa_client *mpc);
81static void set_mps_mac_addr_rcvd(struct k_message *mesg,
82 struct mpoa_client *mpc);
64 83
65static const uint8_t *copy_macs(struct mpoa_client *mpc, 84static const uint8_t *copy_macs(struct mpoa_client *mpc,
66 const uint8_t *router_mac, 85 const uint8_t *router_mac,
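
The dprintk()/ddprintk() rework above swaps the old empty expansion for a do { if (0) printk(...); } while (0) body. With if (0) the arguments are still parsed and type-checked even when debugging is compiled out, and the do/while wrapper keeps the macro safe inside an unbraced if/else. A userspace sketch of the pattern (DEBUG_ON and the messages are invented):

#include <stdio.h>

#define DEBUG_ON 0

/* Arguments stay visible to the compiler even when the message is compiled out. */
#define dprintk(fmt, ...)                                       \
    do {                                                        \
        if (DEBUG_ON)                                           \
            fprintf(stderr, "mpoa: " fmt, ##__VA_ARGS__);       \
    } while (0)

int main(void)
{
    int vcc_count = 3;

    if (vcc_count)
        dprintk("have %d VCCs\n", vcc_count);   /* safe in an unbraced if/else */
    else
        printf("no VCCs\n");
    return 0;
}
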
@@ -74,10 +93,11 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
74 93
75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); 94static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
76static netdev_tx_t mpc_send_packet(struct sk_buff *skb, 95static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
77 struct net_device *dev); 96 struct net_device *dev);
78static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); 97static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
98 unsigned long event, void *dev);
79static void mpc_timer_refresh(void); 99static void mpc_timer_refresh(void);
80static void mpc_cache_check( unsigned long checking_time ); 100static void mpc_cache_check(unsigned long checking_time);
81 101
82static struct llc_snap_hdr llc_snap_mpoa_ctrl = { 102static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
83 0xaa, 0xaa, 0x03, 103 0xaa, 0xaa, 0x03,
@@ -167,7 +187,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
167 187
168 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); 188 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
169 if (entry == NULL) { 189 if (entry == NULL) {
170 printk("mpoa: atm_mpoa_add_qos: out of memory\n"); 190 pr_info("mpoa: out of memory\n");
171 return entry; 191 return entry;
172 } 192 }
173 193
@@ -185,10 +205,9 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
185 struct atm_mpoa_qos *qos; 205 struct atm_mpoa_qos *qos;
186 206
187 qos = qos_head; 207 qos = qos_head;
188 while( qos != NULL ){ 208 while (qos) {
189 if(qos->ipaddr == dst_ip) { 209 if (qos->ipaddr == dst_ip)
190 break; 210 break;
191 }
192 qos = qos->next; 211 qos = qos->next;
193 } 212 }
194 213
@@ -200,10 +219,10 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
200 */ 219 */
201int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) 220int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
202{ 221{
203
204 struct atm_mpoa_qos *curr; 222 struct atm_mpoa_qos *curr;
205 223
206 if (entry == NULL) return 0; 224 if (entry == NULL)
225 return 0;
207 if (entry == qos_head) { 226 if (entry == qos_head) {
208 qos_head = qos_head->next; 227 qos_head = qos_head->next;
209 kfree(entry); 228 kfree(entry);
@@ -234,9 +253,17 @@ void atm_mpoa_disp_qos(struct seq_file *m)
234 253
235 while (qos != NULL) { 254 while (qos != NULL) {
236 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n", 255 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
237 &qos->ipaddr, 256 &qos->ipaddr,
238 qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu, 257 qos->qos.txtp.max_pcr,
239 qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu); 258 qos->qos.txtp.pcr,
259 qos->qos.txtp.min_pcr,
260 qos->qos.txtp.max_cdv,
261 qos->qos.txtp.max_sdu,
262 qos->qos.rxtp.max_pcr,
263 qos->qos.rxtp.pcr,
264 qos->qos.rxtp.min_pcr,
265 qos->qos.rxtp.max_cdv,
266 qos->qos.rxtp.max_sdu);
240 qos = qos->next; 267 qos = qos->next;
241 } 268 }
242} 269}
@@ -256,7 +283,7 @@ static struct mpoa_client *alloc_mpc(void)
256{ 283{
257 struct mpoa_client *mpc; 284 struct mpoa_client *mpc;
258 285
259 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 286 mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
260 if (mpc == NULL) 287 if (mpc == NULL)
261 return NULL; 288 return NULL;
262 rwlock_init(&mpc->ingress_lock); 289 rwlock_init(&mpc->ingress_lock);
@@ -266,7 +293,7 @@ static struct mpoa_client *alloc_mpc(void)
266 293
267 mpc->parameters.mpc_p1 = MPC_P1; 294 mpc->parameters.mpc_p1 = MPC_P1;
268 mpc->parameters.mpc_p2 = MPC_P2; 295 mpc->parameters.mpc_p2 = MPC_P2;
269 memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3)); 296 memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
270 mpc->parameters.mpc_p4 = MPC_P4; 297 mpc->parameters.mpc_p4 = MPC_P4;
271 mpc->parameters.mpc_p5 = MPC_P5; 298 mpc->parameters.mpc_p5 = MPC_P5;
272 mpc->parameters.mpc_p6 = MPC_P6; 299 mpc->parameters.mpc_p6 = MPC_P6;
@@ -286,9 +313,9 @@ static struct mpoa_client *alloc_mpc(void)
286static void start_mpc(struct mpoa_client *mpc, struct net_device *dev) 313static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
287{ 314{
288 315
289 dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name); 316 dprintk("(%s)\n", mpc->dev->name);
290 if (!dev->netdev_ops) 317 if (!dev->netdev_ops)
291 printk("mpoa: (%s) start_mpc not starting\n", dev->name); 318 pr_info("(%s) not starting\n", dev->name);
292 else { 319 else {
293 mpc->old_ops = dev->netdev_ops; 320 mpc->old_ops = dev->netdev_ops;
294 mpc->new_ops = *mpc->old_ops; 321 mpc->new_ops = *mpc->old_ops;
@@ -300,14 +327,14 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
300static void stop_mpc(struct mpoa_client *mpc) 327static void stop_mpc(struct mpoa_client *mpc)
301{ 328{
302 struct net_device *dev = mpc->dev; 329 struct net_device *dev = mpc->dev;
303 dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name); 330 dprintk("(%s)", mpc->dev->name);
304 331
305 /* Lets not nullify lec device's dev->hard_start_xmit */ 332 /* Lets not nullify lec device's dev->hard_start_xmit */
306 if (dev->netdev_ops != &mpc->new_ops) { 333 if (dev->netdev_ops != &mpc->new_ops) {
307 dprintk(" mpc already stopped, not fatal\n"); 334 dprintk_cont(" mpc already stopped, not fatal\n");
308 return; 335 return;
309 } 336 }
310 dprintk("\n"); 337 dprintk_cont("\n");
311 338
312 dev->netdev_ops = mpc->old_ops; 339 dev->netdev_ops = mpc->old_ops;
313 mpc->old_ops = NULL; 340 mpc->old_ops = NULL;
@@ -319,25 +346,18 @@ static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
319 346
320static const char *mpoa_device_type_string(char type) 347static const char *mpoa_device_type_string(char type)
321{ 348{
322 switch(type) { 349 switch (type) {
323 case NON_MPOA: 350 case NON_MPOA:
324 return "non-MPOA device"; 351 return "non-MPOA device";
325 break;
326 case MPS: 352 case MPS:
327 return "MPS"; 353 return "MPS";
328 break;
329 case MPC: 354 case MPC:
330 return "MPC"; 355 return "MPC";
331 break;
332 case MPS_AND_MPC: 356 case MPS_AND_MPC:
333 return "both MPS and MPC"; 357 return "both MPS and MPC";
334 break;
335 default:
336 return "unspecified (non-MPOA) device";
337 break;
338 } 358 }
339 359
340 return ""; /* not reached */ 360 return "unspecified (non-MPOA) device";
341} 361}
342 362
343/* 363/*
@@ -362,26 +382,28 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
362 struct mpoa_client *mpc; 382 struct mpoa_client *mpc;
363 383
364 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */ 384 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
365 dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name); 385 dprintk("(%s) received TLV(s), ", dev->name);
366 dprintk("total length of all TLVs %d\n", sizeoftlvs); 386 dprintk("total length of all TLVs %d\n", sizeoftlvs);
367 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */ 387 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
368 if (mpc == NULL) { 388 if (mpc == NULL) {
369 printk("mpoa: (%s) lane2_assoc_ind: no mpc\n", dev->name); 389 pr_info("(%s) no mpc\n", dev->name);
370 return; 390 return;
371 } 391 }
372 end_of_tlvs = tlvs + sizeoftlvs; 392 end_of_tlvs = tlvs + sizeoftlvs;
373 while (end_of_tlvs - tlvs >= 5) { 393 while (end_of_tlvs - tlvs >= 5) {
374 type = (tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3]; 394 type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
395 (tlvs[2] << 8) | tlvs[3]);
375 length = tlvs[4]; 396 length = tlvs[4];
376 tlvs += 5; 397 tlvs += 5;
377 dprintk(" type 0x%x length %02x\n", type, length); 398 dprintk(" type 0x%x length %02x\n", type, length);
378 if (tlvs + length > end_of_tlvs) { 399 if (tlvs + length > end_of_tlvs) {
379 printk("TLV value extends past its buffer, aborting parse\n"); 400 pr_info("TLV value extends past its buffer, aborting parse\n");
380 return; 401 return;
381 } 402 }
382 403
383 if (type == 0) { 404 if (type == 0) {
384 printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name); 405 pr_info("mpoa: (%s) TLV type was 0, returning\n",
406 dev->name);
385 return; 407 return;
386 } 408 }
387 409
@@ -391,39 +413,48 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
391 } 413 }
392 mpoa_device_type = *tlvs++; 414 mpoa_device_type = *tlvs++;
393 number_of_mps_macs = *tlvs++; 415 number_of_mps_macs = *tlvs++;
394 dprintk("mpoa: (%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type)); 416 dprintk("(%s) MPOA device type '%s', ",
417 dev->name, mpoa_device_type_string(mpoa_device_type));
395 if (mpoa_device_type == MPS_AND_MPC && 418 if (mpoa_device_type == MPS_AND_MPC &&
396 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */ 419 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
397 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 420 pr_info("(%s) short MPOA Device Type TLV\n",
398 dev->name); 421 dev->name);
399 continue; 422 continue;
400 } 423 }
401 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) 424 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
402 && length < 22 + number_of_mps_macs*ETH_ALEN) { 425 length < 22 + number_of_mps_macs*ETH_ALEN) {
403 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 426 pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
404 dev->name);
405 continue; 427 continue;
406 } 428 }
407 if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) { 429 if (mpoa_device_type != MPS &&
408 dprintk("ignoring non-MPS device\n"); 430 mpoa_device_type != MPS_AND_MPC) {
409 if (mpoa_device_type == MPC) tlvs += 20; 431 dprintk("ignoring non-MPS device ");
432 if (mpoa_device_type == MPC)
433 tlvs += 20;
410 continue; /* we are only interested in MPSs */ 434 continue; /* we are only interested in MPSs */
411 } 435 }
412 if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) { 436 if (number_of_mps_macs == 0 &&
413 printk("\nmpoa: (%s) lane2_assoc_ind: MPS_AND_MPC has zero MACs\n", dev->name); 437 mpoa_device_type == MPS_AND_MPC) {
438 pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
414 continue; /* someone should read the spec */ 439 continue; /* someone should read the spec */
415 } 440 }
416 dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs); 441 dprintk_cont("this MPS has %d MAC addresses\n",
442 number_of_mps_macs);
417 443
418 /* ok, now we can go and tell our daemon the control address of MPS */ 444 /*
445 * ok, now we can go and tell our daemon
446 * the control address of MPS
447 */
419 send_set_mps_ctrl_addr(tlvs, mpc); 448 send_set_mps_ctrl_addr(tlvs, mpc);
420 449
421 tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type); 450 tlvs = copy_macs(mpc, mac_addr, tlvs,
422 if (tlvs == NULL) return; 451 number_of_mps_macs, mpoa_device_type);
452 if (tlvs == NULL)
453 return;
423 } 454 }
424 if (end_of_tlvs - tlvs != 0) 455 if (end_of_tlvs - tlvs != 0)
425 printk("mpoa: (%s) lane2_assoc_ind: ignoring %Zd bytes of trailing TLV carbage\n", 456 pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
426 dev->name, end_of_tlvs - tlvs); 457 dev->name, end_of_tlvs - tlvs);
427 return; 458 return;
428} 459}
429 460
@@ -441,11 +472,12 @@ static const uint8_t *copy_macs(struct mpoa_client *mpc,
441 num_macs = (mps_macs > 1) ? mps_macs : 1; 472 num_macs = (mps_macs > 1) ? mps_macs : 1;
442 473
443 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */ 474 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
444 if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs); 475 if (mpc->number_of_mps_macs != 0)
476 kfree(mpc->mps_macs);
445 mpc->number_of_mps_macs = 0; 477 mpc->number_of_mps_macs = 0;
446 mpc->mps_macs = kmalloc(num_macs*ETH_ALEN, GFP_KERNEL); 478 mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
447 if (mpc->mps_macs == NULL) { 479 if (mpc->mps_macs == NULL) {
448 printk("mpoa: (%s) copy_macs: out of mem\n", mpc->dev->name); 480 pr_info("(%s) out of mem\n", mpc->dev->name);
449 return NULL; 481 return NULL;
450 } 482 }
451 } 483 }
@@ -478,24 +510,30 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
478 iph = (struct iphdr *)buff; 510 iph = (struct iphdr *)buff;
479 ipaddr = iph->daddr; 511 ipaddr = iph->daddr;
480 512
481 ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr); 513 ddprintk("(%s) ipaddr 0x%x\n",
514 mpc->dev->name, ipaddr);
482 515
483 entry = mpc->in_ops->get(ipaddr, mpc); 516 entry = mpc->in_ops->get(ipaddr, mpc);
484 if (entry == NULL) { 517 if (entry == NULL) {
485 entry = mpc->in_ops->add_entry(ipaddr, mpc); 518 entry = mpc->in_ops->add_entry(ipaddr, mpc);
486 if (entry != NULL) mpc->in_ops->put(entry); 519 if (entry != NULL)
520 mpc->in_ops->put(entry);
487 return 1; 521 return 1;
488 } 522 }
489 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */ 523 /* threshold not exceeded or VCC not ready */
490 ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name); 524 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
525 ddprintk("(%s) cache_hit: returns != OPEN\n",
526 mpc->dev->name);
491 mpc->in_ops->put(entry); 527 mpc->in_ops->put(entry);
492 return 1; 528 return 1;
493 } 529 }
494 530
495 ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name); 531 ddprintk("(%s) using shortcut\n",
532 mpc->dev->name);
496 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */ 533 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
497 if (iph->ttl <= 1) { 534 if (iph->ttl <= 1) {
498 ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl); 535 ddprintk("(%s) IP ttl = %u, using LANE\n",
536 mpc->dev->name, iph->ttl);
499 mpc->in_ops->put(entry); 537 mpc->in_ops->put(entry);
500 return 1; 538 return 1;
501 } 539 }
@@ -504,15 +542,18 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
504 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 542 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
505 543
506 if (entry->ctrl_info.tag != 0) { 544 if (entry->ctrl_info.tag != 0) {
507 ddprintk("mpoa: (%s) send_via_shortcut: adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag); 545 ddprintk("(%s) adding tag 0x%x\n",
546 mpc->dev->name, entry->ctrl_info.tag);
508 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; 547 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
509 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 548 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
510 skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ 549 skb_push(skb, sizeof(tagged_llc_snap_hdr));
550 /* add LLC/SNAP header */
511 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, 551 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
512 sizeof(tagged_llc_snap_hdr)); 552 sizeof(tagged_llc_snap_hdr));
513 } else { 553 } else {
514 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 554 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
515 skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ 555 skb_push(skb, sizeof(struct llc_snap_hdr));
556 /* add LLC/SNAP header + tag */
516 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, 557 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
517 sizeof(struct llc_snap_hdr)); 558 sizeof(struct llc_snap_hdr));
518 } 559 }
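
Before a frame goes down the shortcut VCC, send_via_shortcut() decrements the IP TTL (MPOA spec A.1.4, quoted in the code) and recomputes the header checksum with ip_fast_csum(). That checksum is the 16-bit ones'-complement of the ones'-complement sum over the header words; a plain userspace version of the computation, applied to a fabricated 20-byte header:

#include <stdio.h>
#include <stdint.h>

/* Ones'-complement checksum over an IPv4 header of ihl 32-bit words. */
static uint16_t ip_checksum(const uint8_t *hdr, unsigned int ihl)
{
    uint32_t sum = 0;
    unsigned int i;

    for (i = 0; i < ihl * 4; i += 2)    /* header is ihl * 4 bytes long */
        sum += (uint32_t)(hdr[i] << 8) | hdr[i + 1];
    while (sum >> 16)                   /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* Minimal 20-byte IPv4 header, checksum field (bytes 10-11) zeroed. */
    uint8_t iph[20] = {
        0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
        0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
        0xc0, 0xa8, 0x00, 0x02,
    };
    uint16_t csum;

    iph[8]--;                   /* ttl--, as the MPOA client must do */
    csum = ip_checksum(iph, 5); /* ihl = 5 for a header with no options */
    iph[10] = csum >> 8;        /* store big-endian, matching the summation order */
    iph[11] = csum & 0xff;
    printf("ttl=%u checksum=0x%02x%02x\n", iph[8], iph[10], iph[11]);
    return 0;
}
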
@@ -537,8 +578,8 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
537 int i = 0; 578 int i = 0;
538 579
539 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */ 580 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
540 if(mpc == NULL) { 581 if (mpc == NULL) {
541 printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name); 582 pr_info("(%s) no MPC found\n", dev->name);
542 goto non_ip; 583 goto non_ip;
543 } 584 }
544 585
@@ -554,14 +595,15 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
554 goto non_ip; 595 goto non_ip;
555 596
556 while (i < mpc->number_of_mps_macs) { 597 while (i < mpc->number_of_mps_macs) {
557 if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) 598 if (!compare_ether_addr(eth->h_dest,
558 if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ 599 (mpc->mps_macs + i*ETH_ALEN)))
559 return NETDEV_TX_OK; /* success! */ 600 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
601 return NETDEV_TX_OK;
560 i++; 602 i++;
561 } 603 }
562 604
563 non_ip: 605non_ip:
564 return mpc->old_ops->ndo_start_xmit(skb,dev); 606 return mpc->old_ops->ndo_start_xmit(skb, dev);
565} 607}
566 608
567static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) 609static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
@@ -574,7 +616,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
574 616
575 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); 617 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
576 if (bytes_left != 0) { 618 if (bytes_left != 0) {
577 printk("mpoa: mpc_vcc_attach: Short read (missed %d bytes) from userland\n", bytes_left); 619 pr_info("mpoa:Short read (missed %d bytes) from userland\n",
620 bytes_left);
578 return -EFAULT; 621 return -EFAULT;
579 } 622 }
580 ipaddr = ioc_data.ipaddr; 623 ipaddr = ioc_data.ipaddr;
@@ -587,18 +630,20 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
587 630
588 if (ioc_data.type == MPC_SOCKET_INGRESS) { 631 if (ioc_data.type == MPC_SOCKET_INGRESS) {
589 in_entry = mpc->in_ops->get(ipaddr, mpc); 632 in_entry = mpc->in_ops->get(ipaddr, mpc);
590 if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) { 633 if (in_entry == NULL ||
591 printk("mpoa: (%s) mpc_vcc_attach: did not find RESOLVED entry from ingress cache\n", 634 in_entry->entry_state < INGRESS_RESOLVED) {
635 pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
592 mpc->dev->name); 636 mpc->dev->name);
593 if (in_entry != NULL) mpc->in_ops->put(in_entry); 637 if (in_entry != NULL)
638 mpc->in_ops->put(in_entry);
594 return -EINVAL; 639 return -EINVAL;
595 } 640 }
596 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %pI4\n", 641 pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
597 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 642 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
598 in_entry->shortcut = vcc; 643 in_entry->shortcut = vcc;
599 mpc->in_ops->put(in_entry); 644 mpc->in_ops->put(in_entry);
600 } else { 645 } else {
601 printk("mpoa: (%s) mpc_vcc_attach: attaching egress SVC\n", mpc->dev->name); 646 pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
602 } 647 }
603 648
604 vcc->proto_data = mpc->dev; 649 vcc->proto_data = mpc->dev;
@@ -618,27 +663,27 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
618 663
619 mpc = find_mpc_by_lec(dev); 664 mpc = find_mpc_by_lec(dev);
620 if (mpc == NULL) { 665 if (mpc == NULL) {
621 printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name); 666 pr_info("(%s) close for unknown MPC\n", dev->name);
622 return; 667 return;
623 } 668 }
624 669
625 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name); 670 dprintk("(%s)\n", dev->name);
626 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); 671 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
627 if (in_entry) { 672 if (in_entry) {
628 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %pI4\n", 673 dprintk("(%s) ingress SVC closed ip = %pI4\n",
629 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 674 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
630 in_entry->shortcut = NULL; 675 in_entry->shortcut = NULL;
631 mpc->in_ops->put(in_entry); 676 mpc->in_ops->put(in_entry);
632 } 677 }
633 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc); 678 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
634 if (eg_entry) { 679 if (eg_entry) {
635 dprintk("mpoa: (%s) mpc_vcc_close: egress SVC closed\n", mpc->dev->name); 680 dprintk("(%s) egress SVC closed\n", mpc->dev->name);
636 eg_entry->shortcut = NULL; 681 eg_entry->shortcut = NULL;
637 mpc->eg_ops->put(eg_entry); 682 mpc->eg_ops->put(eg_entry);
638 } 683 }
639 684
640 if (in_entry == NULL && eg_entry == NULL) 685 if (in_entry == NULL && eg_entry == NULL)
641 dprintk("mpoa: (%s) mpc_vcc_close: unused vcc closed\n", dev->name); 686 dprintk("(%s) unused vcc closed\n", dev->name);
642 687
643 return; 688 return;
644} 689}
@@ -652,18 +697,19 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
652 __be32 tag; 697 __be32 tag;
653 char *tmp; 698 char *tmp;
654 699
655 ddprintk("mpoa: (%s) mpc_push:\n", dev->name); 700 ddprintk("(%s)\n", dev->name);
656 if (skb == NULL) { 701 if (skb == NULL) {
657 dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name); 702 dprintk("(%s) null skb, closing VCC\n", dev->name);
658 mpc_vcc_close(vcc, dev); 703 mpc_vcc_close(vcc, dev);
659 return; 704 return;
660 } 705 }
661 706
662 skb->dev = dev; 707 skb->dev = dev;
663 if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { 708 if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
709 sizeof(struct llc_snap_hdr)) == 0) {
664 struct sock *sk = sk_atm(vcc); 710 struct sock *sk = sk_atm(vcc);
665 711
666 dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name); 712 dprintk("(%s) control packet arrived\n", dev->name);
667 /* Pass control packets to daemon */ 713 /* Pass control packets to daemon */
668 skb_queue_tail(&sk->sk_receive_queue, skb); 714 skb_queue_tail(&sk->sk_receive_queue, skb);
669 sk->sk_data_ready(sk, skb->len); 715 sk->sk_data_ready(sk, skb->len);
@@ -675,20 +721,22 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
675 721
676 mpc = find_mpc_by_lec(dev); 722 mpc = find_mpc_by_lec(dev);
677 if (mpc == NULL) { 723 if (mpc == NULL) {
678 printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name); 724 pr_info("(%s) unknown MPC\n", dev->name);
679 return; 725 return;
680 } 726 }
681 727
682 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ 728 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
683 ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name); 729 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
730 ddprintk("(%s) tagged data packet arrived\n", dev->name);
684 731
685 } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ 732 } else if (memcmp(skb->data, &llc_snap_mpoa_data,
686 printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name); 733 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
687 printk(" mpc_push: non-tagged data unsupported, purging\n"); 734 pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
735 dev->name);
688 dev_kfree_skb_any(skb); 736 dev_kfree_skb_any(skb);
689 return; 737 return;
690 } else { 738 } else {
691 printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name); 739 pr_info("(%s) garbage arrived, purging\n", dev->name);
692 dev_kfree_skb_any(skb); 740 dev_kfree_skb_any(skb);
693 return; 741 return;
694 } 742 }
@@ -698,8 +746,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
698 746
699 eg = mpc->eg_ops->get_by_tag(tag, mpc); 747 eg = mpc->eg_ops->get_by_tag(tag, mpc);
700 if (eg == NULL) { 748 if (eg == NULL) {
701 printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n", 749 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
702 dev->name,tag); 750 dev->name, tag);
703 purge_egress_shortcut(vcc, NULL); 751 purge_egress_shortcut(vcc, NULL);
704 dev_kfree_skb_any(skb); 752 dev_kfree_skb_any(skb);
705 return; 753 return;
@@ -711,13 +759,15 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
711 */ 759 */
712 if (eg->shortcut == NULL) { 760 if (eg->shortcut == NULL) {
713 eg->shortcut = vcc; 761 eg->shortcut = vcc;
714 printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name); 762 pr_info("(%s) egress SVC in use\n", dev->name);
715 } 763 }
716 764
717 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ 765 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
718 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ 766 /* get rid of LLC/SNAP header */
767 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
768 /* LLC/SNAP is shorter than MAC header :( */
719 dev_kfree_skb_any(skb); 769 dev_kfree_skb_any(skb);
720 if (new_skb == NULL){ 770 if (new_skb == NULL) {
721 mpc->eg_ops->put(eg); 771 mpc->eg_ops->put(eg);
722 return; 772 return;
723 } 773 }
@@ -750,7 +800,7 @@ static struct atm_dev mpc_dev = {
750 /* members not explicitly initialised will be 0 */ 800 /* members not explicitly initialised will be 0 */
751}; 801};
752 802
753static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg) 803static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
754{ 804{
755 struct mpoa_client *mpc; 805 struct mpoa_client *mpc;
756 struct lec_priv *priv; 806 struct lec_priv *priv;
@@ -770,15 +820,16 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
770 820
771 mpc = find_mpc_by_itfnum(arg); 821 mpc = find_mpc_by_itfnum(arg);
772 if (mpc == NULL) { 822 if (mpc == NULL) {
773 dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg); 823 dprintk("allocating new mpc for itf %d\n", arg);
774 mpc = alloc_mpc(); 824 mpc = alloc_mpc();
775 if (mpc == NULL) 825 if (mpc == NULL)
776 return -ENOMEM; 826 return -ENOMEM;
777 mpc->dev_num = arg; 827 mpc->dev_num = arg;
778 mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */ 828 mpc->dev = find_lec_by_itfnum(arg);
829 /* NULL if there was no lec */
779 } 830 }
780 if (mpc->mpoad_vcc) { 831 if (mpc->mpoad_vcc) {
781 printk("mpoa: mpoad_attach: mpoad is already present for itf %d\n", arg); 832 pr_info("mpoad is already present for itf %d\n", arg);
782 return -EADDRINUSE; 833 return -EADDRINUSE;
783 } 834 }
784 835
@@ -794,8 +845,8 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
794 mpc->mpoad_vcc = vcc; 845 mpc->mpoad_vcc = vcc;
795 vcc->dev = &mpc_dev; 846 vcc->dev = &mpc_dev;
796 vcc_insert_socket(sk_atm(vcc)); 847 vcc_insert_socket(sk_atm(vcc));
797 set_bit(ATM_VF_META,&vcc->flags); 848 set_bit(ATM_VF_META, &vcc->flags);
798 set_bit(ATM_VF_READY,&vcc->flags); 849 set_bit(ATM_VF_READY, &vcc->flags);
799 850
800 if (mpc->dev) { 851 if (mpc->dev) {
801 char empty[ATM_ESA_LEN]; 852 char empty[ATM_ESA_LEN];
@@ -805,7 +856,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
805 /* set address if mpcd e.g. gets killed and restarted. 856 /* set address if mpcd e.g. gets killed and restarted.
806 * If we do not do it now we have to wait for the next LE_ARP 857 * If we do not do it now we have to wait for the next LE_ARP
807 */ 858 */
808 if ( memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0 ) 859 if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
809 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc); 860 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
810 } 861 }
811 862
@@ -817,7 +868,7 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
817{ 868{
818 struct k_message mesg; 869 struct k_message mesg;
819 870
820 memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN); 871 memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
821 872
822 mesg.type = SET_MPS_CTRL_ADDR; 873 mesg.type = SET_MPS_CTRL_ADDR;
823 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); 874 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
@@ -833,11 +884,11 @@ static void mpoad_close(struct atm_vcc *vcc)
833 884
834 mpc = find_mpc_by_vcc(vcc); 885 mpc = find_mpc_by_vcc(vcc);
835 if (mpc == NULL) { 886 if (mpc == NULL) {
836 printk("mpoa: mpoad_close: did not find MPC\n"); 887 pr_info("did not find MPC\n");
837 return; 888 return;
838 } 889 }
839 if (!mpc->mpoad_vcc) { 890 if (!mpc->mpoad_vcc) {
840 printk("mpoa: mpoad_close: close for non-present mpoad\n"); 891 pr_info("close for non-present mpoad\n");
841 return; 892 return;
842 } 893 }
843 894
@@ -857,7 +908,7 @@ static void mpoad_close(struct atm_vcc *vcc)
857 kfree_skb(skb); 908 kfree_skb(skb);
858 } 909 }
859 910
860 printk("mpoa: (%s) going down\n", 911 pr_info("(%s) going down\n",
861 (mpc->dev) ? mpc->dev->name : "<unknown>"); 912 (mpc->dev) ? mpc->dev->name : "<unknown>");
862 module_put(THIS_MODULE); 913 module_put(THIS_MODULE);
863 914
@@ -871,61 +922,61 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
871{ 922{
872 923
873 struct mpoa_client *mpc = find_mpc_by_vcc(vcc); 924 struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
874 struct k_message *mesg = (struct k_message*)skb->data; 925 struct k_message *mesg = (struct k_message *)skb->data;
875 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 926 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
876 927
877 if (mpc == NULL) { 928 if (mpc == NULL) {
878 printk("mpoa: msg_from_mpoad: no mpc found\n"); 929 pr_info("no mpc found\n");
879 return 0; 930 return 0;
880 } 931 }
881 dprintk("mpoa: (%s) msg_from_mpoad:", (mpc->dev) ? mpc->dev->name : "<unknown>"); 932 dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
882 switch(mesg->type) { 933 switch (mesg->type) {
883 case MPOA_RES_REPLY_RCVD: 934 case MPOA_RES_REPLY_RCVD:
884 dprintk(" mpoa_res_reply_rcvd\n"); 935 dprintk_cont("mpoa_res_reply_rcvd\n");
885 MPOA_res_reply_rcvd(mesg, mpc); 936 MPOA_res_reply_rcvd(mesg, mpc);
886 break; 937 break;
887 case MPOA_TRIGGER_RCVD: 938 case MPOA_TRIGGER_RCVD:
888 dprintk(" mpoa_trigger_rcvd\n"); 939 dprintk_cont("mpoa_trigger_rcvd\n");
889 MPOA_trigger_rcvd(mesg, mpc); 940 MPOA_trigger_rcvd(mesg, mpc);
890 break; 941 break;
891 case INGRESS_PURGE_RCVD: 942 case INGRESS_PURGE_RCVD:
892 dprintk(" nhrp_purge_rcvd\n"); 943 dprintk_cont("nhrp_purge_rcvd\n");
893 ingress_purge_rcvd(mesg, mpc); 944 ingress_purge_rcvd(mesg, mpc);
894 break; 945 break;
895 case EGRESS_PURGE_RCVD: 946 case EGRESS_PURGE_RCVD:
896 dprintk(" egress_purge_reply_rcvd\n"); 947 dprintk_cont("egress_purge_reply_rcvd\n");
897 egress_purge_rcvd(mesg, mpc); 948 egress_purge_rcvd(mesg, mpc);
898 break; 949 break;
899 case MPS_DEATH: 950 case MPS_DEATH:
900 dprintk(" mps_death\n"); 951 dprintk_cont("mps_death\n");
901 mps_death(mesg, mpc); 952 mps_death(mesg, mpc);
902 break; 953 break;
903 case CACHE_IMPOS_RCVD: 954 case CACHE_IMPOS_RCVD:
904 dprintk(" cache_impos_rcvd\n"); 955 dprintk_cont("cache_impos_rcvd\n");
905 MPOA_cache_impos_rcvd(mesg, mpc); 956 MPOA_cache_impos_rcvd(mesg, mpc);
906 break; 957 break;
907 case SET_MPC_CTRL_ADDR: 958 case SET_MPC_CTRL_ADDR:
908 dprintk(" set_mpc_ctrl_addr\n"); 959 dprintk_cont("set_mpc_ctrl_addr\n");
909 set_mpc_ctrl_addr_rcvd(mesg, mpc); 960 set_mpc_ctrl_addr_rcvd(mesg, mpc);
910 break; 961 break;
911 case SET_MPS_MAC_ADDR: 962 case SET_MPS_MAC_ADDR:
912 dprintk(" set_mps_mac_addr\n"); 963 dprintk_cont("set_mps_mac_addr\n");
913 set_mps_mac_addr_rcvd(mesg, mpc); 964 set_mps_mac_addr_rcvd(mesg, mpc);
914 break; 965 break;
915 case CLEAN_UP_AND_EXIT: 966 case CLEAN_UP_AND_EXIT:
916 dprintk(" clean_up_and_exit\n"); 967 dprintk_cont("clean_up_and_exit\n");
917 clean_up(mesg, mpc, DIE); 968 clean_up(mesg, mpc, DIE);
918 break; 969 break;
919 case RELOAD: 970 case RELOAD:
920 dprintk(" reload\n"); 971 dprintk_cont("reload\n");
921 clean_up(mesg, mpc, RELOAD); 972 clean_up(mesg, mpc, RELOAD);
922 break; 973 break;
923 case SET_MPC_PARAMS: 974 case SET_MPC_PARAMS:
924 dprintk(" set_mpc_params\n"); 975 dprintk_cont("set_mpc_params\n");
925 mpc->parameters = mesg->content.params; 976 mpc->parameters = mesg->content.params;
926 break; 977 break;
927 default: 978 default:
928 dprintk(" unknown message %d\n", mesg->type); 979 dprintk_cont("unknown message %d\n", mesg->type);
929 break; 980 break;
930 } 981 }
931 kfree_skb(skb); 982 kfree_skb(skb);
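The switch arms in the hunk above print their text with dprintk_cont() after dprintk() has started the line. The definition of dprintk_cont() is not visible in this excerpt, so its exact form is an assumption; a minimal sketch of what such a continuation helper would look like, matching the compile-out style of the other debug macros in this patch:

/* Assumed definition, not shown in this excerpt: continue the line started by
 * dprintk() without emitting a new log prefix. A debug build would drop the
 * "if (0)" so the printk actually fires.
 */
#define dprintk_cont(format, args...) \
	do { if (0) printk(KERN_CONT format, ##args); } while (0)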
@@ -940,7 +991,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
940 struct sock *sk; 991 struct sock *sk;
941 992
942 if (mpc == NULL || !mpc->mpoad_vcc) { 993 if (mpc == NULL || !mpc->mpoad_vcc) {
943 printk("mpoa: msg_to_mpoad: mesg %d to a non-existent mpoad\n", mesg->type); 994 pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
944 return -ENXIO; 995 return -ENXIO;
945 } 996 }
946 997
@@ -958,7 +1009,8 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
958 return 0; 1009 return 0;
959} 1010}
960 1011
961static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr) 1012static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
1013 unsigned long event, void *dev_ptr)
962{ 1014{
963 struct net_device *dev; 1015 struct net_device *dev;
964 struct mpoa_client *mpc; 1016 struct mpoa_client *mpc;
@@ -980,25 +1032,24 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
980 priv->lane2_ops->associate_indicator = lane2_assoc_ind; 1032 priv->lane2_ops->associate_indicator = lane2_assoc_ind;
981 mpc = find_mpc_by_itfnum(priv->itfnum); 1033 mpc = find_mpc_by_itfnum(priv->itfnum);
982 if (mpc == NULL) { 1034 if (mpc == NULL) {
983 dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n", 1035 dprintk("allocating new mpc for %s\n", dev->name);
984 dev->name);
985 mpc = alloc_mpc(); 1036 mpc = alloc_mpc();
986 if (mpc == NULL) { 1037 if (mpc == NULL) {
987 printk("mpoa: mpoa_event_listener: no new mpc"); 1038 pr_info("no new mpc");
988 break; 1039 break;
989 } 1040 }
990 } 1041 }
991 mpc->dev_num = priv->itfnum; 1042 mpc->dev_num = priv->itfnum;
992 mpc->dev = dev; 1043 mpc->dev = dev;
993 dev_hold(dev); 1044 dev_hold(dev);
994 dprintk("mpoa: (%s) was initialized\n", dev->name); 1045 dprintk("(%s) was initialized\n", dev->name);
995 break; 1046 break;
996 case NETDEV_UNREGISTER: 1047 case NETDEV_UNREGISTER:
997 /* the lec device was deallocated */ 1048 /* the lec device was deallocated */
998 mpc = find_mpc_by_lec(dev); 1049 mpc = find_mpc_by_lec(dev);
999 if (mpc == NULL) 1050 if (mpc == NULL)
1000 break; 1051 break;
1001 dprintk("mpoa: device (%s) was deallocated\n", dev->name); 1052 dprintk("device (%s) was deallocated\n", dev->name);
1002 stop_mpc(mpc); 1053 stop_mpc(mpc);
1003 dev_put(mpc->dev); 1054 dev_put(mpc->dev);
1004 mpc->dev = NULL; 1055 mpc->dev = NULL;
@@ -1008,9 +1059,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1008 mpc = find_mpc_by_lec(dev); 1059 mpc = find_mpc_by_lec(dev);
1009 if (mpc == NULL) 1060 if (mpc == NULL)
1010 break; 1061 break;
1011 if (mpc->mpoad_vcc != NULL) { 1062 if (mpc->mpoad_vcc != NULL)
1012 start_mpc(mpc, dev); 1063 start_mpc(mpc, dev);
1013 }
1014 break; 1064 break;
1015 case NETDEV_DOWN: 1065 case NETDEV_DOWN:
1016 /* the dev was ifconfig'ed down */ 1066 /* the dev was ifconfig'ed down */
@@ -1020,9 +1070,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1020 mpc = find_mpc_by_lec(dev); 1070 mpc = find_mpc_by_lec(dev);
1021 if (mpc == NULL) 1071 if (mpc == NULL)
1022 break; 1072 break;
1023 if (mpc->mpoad_vcc != NULL) { 1073 if (mpc->mpoad_vcc != NULL)
1024 stop_mpc(mpc); 1074 stop_mpc(mpc);
1025 }
1026 break; 1075 break;
1027 case NETDEV_REBOOT: 1076 case NETDEV_REBOOT:
1028 case NETDEV_CHANGE: 1077 case NETDEV_CHANGE:
@@ -1049,7 +1098,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1049 in_cache_entry *entry; 1098 in_cache_entry *entry;
1050 1099
1051 entry = mpc->in_ops->get(dst_ip, mpc); 1100 entry = mpc->in_ops->get(dst_ip, mpc);
1052 if(entry == NULL){ 1101 if (entry == NULL) {
1053 entry = mpc->in_ops->add_entry(dst_ip, mpc); 1102 entry = mpc->in_ops->add_entry(dst_ip, mpc);
1054 entry->entry_state = INGRESS_RESOLVING; 1103 entry->entry_state = INGRESS_RESOLVING;
1055 msg->type = SND_MPOA_RES_RQST; 1104 msg->type = SND_MPOA_RES_RQST;
@@ -1060,7 +1109,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1060 return; 1109 return;
1061 } 1110 }
1062 1111
1063 if(entry->entry_state == INGRESS_INVALID){ 1112 if (entry->entry_state == INGRESS_INVALID) {
1064 entry->entry_state = INGRESS_RESOLVING; 1113 entry->entry_state = INGRESS_RESOLVING;
1065 msg->type = SND_MPOA_RES_RQST; 1114 msg->type = SND_MPOA_RES_RQST;
1066 msg->content.in_info = entry->ctrl_info; 1115 msg->content.in_info = entry->ctrl_info;
@@ -1070,7 +1119,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1070 return; 1119 return;
1071 } 1120 }
1072 1121
1073 printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n", 1122 pr_info("(%s) entry already in resolving state\n",
1074 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1123 (mpc->dev) ? mpc->dev->name : "<unknown>");
1075 mpc->in_ops->put(entry); 1124 mpc->in_ops->put(entry);
1076 return; 1125 return;
@@ -1080,23 +1129,25 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1080 * Things get complicated because we have to check if there's an egress 1129 * Things get complicated because we have to check if there's an egress
1081 * shortcut with suitable traffic parameters we could use. 1130 * shortcut with suitable traffic parameters we could use.
1082 */ 1131 */
1083static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) 1132static void check_qos_and_open_shortcut(struct k_message *msg,
1133 struct mpoa_client *client,
1134 in_cache_entry *entry)
1084{ 1135{
1085 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1136 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1086 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip); 1137 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
1087 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client); 1138 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
1088 1139
1089 if(eg_entry && eg_entry->shortcut){ 1140 if (eg_entry && eg_entry->shortcut) {
1090 if(eg_entry->shortcut->qos.txtp.traffic_class & 1141 if (eg_entry->shortcut->qos.txtp.traffic_class &
1091 msg->qos.txtp.traffic_class & 1142 msg->qos.txtp.traffic_class &
1092 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)){ 1143 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
1093 if(eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR) 1144 if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
1094 entry->shortcut = eg_entry->shortcut; 1145 entry->shortcut = eg_entry->shortcut;
1095 else if(eg_entry->shortcut->qos.txtp.max_pcr > 0) 1146 else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
1096 entry->shortcut = eg_entry->shortcut; 1147 entry->shortcut = eg_entry->shortcut;
1097 } 1148 }
1098 if(entry->shortcut){ 1149 if (entry->shortcut) {
1099 dprintk("mpoa: (%s) using egress SVC to reach %pI4\n", 1150 dprintk("(%s) using egress SVC to reach %pI4\n",
1100 client->dev->name, &dst_ip); 1151 client->dev->name, &dst_ip);
1101 client->eg_ops->put(eg_entry); 1152 client->eg_ops->put(eg_entry);
1102 return; 1153 return;
@@ -1107,12 +1158,13 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1107 1158
1108 /* No luck in the egress cache we must open an ingress SVC */ 1159 /* No luck in the egress cache we must open an ingress SVC */
1109 msg->type = OPEN_INGRESS_SVC; 1160 msg->type = OPEN_INGRESS_SVC;
1110 if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) 1161 if (qos &&
1111 { 1162 (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
1112 msg->qos = qos->qos; 1163 msg->qos = qos->qos;
1113 printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name); 1164 pr_info("(%s) trying to get a CBR shortcut\n",
1114 } 1165 client->dev->name);
1115 else memset(&msg->qos,0,sizeof(struct atm_qos)); 1166 } else
1167 memset(&msg->qos, 0, sizeof(struct atm_qos));
1116 msg_to_mpoad(msg, client); 1168 msg_to_mpoad(msg, client);
1117 return; 1169 return;
1118} 1170}
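The test in check_qos_and_open_shortcut() above reuses an existing egress SVC only when its ATM traffic class is acceptable to all three parties: the shortcut itself, the resolution request, and any per-destination QoS entry (defaulting to UBR or CBR when none is configured). A minimal sketch of that predicate, with a hypothetical helper name; struct k_message and struct atm_mpoa_qos come from the local mpc.h/mpoa_caches.h headers:

#include <linux/atmdev.h>	/* struct atm_vcc, ATM_UBR, ATM_CBR */

/* Hypothetical helper illustrating the compatibility check above. */
static int shortcut_class_ok(const struct atm_vcc *shortcut,
			     const struct k_message *msg,
			     const struct atm_mpoa_qos *qos)
{
	int wanted = qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR;

	return shortcut->qos.txtp.traffic_class &
	       msg->qos.txtp.traffic_class & wanted;
}

When the classes are compatible, the code above still adopts the shortcut only if it is UBR or has a positive peak cell rate (max_pcr), as the two branches in the hunk show.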
@@ -1122,17 +1174,19 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1122 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1174 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1123 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); 1175 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
1124 1176
1125 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %pI4\n", 1177 dprintk("(%s) ip %pI4\n",
1126 mpc->dev->name, &dst_ip); 1178 mpc->dev->name, &dst_ip);
1127 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry); 1179 ddprintk("(%s) entry = %p",
1128 if(entry == NULL){ 1180 mpc->dev->name, entry);
1129 printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name); 1181 if (entry == NULL) {
1182 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
1183 mpc->dev->name);
1130 return; 1184 return;
1131 } 1185 }
1132 ddprintk(" entry_state = %d ", entry->entry_state); 1186 ddprintk_cont(" entry_state = %d ", entry->entry_state);
1133 1187
1134 if (entry->entry_state == INGRESS_RESOLVED) { 1188 if (entry->entry_state == INGRESS_RESOLVED) {
1135 printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name); 1189 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
1136 mpc->in_ops->put(entry); 1190 mpc->in_ops->put(entry);
1137 return; 1191 return;
1138 } 1192 }
@@ -1141,17 +1195,18 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1141 do_gettimeofday(&(entry->tv)); 1195 do_gettimeofday(&(entry->tv));
1142 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ 1196 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
1143 entry->refresh_time = 0; 1197 entry->refresh_time = 0;
1144 ddprintk("entry->shortcut = %p\n", entry->shortcut); 1198 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
1145 1199
1146 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){ 1200 if (entry->entry_state == INGRESS_RESOLVING &&
1201 entry->shortcut != NULL) {
1147 entry->entry_state = INGRESS_RESOLVED; 1202 entry->entry_state = INGRESS_RESOLVED;
1148 mpc->in_ops->put(entry); 1203 mpc->in_ops->put(entry);
1149 return; /* Shortcut already open... */ 1204 return; /* Shortcut already open... */
1150 } 1205 }
1151 1206
1152 if (entry->shortcut != NULL) { 1207 if (entry->shortcut != NULL) {
1153 printk("mpoa: (%s) MPOA_res_reply_rcvd: entry->shortcut != NULL, impossible!\n", 1208 pr_info("(%s) entry->shortcut != NULL, impossible!\n",
1154 mpc->dev->name); 1209 mpc->dev->name);
1155 mpc->in_ops->put(entry); 1210 mpc->in_ops->put(entry);
1156 return; 1211 return;
1157 } 1212 }
@@ -1170,14 +1225,14 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1170 __be32 mask = msg->ip_mask; 1225 __be32 mask = msg->ip_mask;
1171 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); 1226 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
1172 1227
1173 if(entry == NULL){ 1228 if (entry == NULL) {
1174 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ip = %pI4\n", 1229 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
1175 mpc->dev->name, &dst_ip); 1230 mpc->dev->name, &dst_ip);
1176 return; 1231 return;
1177 } 1232 }
1178 1233
1179 do { 1234 do {
1180 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %pI4\n", 1235 dprintk("(%s) removing an ingress entry, ip = %pI4\n",
1181 mpc->dev->name, &dst_ip); 1236 mpc->dev->name, &dst_ip);
1182 write_lock_bh(&mpc->ingress_lock); 1237 write_lock_bh(&mpc->ingress_lock);
1183 mpc->in_ops->remove_entry(entry, mpc); 1238 mpc->in_ops->remove_entry(entry, mpc);
@@ -1195,7 +1250,8 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1195 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); 1250 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
1196 1251
1197 if (entry == NULL) { 1252 if (entry == NULL) {
1198 dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name); 1253 dprintk("(%s) purge for a non-existing entry\n",
1254 mpc->dev->name);
1199 return; 1255 return;
1200 } 1256 }
1201 1257
@@ -1214,15 +1270,15 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1214 struct k_message *purge_msg; 1270 struct k_message *purge_msg;
1215 struct sk_buff *skb; 1271 struct sk_buff *skb;
1216 1272
1217 dprintk("mpoa: purge_egress_shortcut: entering\n"); 1273 dprintk("entering\n");
1218 if (vcc == NULL) { 1274 if (vcc == NULL) {
1219 printk("mpoa: purge_egress_shortcut: vcc == NULL\n"); 1275 pr_info("vcc == NULL\n");
1220 return; 1276 return;
1221 } 1277 }
1222 1278
1223 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); 1279 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
1224 if (skb == NULL) { 1280 if (skb == NULL) {
1225 printk("mpoa: purge_egress_shortcut: out of memory\n"); 1281 pr_info("out of memory\n");
1226 return; 1282 return;
1227 } 1283 }
1228 1284
@@ -1238,7 +1294,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1238 sk = sk_atm(vcc); 1294 sk = sk_atm(vcc);
1239 skb_queue_tail(&sk->sk_receive_queue, skb); 1295 skb_queue_tail(&sk->sk_receive_queue, skb);
1240 sk->sk_data_ready(sk, skb->len); 1296 sk->sk_data_ready(sk, skb->len);
1241 dprintk("mpoa: purge_egress_shortcut: exiting:\n"); 1297 dprintk("exiting\n");
1242 1298
1243 return; 1299 return;
1244} 1300}
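purge_egress_shortcut() above follows the same pattern msg_to_mpoad() uses further down: build a struct k_message, wrap it in an skb, charge it to the control VCC, and wake the mpoad daemon through the socket's receive queue. A condensed sketch of that sequence with a hypothetical helper name, mirroring the calls visible in the hunks rather than adding anything new:

#include <linux/atmdev.h>	/* struct atm_vcc, sk_atm(), atm_force_charge() */
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical helper; mirrors the skb handling shown above. */
static void queue_msg_to_mpoad(struct atm_vcc *vcc, const struct k_message *msg)
{
	struct sk_buff *skb = alloc_skb(sizeof(*msg), GFP_ATOMIC);
	struct sock *sk;

	if (skb == NULL)
		return;
	skb_put(skb, sizeof(*msg));
	memcpy(skb->data, msg, sizeof(*msg));
	atm_force_charge(vcc, skb->truesize);	/* account the skb to the VCC */

	sk = sk_atm(vcc);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);	/* wake up mpoad */
}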
@@ -1247,14 +1303,14 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1247 * Our MPS died. Tell our daemon to send NHRP data plane purge to each 1303 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
1248 * of the egress shortcuts we have. 1304 * of the egress shortcuts we have.
1249 */ 1305 */
1250static void mps_death( struct k_message * msg, struct mpoa_client * mpc ) 1306static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
1251{ 1307{
1252 eg_cache_entry *entry; 1308 eg_cache_entry *entry;
1253 1309
1254 dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name); 1310 dprintk("(%s)\n", mpc->dev->name);
1255 1311
1256 if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){ 1312 if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
1257 printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name); 1313 pr_info("(%s) wrong MPS\n", mpc->dev->name);
1258 return; 1314 return;
1259 } 1315 }
1260 1316
@@ -1273,20 +1329,21 @@ static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
1273 return; 1329 return;
1274} 1330}
1275 1331
1276static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client * mpc) 1332static void MPOA_cache_impos_rcvd(struct k_message *msg,
1333 struct mpoa_client *mpc)
1277{ 1334{
1278 uint16_t holding_time; 1335 uint16_t holding_time;
1279 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); 1336 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
1280 1337
1281 holding_time = msg->content.eg_info.holding_time; 1338 holding_time = msg->content.eg_info.holding_time;
1282 dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n", 1339 dprintk("(%s) entry = %p, holding_time = %u\n",
1283 mpc->dev->name, entry, holding_time); 1340 mpc->dev->name, entry, holding_time);
1284 if(entry == NULL && holding_time) { 1341 if (entry == NULL && holding_time) {
1285 entry = mpc->eg_ops->add_entry(msg, mpc); 1342 entry = mpc->eg_ops->add_entry(msg, mpc);
1286 mpc->eg_ops->put(entry); 1343 mpc->eg_ops->put(entry);
1287 return; 1344 return;
1288 } 1345 }
1289 if(holding_time){ 1346 if (holding_time) {
1290 mpc->eg_ops->update(entry, holding_time); 1347 mpc->eg_ops->update(entry, holding_time);
1291 return; 1348 return;
1292 } 1349 }
@@ -1300,7 +1357,8 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1300 return; 1357 return;
1301} 1358}
1302 1359
1303static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc) 1360static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
1361 struct mpoa_client *mpc)
1304{ 1362{
1305 struct lec_priv *priv; 1363 struct lec_priv *priv;
1306 int i, retval ; 1364 int i, retval ;
@@ -1315,34 +1373,39 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *m
1315 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */ 1373 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
1316 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN); 1374 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
1317 1375
1318 dprintk("mpoa: (%s) setting MPC ctrl ATM address to ", 1376 dprintk("(%s) setting MPC ctrl ATM address to",
1319 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1377 mpc->dev ? mpc->dev->name : "<unknown>");
1320 for (i = 7; i < sizeof(tlv); i++) 1378 for (i = 7; i < sizeof(tlv); i++)
1321 dprintk("%02x ", tlv[i]); 1379 dprintk_cont(" %02x", tlv[i]);
1322 dprintk("\n"); 1380 dprintk_cont("\n");
1323 1381
1324 if (mpc->dev) { 1382 if (mpc->dev) {
1325 priv = netdev_priv(mpc->dev); 1383 priv = netdev_priv(mpc->dev);
1326 retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv)); 1384 retval = priv->lane2_ops->associate_req(mpc->dev,
1385 mpc->dev->dev_addr,
1386 tlv, sizeof(tlv));
1327 if (retval == 0) 1387 if (retval == 0)
1328 printk("mpoa: (%s) MPOA device type TLV association failed\n", mpc->dev->name); 1388 pr_info("(%s) MPOA device type TLV association failed\n",
1389 mpc->dev->name);
1329 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL); 1390 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
1330 if (retval < 0) 1391 if (retval < 0)
1331 printk("mpoa: (%s) targetless LE_ARP request failed\n", mpc->dev->name); 1392 pr_info("(%s) targetless LE_ARP request failed\n",
1393 mpc->dev->name);
1332 } 1394 }
1333 1395
1334 return; 1396 return;
1335} 1397}
1336 1398
1337static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client) 1399static void set_mps_mac_addr_rcvd(struct k_message *msg,
1400 struct mpoa_client *client)
1338{ 1401{
1339 1402
1340 if(client->number_of_mps_macs) 1403 if (client->number_of_mps_macs)
1341 kfree(client->mps_macs); 1404 kfree(client->mps_macs);
1342 client->number_of_mps_macs = 0; 1405 client->number_of_mps_macs = 0;
1343 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL); 1406 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
1344 if (client->mps_macs == NULL) { 1407 if (client->mps_macs == NULL) {
1345 printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n"); 1408 pr_info("out of memory\n");
1346 return; 1409 return;
1347 } 1410 }
1348 client->number_of_mps_macs = 1; 1411 client->number_of_mps_macs = 1;
@@ -1363,11 +1426,11 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
1363 /* FIXME: This knows too much of the cache structure */ 1426 /* FIXME: This knows too much of the cache structure */
1364 read_lock_irq(&mpc->egress_lock); 1427 read_lock_irq(&mpc->egress_lock);
1365 entry = mpc->eg_cache; 1428 entry = mpc->eg_cache;
1366 while (entry != NULL){ 1429 while (entry != NULL) {
1367 msg->content.eg_info = entry->ctrl_info; 1430 msg->content.eg_info = entry->ctrl_info;
1368 dprintk("mpoa: cache_id %u\n", entry->ctrl_info.cache_id); 1431 dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
1369 msg_to_mpoad(msg, mpc); 1432 msg_to_mpoad(msg, mpc);
1370 entry = entry->next; 1433 entry = entry->next;
1371 } 1434 }
1372 read_unlock_irq(&mpc->egress_lock); 1435 read_unlock_irq(&mpc->egress_lock);
1373 1436
@@ -1386,20 +1449,22 @@ static void mpc_timer_refresh(void)
1386 return; 1449 return;
1387} 1450}
1388 1451
1389static void mpc_cache_check( unsigned long checking_time ) 1452static void mpc_cache_check(unsigned long checking_time)
1390{ 1453{
1391 struct mpoa_client *mpc = mpcs; 1454 struct mpoa_client *mpc = mpcs;
1392 static unsigned long previous_resolving_check_time; 1455 static unsigned long previous_resolving_check_time;
1393 static unsigned long previous_refresh_time; 1456 static unsigned long previous_refresh_time;
1394 1457
1395 while( mpc != NULL ){ 1458 while (mpc != NULL) {
1396 mpc->in_ops->clear_count(mpc); 1459 mpc->in_ops->clear_count(mpc);
1397 mpc->eg_ops->clear_expired(mpc); 1460 mpc->eg_ops->clear_expired(mpc);
1398 if(checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ ){ 1461 if (checking_time - previous_resolving_check_time >
1462 mpc->parameters.mpc_p4 * HZ) {
1399 mpc->in_ops->check_resolving(mpc); 1463 mpc->in_ops->check_resolving(mpc);
1400 previous_resolving_check_time = checking_time; 1464 previous_resolving_check_time = checking_time;
1401 } 1465 }
1402 if(checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ ){ 1466 if (checking_time - previous_refresh_time >
1467 mpc->parameters.mpc_p5 * HZ) {
1403 mpc->in_ops->refresh(mpc); 1468 mpc->in_ops->refresh(mpc);
1404 previous_refresh_time = checking_time; 1469 previous_refresh_time = checking_time;
1405 } 1470 }
@@ -1410,7 +1475,8 @@ static void mpc_cache_check( unsigned long checking_time )
1410 return; 1475 return;
1411} 1476}
1412 1477
1413static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1478static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
1479 unsigned long arg)
1414{ 1480{
1415 int err = 0; 1481 int err = 0;
1416 struct atm_vcc *vcc = ATM_SD(sock); 1482 struct atm_vcc *vcc = ATM_SD(sock);
@@ -1422,21 +1488,20 @@ static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
1422 return -EPERM; 1488 return -EPERM;
1423 1489
1424 switch (cmd) { 1490 switch (cmd) {
1425 case ATMMPC_CTRL: 1491 case ATMMPC_CTRL:
1426 err = atm_mpoa_mpoad_attach(vcc, (int)arg); 1492 err = atm_mpoa_mpoad_attach(vcc, (int)arg);
1427 if (err >= 0) 1493 if (err >= 0)
1428 sock->state = SS_CONNECTED; 1494 sock->state = SS_CONNECTED;
1429 break; 1495 break;
1430 case ATMMPC_DATA: 1496 case ATMMPC_DATA:
1431 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg); 1497 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
1432 break; 1498 break;
1433 default: 1499 default:
1434 break; 1500 break;
1435 } 1501 }
1436 return err; 1502 return err;
1437} 1503}
1438 1504
1439
1440static struct atm_ioctl atm_ioctl_ops = { 1505static struct atm_ioctl atm_ioctl_ops = {
1441 .owner = THIS_MODULE, 1506 .owner = THIS_MODULE,
1442 .ioctl = atm_mpoa_ioctl, 1507 .ioctl = atm_mpoa_ioctl,
@@ -1447,9 +1512,9 @@ static __init int atm_mpoa_init(void)
1447 register_atm_ioctl(&atm_ioctl_ops); 1512 register_atm_ioctl(&atm_ioctl_ops);
1448 1513
1449 if (mpc_proc_init() != 0) 1514 if (mpc_proc_init() != 0)
1450 printk(KERN_INFO "mpoa: failed to initialize /proc/mpoa\n"); 1515 pr_info("failed to initialize /proc/mpoa\n");
1451 1516
1452 printk("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1517 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
1453 1518
1454 return 0; 1519 return 0;
1455} 1520}
@@ -1476,15 +1541,15 @@ static void __exit atm_mpoa_cleanup(void)
1476 if (priv->lane2_ops != NULL) 1541 if (priv->lane2_ops != NULL)
1477 priv->lane2_ops->associate_indicator = NULL; 1542 priv->lane2_ops->associate_indicator = NULL;
1478 } 1543 }
1479 ddprintk("mpoa: cleanup_module: about to clear caches\n"); 1544 ddprintk("about to clear caches\n");
1480 mpc->in_ops->destroy_cache(mpc); 1545 mpc->in_ops->destroy_cache(mpc);
1481 mpc->eg_ops->destroy_cache(mpc); 1546 mpc->eg_ops->destroy_cache(mpc);
1482 ddprintk("mpoa: cleanup_module: caches cleared\n"); 1547 ddprintk("caches cleared\n");
1483 kfree(mpc->mps_macs); 1548 kfree(mpc->mps_macs);
1484 memset(mpc, 0, sizeof(struct mpoa_client)); 1549 memset(mpc, 0, sizeof(struct mpoa_client));
1485 ddprintk("mpoa: cleanup_module: about to kfree %p\n", mpc); 1550 ddprintk("about to kfree %p\n", mpc);
1486 kfree(mpc); 1551 kfree(mpc);
1487 ddprintk("mpoa: cleanup_module: next mpc is at %p\n", tmp); 1552 ddprintk("next mpc is at %p\n", tmp);
1488 mpc = tmp; 1553 mpc = tmp;
1489 } 1554 }
1490 1555
@@ -1492,7 +1557,7 @@ static void __exit atm_mpoa_cleanup(void)
1492 qos_head = NULL; 1557 qos_head = NULL;
1493 while (qos != NULL) { 1558 while (qos != NULL) {
1494 nextqos = qos->next; 1559 nextqos = qos->next;
1495 dprintk("mpoa: cleanup_module: freeing qos entry %p\n", qos); 1560 dprintk("freeing qos entry %p\n", qos);
1496 kfree(qos); 1561 kfree(qos);
1497 qos = nextqos; 1562 qos = nextqos;
1498 } 1563 }
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4504a4b339bb..e773d8336918 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -1,5 +1,6 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/atmmpc.h> 2#include <linux/atmmpc.h>
3#include <linux/slab.h>
3#include <linux/time.h> 4#include <linux/time.h>
4 5
5#include "mpoa_caches.h" 6#include "mpoa_caches.h"
@@ -11,15 +12,23 @@
11 */ 12 */
12 13
13#if 0 14#if 0
14#define dprintk printk /* debug */ 15#define dprintk(format, args...) \
16 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
15#else 17#else
16#define dprintk(format,args...) 18#define dprintk(format, args...) \
19 do { if (0) \
20 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
21 } while (0)
17#endif 22#endif
18 23
19#if 0 24#if 0
20#define ddprintk printk /* more debug */ 25#define ddprintk(format, args...) \
26 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
21#else 27#else
22#define ddprintk(format,args...) 28#define ddprintk(format, args...) \
29 do { if (0) \
30 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
31 } while (0)
23#endif 32#endif
24 33
25static in_cache_entry *in_cache_get(__be32 dst_ip, 34static in_cache_entry *in_cache_get(__be32 dst_ip,
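The rewritten dprintk()/ddprintk() macros just above replace the old empty-expansion macros with a do { if (0) printk(...); } while (0) form. The point of the change is that the arguments stay visible to the compiler, so format-string mismatches and otherwise-unused variables are still diagnosed in non-debug builds, while the optimizer removes the call entirely. A minimal standalone illustration of the same idiom:

#include <linux/kernel.h>

#define dbg(fmt, args...) \
	do { if (0) printk(KERN_DEBUG "mpoa:%s: " fmt, __FILE__, ##args); } while (0)

static void demo(unsigned int cache_id)
{
	/* Compiles to nothing, but a bad format (e.g. %s here) would still warn. */
	dbg("cache_id %u\n", cache_id);
}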
@@ -29,8 +38,8 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
29 38
30 read_lock_bh(&client->ingress_lock); 39 read_lock_bh(&client->ingress_lock);
31 entry = client->in_cache; 40 entry = client->in_cache;
32 while(entry != NULL){ 41 while (entry != NULL) {
33 if( entry->ctrl_info.in_dst_ip == dst_ip ){ 42 if (entry->ctrl_info.in_dst_ip == dst_ip) {
34 atomic_inc(&entry->use); 43 atomic_inc(&entry->use);
35 read_unlock_bh(&client->ingress_lock); 44 read_unlock_bh(&client->ingress_lock);
36 return entry; 45 return entry;
@@ -50,8 +59,8 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
50 59
51 read_lock_bh(&client->ingress_lock); 60 read_lock_bh(&client->ingress_lock);
52 entry = client->in_cache; 61 entry = client->in_cache;
53 while(entry != NULL){ 62 while (entry != NULL) {
54 if((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask )){ 63 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
55 atomic_inc(&entry->use); 64 atomic_inc(&entry->use);
56 read_unlock_bh(&client->ingress_lock); 65 read_unlock_bh(&client->ingress_lock);
57 return entry; 66 return entry;
@@ -65,14 +74,14 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
65} 74}
66 75
67static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc, 76static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
68 struct mpoa_client *client ) 77 struct mpoa_client *client)
69{ 78{
70 in_cache_entry *entry; 79 in_cache_entry *entry;
71 80
72 read_lock_bh(&client->ingress_lock); 81 read_lock_bh(&client->ingress_lock);
73 entry = client->in_cache; 82 entry = client->in_cache;
74 while(entry != NULL){ 83 while (entry != NULL) {
75 if(entry->shortcut == vcc) { 84 if (entry->shortcut == vcc) {
76 atomic_inc(&entry->use); 85 atomic_inc(&entry->use);
77 read_unlock_bh(&client->ingress_lock); 86 read_unlock_bh(&client->ingress_lock);
78 return entry; 87 return entry;
@@ -90,14 +99,14 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
90 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); 99 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
91 100
92 if (entry == NULL) { 101 if (entry == NULL) {
93 printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n"); 102 pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
94 return NULL; 103 return NULL;
95 } 104 }
96 105
97 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %pI4\n", &dst_ip); 106 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
98 107
99 atomic_set(&entry->use, 1); 108 atomic_set(&entry->use, 1);
100 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n"); 109 dprintk("new_in_cache_entry: about to lock\n");
101 write_lock_bh(&client->ingress_lock); 110 write_lock_bh(&client->ingress_lock);
102 entry->next = client->in_cache; 111 entry->next = client->in_cache;
103 entry->prev = NULL; 112 entry->prev = NULL;
@@ -115,7 +124,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
115 atomic_inc(&entry->use); 124 atomic_inc(&entry->use);
116 125
117 write_unlock_bh(&client->ingress_lock); 126 write_unlock_bh(&client->ingress_lock);
118 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: unlocked\n"); 127 dprintk("new_in_cache_entry: unlocked\n");
119 128
120 return entry; 129 return entry;
121} 130}
@@ -126,39 +135,41 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
126 struct k_message msg; 135 struct k_message msg;
127 136
128 entry->count++; 137 entry->count++;
129 if(entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) 138 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
130 return OPEN; 139 return OPEN;
131 140
132 if(entry->entry_state == INGRESS_REFRESHING){ 141 if (entry->entry_state == INGRESS_REFRESHING) {
133 if(entry->count > mpc->parameters.mpc_p1){ 142 if (entry->count > mpc->parameters.mpc_p1) {
134 msg.type = SND_MPOA_RES_RQST; 143 msg.type = SND_MPOA_RES_RQST;
135 msg.content.in_info = entry->ctrl_info; 144 msg.content.in_info = entry->ctrl_info;
136 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); 145 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
137 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 146 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
138 if (qos != NULL) msg.qos = qos->qos; 147 if (qos != NULL)
148 msg.qos = qos->qos;
139 msg_to_mpoad(&msg, mpc); 149 msg_to_mpoad(&msg, mpc);
140 do_gettimeofday(&(entry->reply_wait)); 150 do_gettimeofday(&(entry->reply_wait));
141 entry->entry_state = INGRESS_RESOLVING; 151 entry->entry_state = INGRESS_RESOLVING;
142 } 152 }
143 if(entry->shortcut != NULL) 153 if (entry->shortcut != NULL)
144 return OPEN; 154 return OPEN;
145 return CLOSED; 155 return CLOSED;
146 } 156 }
147 157
148 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) 158 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
149 return OPEN; 159 return OPEN;
150 160
151 if( entry->count > mpc->parameters.mpc_p1 && 161 if (entry->count > mpc->parameters.mpc_p1 &&
152 entry->entry_state == INGRESS_INVALID){ 162 entry->entry_state == INGRESS_INVALID) {
153 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %pI4, sending MPOA res req\n", 163 dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
154 mpc->dev->name, &entry->ctrl_info.in_dst_ip); 164 mpc->dev->name, &entry->ctrl_info.in_dst_ip);
155 entry->entry_state = INGRESS_RESOLVING; 165 entry->entry_state = INGRESS_RESOLVING;
156 msg.type = SND_MPOA_RES_RQST; 166 msg.type = SND_MPOA_RES_RQST;
157 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN ); 167 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
158 msg.content.in_info = entry->ctrl_info; 168 msg.content.in_info = entry->ctrl_info;
159 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 169 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
160 if (qos != NULL) msg.qos = qos->qos; 170 if (qos != NULL)
161 msg_to_mpoad( &msg, mpc); 171 msg.qos = qos->qos;
172 msg_to_mpoad(&msg, mpc);
162 do_gettimeofday(&(entry->reply_wait)); 173 do_gettimeofday(&(entry->reply_wait));
163 } 174 }
164 175
@@ -185,7 +196,7 @@ static void in_cache_remove_entry(in_cache_entry *entry,
185 struct k_message msg; 196 struct k_message msg;
186 197
187 vcc = entry->shortcut; 198 vcc = entry->shortcut;
188 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %pI4\n", 199 dprintk("removing an ingress entry, ip = %pI4\n",
189 &entry->ctrl_info.in_dst_ip); 200 &entry->ctrl_info.in_dst_ip);
190 201
191 if (entry->prev != NULL) 202 if (entry->prev != NULL)
@@ -195,14 +206,15 @@ static void in_cache_remove_entry(in_cache_entry *entry,
195 if (entry->next != NULL) 206 if (entry->next != NULL)
196 entry->next->prev = entry->prev; 207 entry->next->prev = entry->prev;
197 client->in_ops->put(entry); 208 client->in_ops->put(entry);
198 if(client->in_cache == NULL && client->eg_cache == NULL){ 209 if (client->in_cache == NULL && client->eg_cache == NULL) {
199 msg.type = STOP_KEEP_ALIVE_SM; 210 msg.type = STOP_KEEP_ALIVE_SM;
200 msg_to_mpoad(&msg,client); 211 msg_to_mpoad(&msg, client);
201 } 212 }
202 213
203 /* Check if the egress side still uses this VCC */ 214 /* Check if the egress side still uses this VCC */
204 if (vcc != NULL) { 215 if (vcc != NULL) {
205 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc, client); 216 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
217 client);
206 if (eg_entry != NULL) { 218 if (eg_entry != NULL) {
207 client->eg_ops->put(eg_entry); 219 client->eg_ops->put(eg_entry);
208 return; 220 return;
@@ -213,7 +225,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
213 return; 225 return;
214} 226}
215 227
216
217/* Call this every MPC-p2 seconds... Not exactly correct solution, 228/* Call this every MPC-p2 seconds... Not exactly correct solution,
218 but an easy one... */ 229 but an easy one... */
219static void clear_count_and_expired(struct mpoa_client *client) 230static void clear_count_and_expired(struct mpoa_client *client)
@@ -225,12 +236,12 @@ static void clear_count_and_expired(struct mpoa_client *client)
225 236
226 write_lock_bh(&client->ingress_lock); 237 write_lock_bh(&client->ingress_lock);
227 entry = client->in_cache; 238 entry = client->in_cache;
228 while(entry != NULL){ 239 while (entry != NULL) {
229 entry->count=0; 240 entry->count = 0;
230 next_entry = entry->next; 241 next_entry = entry->next;
231 if((now.tv_sec - entry->tv.tv_sec) 242 if ((now.tv_sec - entry->tv.tv_sec)
232 > entry->ctrl_info.holding_time){ 243 > entry->ctrl_info.holding_time) {
233 dprintk("mpoa: mpoa_caches.c: holding time expired, ip = %pI4\n", 244 dprintk("holding time expired, ip = %pI4\n",
234 &entry->ctrl_info.in_dst_ip); 245 &entry->ctrl_info.in_dst_ip);
235 client->in_ops->remove_entry(entry, client); 246 client->in_ops->remove_entry(entry, client);
236 } 247 }
@@ -250,33 +261,38 @@ static void check_resolving_entries(struct mpoa_client *client)
250 struct timeval now; 261 struct timeval now;
251 struct k_message msg; 262 struct k_message msg;
252 263
253 do_gettimeofday( &now ); 264 do_gettimeofday(&now);
254 265
255 read_lock_bh(&client->ingress_lock); 266 read_lock_bh(&client->ingress_lock);
256 entry = client->in_cache; 267 entry = client->in_cache;
257 while( entry != NULL ){ 268 while (entry != NULL) {
258 if(entry->entry_state == INGRESS_RESOLVING){ 269 if (entry->entry_state == INGRESS_RESOLVING) {
259 if(now.tv_sec - entry->hold_down.tv_sec < client->parameters.mpc_p6){ 270 if ((now.tv_sec - entry->hold_down.tv_sec) <
260 entry = entry->next; /* Entry in hold down */ 271 client->parameters.mpc_p6) {
272 entry = entry->next; /* Entry in hold down */
261 continue; 273 continue;
262 } 274 }
263 if( (now.tv_sec - entry->reply_wait.tv_sec) > 275 if ((now.tv_sec - entry->reply_wait.tv_sec) >
264 entry->retry_time ){ 276 entry->retry_time) {
265 entry->retry_time = MPC_C1*( entry->retry_time ); 277 entry->retry_time = MPC_C1 * (entry->retry_time);
266 if(entry->retry_time > client->parameters.mpc_p5){ 278 /*
267 /* Retry time maximum exceeded, put entry in hold down. */ 279 * Retry time maximum exceeded,
280 * put entry in hold down.
281 */
282 if (entry->retry_time > client->parameters.mpc_p5) {
268 do_gettimeofday(&(entry->hold_down)); 283 do_gettimeofday(&(entry->hold_down));
269 entry->retry_time = client->parameters.mpc_p4; 284 entry->retry_time = client->parameters.mpc_p4;
270 entry = entry->next; 285 entry = entry->next;
271 continue; 286 continue;
272 } 287 }
273 /* Ask daemon to send a resolution request. */ 288 /* Ask daemon to send a resolution request. */
274 memset(&(entry->hold_down),0,sizeof(struct timeval)); 289 memset(&(entry->hold_down), 0, sizeof(struct timeval));
275 msg.type = SND_MPOA_RES_RTRY; 290 msg.type = SND_MPOA_RES_RTRY;
276 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN); 291 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
277 msg.content.in_info = entry->ctrl_info; 292 msg.content.in_info = entry->ctrl_info;
278 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 293 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
279 if (qos != NULL) msg.qos = qos->qos; 294 if (qos != NULL)
295 msg.qos = qos->qos;
280 msg_to_mpoad(&msg, client); 296 msg_to_mpoad(&msg, client);
281 do_gettimeofday(&(entry->reply_wait)); 297 do_gettimeofday(&(entry->reply_wait));
282 } 298 }
@@ -292,16 +308,17 @@ static void refresh_entries(struct mpoa_client *client)
292 struct timeval now; 308 struct timeval now;
293 struct in_cache_entry *entry = client->in_cache; 309 struct in_cache_entry *entry = client->in_cache;
294 310
295 ddprintk("mpoa: mpoa_caches.c: refresh_entries\n"); 311 ddprintk("refresh_entries\n");
296 do_gettimeofday(&now); 312 do_gettimeofday(&now);
297 313
298 read_lock_bh(&client->ingress_lock); 314 read_lock_bh(&client->ingress_lock);
299 while( entry != NULL ){ 315 while (entry != NULL) {
300 if( entry->entry_state == INGRESS_RESOLVED ){ 316 if (entry->entry_state == INGRESS_RESOLVED) {
301 if(!(entry->refresh_time)) 317 if (!(entry->refresh_time))
302 entry->refresh_time = (2*(entry->ctrl_info.holding_time))/3; 318 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
303 if( (now.tv_sec - entry->reply_wait.tv_sec) > entry->refresh_time ){ 319 if ((now.tv_sec - entry->reply_wait.tv_sec) >
304 dprintk("mpoa: mpoa_caches.c: refreshing an entry.\n"); 320 entry->refresh_time) {
321 dprintk("refreshing an entry.\n");
305 entry->entry_state = INGRESS_REFRESHING; 322 entry->entry_state = INGRESS_REFRESHING;
306 323
307 } 324 }
@@ -314,21 +331,22 @@ static void refresh_entries(struct mpoa_client *client)
314static void in_destroy_cache(struct mpoa_client *mpc) 331static void in_destroy_cache(struct mpoa_client *mpc)
315{ 332{
316 write_lock_irq(&mpc->ingress_lock); 333 write_lock_irq(&mpc->ingress_lock);
317 while(mpc->in_cache != NULL) 334 while (mpc->in_cache != NULL)
318 mpc->in_ops->remove_entry(mpc->in_cache, mpc); 335 mpc->in_ops->remove_entry(mpc->in_cache, mpc);
319 write_unlock_irq(&mpc->ingress_lock); 336 write_unlock_irq(&mpc->ingress_lock);
320 337
321 return; 338 return;
322} 339}
323 340
324static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc) 341static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
342 struct mpoa_client *mpc)
325{ 343{
326 eg_cache_entry *entry; 344 eg_cache_entry *entry;
327 345
328 read_lock_irq(&mpc->egress_lock); 346 read_lock_irq(&mpc->egress_lock);
329 entry = mpc->eg_cache; 347 entry = mpc->eg_cache;
330 while(entry != NULL){ 348 while (entry != NULL) {
331 if(entry->ctrl_info.cache_id == cache_id){ 349 if (entry->ctrl_info.cache_id == cache_id) {
332 atomic_inc(&entry->use); 350 atomic_inc(&entry->use);
333 read_unlock_irq(&mpc->egress_lock); 351 read_unlock_irq(&mpc->egress_lock);
334 return entry; 352 return entry;
@@ -348,7 +366,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
348 366
349 read_lock_irqsave(&mpc->egress_lock, flags); 367 read_lock_irqsave(&mpc->egress_lock, flags);
350 entry = mpc->eg_cache; 368 entry = mpc->eg_cache;
351 while (entry != NULL){ 369 while (entry != NULL) {
352 if (entry->ctrl_info.tag == tag) { 370 if (entry->ctrl_info.tag == tag) {
353 atomic_inc(&entry->use); 371 atomic_inc(&entry->use);
354 read_unlock_irqrestore(&mpc->egress_lock, flags); 372 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -362,14 +380,15 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
362} 380}
363 381
364/* This can be called from any context since it saves CPU flags */ 382/* This can be called from any context since it saves CPU flags */
365static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc) 383static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
384 struct mpoa_client *mpc)
366{ 385{
367 unsigned long flags; 386 unsigned long flags;
368 eg_cache_entry *entry; 387 eg_cache_entry *entry;
369 388
370 read_lock_irqsave(&mpc->egress_lock, flags); 389 read_lock_irqsave(&mpc->egress_lock, flags);
371 entry = mpc->eg_cache; 390 entry = mpc->eg_cache;
372 while (entry != NULL){ 391 while (entry != NULL) {
373 if (entry->shortcut == vcc) { 392 if (entry->shortcut == vcc) {
374 atomic_inc(&entry->use); 393 atomic_inc(&entry->use);
375 read_unlock_irqrestore(&mpc->egress_lock, flags); 394 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -382,14 +401,15 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
382 return NULL; 401 return NULL;
383} 402}
384 403
385static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc) 404static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
405 struct mpoa_client *mpc)
386{ 406{
387 eg_cache_entry *entry; 407 eg_cache_entry *entry;
388 408
389 read_lock_irq(&mpc->egress_lock); 409 read_lock_irq(&mpc->egress_lock);
390 entry = mpc->eg_cache; 410 entry = mpc->eg_cache;
391 while(entry != NULL){ 411 while (entry != NULL) {
392 if(entry->latest_ip_addr == ipaddr) { 412 if (entry->latest_ip_addr == ipaddr) {
393 atomic_inc(&entry->use); 413 atomic_inc(&entry->use);
394 read_unlock_irq(&mpc->egress_lock); 414 read_unlock_irq(&mpc->egress_lock);
395 return entry; 415 return entry;
@@ -421,7 +441,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
421 struct k_message msg; 441 struct k_message msg;
422 442
423 vcc = entry->shortcut; 443 vcc = entry->shortcut;
424 dprintk("mpoa: mpoa_caches.c: removing an egress entry.\n"); 444 dprintk("removing an egress entry.\n");
425 if (entry->prev != NULL) 445 if (entry->prev != NULL)
426 entry->prev->next = entry->next; 446 entry->prev->next = entry->next;
427 else 447 else
@@ -429,9 +449,9 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
429 if (entry->next != NULL) 449 if (entry->next != NULL)
430 entry->next->prev = entry->prev; 450 entry->next->prev = entry->prev;
431 client->eg_ops->put(entry); 451 client->eg_ops->put(entry);
432 if(client->in_cache == NULL && client->eg_cache == NULL){ 452 if (client->in_cache == NULL && client->eg_cache == NULL) {
433 msg.type = STOP_KEEP_ALIVE_SM; 453 msg.type = STOP_KEEP_ALIVE_SM;
434 msg_to_mpoad(&msg,client); 454 msg_to_mpoad(&msg, client);
435 } 455 }
436 456
437 /* Check if the ingress side still uses this VCC */ 457 /* Check if the ingress side still uses this VCC */
@@ -447,20 +467,21 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
447 return; 467 return;
448} 468}
449 469
450static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client) 470static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
471 struct mpoa_client *client)
451{ 472{
452 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); 473 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
453 474
454 if (entry == NULL) { 475 if (entry == NULL) {
455 printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n"); 476 pr_info("out of memory\n");
456 return NULL; 477 return NULL;
457 } 478 }
458 479
459 dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %pI4, this should be our IP\n", 480 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
460 &msg->content.eg_info.eg_dst_ip); 481 &msg->content.eg_info.eg_dst_ip);
461 482
462 atomic_set(&entry->use, 1); 483 atomic_set(&entry->use, 1);
463 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n"); 484 dprintk("new_eg_cache_entry: about to lock\n");
464 write_lock_irq(&client->egress_lock); 485 write_lock_irq(&client->egress_lock);
465 entry->next = client->eg_cache; 486 entry->next = client->eg_cache;
466 entry->prev = NULL; 487 entry->prev = NULL;
@@ -472,18 +493,18 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
472 entry->ctrl_info = msg->content.eg_info; 493 entry->ctrl_info = msg->content.eg_info;
473 do_gettimeofday(&(entry->tv)); 494 do_gettimeofday(&(entry->tv));
474 entry->entry_state = EGRESS_RESOLVED; 495 entry->entry_state = EGRESS_RESOLVED;
475 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry cache_id %lu\n", ntohl(entry->ctrl_info.cache_id)); 496 dprintk("new_eg_cache_entry cache_id %u\n",
476 dprintk("mpoa: mpoa_caches.c: mps_ip = %pI4\n", 497 ntohl(entry->ctrl_info.cache_id));
477 &entry->ctrl_info.mps_ip); 498 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
478 atomic_inc(&entry->use); 499 atomic_inc(&entry->use);
479 500
480 write_unlock_irq(&client->egress_lock); 501 write_unlock_irq(&client->egress_lock);
481 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: unlocked\n"); 502 dprintk("new_eg_cache_entry: unlocked\n");
482 503
483 return entry; 504 return entry;
484} 505}
485 506
486static void update_eg_cache_entry(eg_cache_entry * entry, uint16_t holding_time) 507static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
487{ 508{
488 do_gettimeofday(&(entry->tv)); 509 do_gettimeofday(&(entry->tv));
489 entry->entry_state = EGRESS_RESOLVED; 510 entry->entry_state = EGRESS_RESOLVED;
@@ -502,13 +523,14 @@ static void clear_expired(struct mpoa_client *client)
502 523
503 write_lock_irq(&client->egress_lock); 524 write_lock_irq(&client->egress_lock);
504 entry = client->eg_cache; 525 entry = client->eg_cache;
505 while(entry != NULL){ 526 while (entry != NULL) {
506 next_entry = entry->next; 527 next_entry = entry->next;
507 if((now.tv_sec - entry->tv.tv_sec) 528 if ((now.tv_sec - entry->tv.tv_sec)
508 > entry->ctrl_info.holding_time){ 529 > entry->ctrl_info.holding_time) {
509 msg.type = SND_EGRESS_PURGE; 530 msg.type = SND_EGRESS_PURGE;
510 msg.content.eg_info = entry->ctrl_info; 531 msg.content.eg_info = entry->ctrl_info;
511 dprintk("mpoa: mpoa_caches.c: egress_cache: holding time expired, cache_id = %lu.\n",ntohl(entry->ctrl_info.cache_id)); 532 dprintk("egress_cache: holding time expired, cache_id = %u.\n",
533 ntohl(entry->ctrl_info.cache_id));
512 msg_to_mpoad(&msg, client); 534 msg_to_mpoad(&msg, client);
513 client->eg_ops->remove_entry(entry, client); 535 client->eg_ops->remove_entry(entry, client);
514 } 536 }
@@ -522,7 +544,7 @@ static void clear_expired(struct mpoa_client *client)
522static void eg_destroy_cache(struct mpoa_client *mpc) 544static void eg_destroy_cache(struct mpoa_client *mpc)
523{ 545{
524 write_lock_irq(&mpc->egress_lock); 546 write_lock_irq(&mpc->egress_lock);
525 while(mpc->eg_cache != NULL) 547 while (mpc->eg_cache != NULL)
526 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); 548 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
527 write_unlock_irq(&mpc->egress_lock); 549 write_unlock_irq(&mpc->egress_lock);
528 550
@@ -530,7 +552,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
530} 552}
531 553
532 554
533
534static struct in_cache_ops ingress_ops = { 555static struct in_cache_ops ingress_ops = {
535 in_cache_add_entry, /* add_entry */ 556 in_cache_add_entry, /* add_entry */
536 in_cache_get, /* get */ 557 in_cache_get, /* get */
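The mpoa_caches.c hunks above shorten every log message by dropping the hand-written "mpoa: mpoa_caches.c:" prefixes; the prefix is now supplied centrally by a pr_fmt() definition, as the later hunks in this patch add for the other atm files (mpoa_proc.c, raw.c, resources.c, signaling.c). A minimal sketch of that pattern, using a hypothetical demo_add() rather than the real cache code:

/* pr_fmt() must be defined before the printk helpers are included,
 * otherwise the default definition from printk.h wins. */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>

static int demo_add(void *entry)
{
        if (entry == NULL) {
                /* emitted as "<module>:demo_add: out of memory" */
                pr_info("out of memory\n");
                return -ENOMEM;
        }
        return 0;
}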
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 1a0f5ccea9c4..53e500292271 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -1,3 +1,4 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
1 2
2#ifdef CONFIG_PROC_FS 3#ifdef CONFIG_PROC_FS
3#include <linux/errno.h> 4#include <linux/errno.h>
@@ -8,9 +9,10 @@
8#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
9#include <linux/time.h> 10#include <linux/time.h>
10#include <linux/seq_file.h> 11#include <linux/seq_file.h>
11#include <asm/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/atmmpc.h> 13#include <linux/atmmpc.h>
13#include <linux/atm.h> 14#include <linux/atm.h>
15#include <linux/gfp.h>
14#include "mpc.h" 16#include "mpc.h"
15#include "mpoa_caches.h" 17#include "mpoa_caches.h"
16 18
@@ -20,9 +22,23 @@
20 */ 22 */
21 23
22#if 1 24#if 1
23#define dprintk printk /* debug */ 25#define dprintk(format, args...) \
26 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
24#else 27#else
25#define dprintk(format,args...) 28#define dprintk(format, args...) \
29 do { if (0) \
30 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
31 } while (0)
32#endif
33
34#if 0
35#define ddprintk(format, args...) \
36 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
37#else
38#define ddprintk(format, args...) \
39 do { if (0) \
40 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
41 } while (0)
26#endif 42#endif
27 43
28#define STAT_FILE_NAME "mpc" /* Our statistic file's name */ 44#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
@@ -51,42 +67,37 @@ static const struct file_operations mpc_file_operations = {
51/* 67/*
52 * Returns the state of an ingress cache entry as a string 68 * Returns the state of an ingress cache entry as a string
53 */ 69 */
54static const char *ingress_state_string(int state){ 70static const char *ingress_state_string(int state)
55 switch(state) { 71{
72 switch (state) {
56 case INGRESS_RESOLVING: 73 case INGRESS_RESOLVING:
57 return "resolving "; 74 return "resolving ";
58 break;
59 case INGRESS_RESOLVED: 75 case INGRESS_RESOLVED:
60 return "resolved "; 76 return "resolved ";
61 break;
62 case INGRESS_INVALID: 77 case INGRESS_INVALID:
63 return "invalid "; 78 return "invalid ";
64 break;
65 case INGRESS_REFRESHING: 79 case INGRESS_REFRESHING:
66 return "refreshing "; 80 return "refreshing ";
67 break;
68 default:
69 return "";
70 } 81 }
82
83 return "";
71} 84}
72 85
73/* 86/*
74 * Returns the state of an egress cache entry as a string 87 * Returns the state of an egress cache entry as a string
75 */ 88 */
76static const char *egress_state_string(int state){ 89static const char *egress_state_string(int state)
77 switch(state) { 90{
91 switch (state) {
78 case EGRESS_RESOLVED: 92 case EGRESS_RESOLVED:
79 return "resolved "; 93 return "resolved ";
80 break;
81 case EGRESS_PURGE: 94 case EGRESS_PURGE:
82 return "purge "; 95 return "purge ";
83 break;
84 case EGRESS_INVALID: 96 case EGRESS_INVALID:
85 return "invalid "; 97 return "invalid ";
86 break;
87 default:
88 return "";
89 } 98 }
99
100 return "";
90} 101}
91 102
92/* 103/*
@@ -123,7 +134,6 @@ static void mpc_stop(struct seq_file *m, void *v)
123static int mpc_show(struct seq_file *m, void *v) 134static int mpc_show(struct seq_file *m, void *v)
124{ 135{
125 struct mpoa_client *mpc = v; 136 struct mpoa_client *mpc = v;
126 unsigned char *temp;
127 int i; 137 int i;
128 in_cache_entry *in_entry; 138 in_cache_entry *in_entry;
129 eg_cache_entry *eg_entry; 139 eg_cache_entry *eg_entry;
@@ -140,15 +150,17 @@ static int mpc_show(struct seq_file *m, void *v)
140 do_gettimeofday(&now); 150 do_gettimeofday(&now);
141 151
142 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) { 152 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
143 temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; 153 sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
144 sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
145 seq_printf(m, "%-16s%s%-14lu%-12u", 154 seq_printf(m, "%-16s%s%-14lu%-12u",
146 ip_string, 155 ip_string,
147 ingress_state_string(in_entry->entry_state), 156 ingress_state_string(in_entry->entry_state),
148 in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec), 157 in_entry->ctrl_info.holding_time -
149 in_entry->packets_fwded); 158 (now.tv_sec-in_entry->tv.tv_sec),
159 in_entry->packets_fwded);
150 if (in_entry->shortcut) 160 if (in_entry->shortcut)
151 seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci); 161 seq_printf(m, " %-3d %-3d",
162 in_entry->shortcut->vpi,
163 in_entry->shortcut->vci);
152 seq_printf(m, "\n"); 164 seq_printf(m, "\n");
153 } 165 }
154 166
@@ -156,21 +168,23 @@ static int mpc_show(struct seq_file *m, void *v)
156 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n"); 168 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
157 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) { 169 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
158 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr; 170 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
159 for(i = 0; i < ATM_ESA_LEN; i++) 171 for (i = 0; i < ATM_ESA_LEN; i++)
160 seq_printf(m, "%02x", p[i]); 172 seq_printf(m, "%02x", p[i]);
161 seq_printf(m, "\n%-16lu%s%-14lu%-15u", 173 seq_printf(m, "\n%-16lu%s%-14lu%-15u",
162 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id), 174 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
163 egress_state_string(eg_entry->entry_state), 175 egress_state_string(eg_entry->entry_state),
164 (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), 176 (eg_entry->ctrl_info.holding_time -
177 (now.tv_sec-eg_entry->tv.tv_sec)),
165 eg_entry->packets_rcvd); 178 eg_entry->packets_rcvd);
166 179
167 /* latest IP address */ 180 /* latest IP address */
168 temp = (unsigned char *)&eg_entry->latest_ip_addr; 181 sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
169 sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
170 seq_printf(m, "%-16s", ip_string); 182 seq_printf(m, "%-16s", ip_string);
171 183
172 if (eg_entry->shortcut) 184 if (eg_entry->shortcut)
173 seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci); 185 seq_printf(m, " %-3d %-3d",
186 eg_entry->shortcut->vpi,
187 eg_entry->shortcut->vci);
174 seq_printf(m, "\n"); 188 seq_printf(m, "\n");
175 } 189 }
176 seq_printf(m, "\n"); 190 seq_printf(m, "\n");
@@ -258,12 +272,9 @@ static int parse_qos(const char *buff)
258 qos.rxtp.max_pcr = rx_pcr; 272 qos.rxtp.max_pcr = rx_pcr;
259 qos.rxtp.max_sdu = rx_sdu; 273 qos.rxtp.max_sdu = rx_sdu;
260 qos.aal = ATM_AAL5; 274 qos.aal = ATM_AAL5;
261 dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n", 275 dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
262 qos.txtp.max_pcr, 276 qos.txtp.max_pcr, qos.txtp.max_sdu,
263 qos.txtp.max_sdu, 277 qos.rxtp.max_pcr, qos.rxtp.max_sdu);
264 qos.rxtp.max_pcr,
265 qos.rxtp.max_sdu
266 );
267 278
268 atm_mpoa_add_qos(ipaddr, &qos); 279 atm_mpoa_add_qos(ipaddr, &qos);
269 return 1; 280 return 1;
@@ -278,7 +289,7 @@ int mpc_proc_init(void)
278 289
279 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); 290 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
280 if (!p) { 291 if (!p) {
281 printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); 292 pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
282 return -ENOMEM; 293 return -ENOMEM;
283 } 294 }
284 return 0; 295 return 0;
@@ -289,10 +300,9 @@ int mpc_proc_init(void)
289 */ 300 */
290void mpc_proc_clean(void) 301void mpc_proc_clean(void)
291{ 302{
292 remove_proc_entry(STAT_FILE_NAME,atm_proc_root); 303 remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
293} 304}
294 305
295
296#endif /* CONFIG_PROC_FS */ 306#endif /* CONFIG_PROC_FS */
297 307
298 308
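The mpc_show() hunks above replace the cast-to-bytes formatting of IPv4 addresses with the kernel's %pI4 printk extension, which takes a pointer to a big-endian 32-bit address. A small sketch of the same conversion, with an illustrative print_dst() that is not part of mpc.h:

#include <linux/kernel.h>
#include <linux/types.h>

static void print_dst(__be32 dst_ip)
{
        char ip_string[16];             /* "255.255.255.255" plus NUL */

        /* old style:
         *   unsigned char *t = (unsigned char *)&dst_ip;
         *   sprintf(ip_string, "%d.%d.%d.%d", t[0], t[1], t[2], t[3]);
         */
        sprintf(ip_string, "%pI4", &dst_ip);
        pr_debug("destination %s\n", ip_string);
}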
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 0af84cd4f65b..e49bb6d948a1 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -33,9 +33,12 @@
33 * These hooks are not yet available in ppp_generic 33 * These hooks are not yet available in ppp_generic
34 */ 34 */
35 35
36#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
37
36#include <linux/module.h> 38#include <linux/module.h>
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/slab.h>
39#include <linux/atm.h> 42#include <linux/atm.h>
40#include <linux/atmdev.h> 43#include <linux/atmdev.h>
41#include <linux/capability.h> 44#include <linux/capability.h>
@@ -132,7 +135,7 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
132static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) 135static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
133{ 136{
134 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 137 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
135 pr_debug("pppoatm push\n"); 138 pr_debug("\n");
136 if (skb == NULL) { /* VCC was closed */ 139 if (skb == NULL) { /* VCC was closed */
137 pr_debug("removing ATMPPP VCC %p\n", pvcc); 140 pr_debug("removing ATMPPP VCC %p\n", pvcc);
138 pppoatm_unassign_vcc(atmvcc); 141 pppoatm_unassign_vcc(atmvcc);
@@ -165,17 +168,17 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
165 pvcc->chan.mtu += LLC_LEN; 168 pvcc->chan.mtu += LLC_LEN;
166 break; 169 break;
167 } 170 }
168 pr_debug("Couldn't autodetect yet " 171 pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
169 "(skb: %02X %02X %02X %02X %02X %02X)\n", 172 skb->data[0], skb->data[1], skb->data[2],
170 skb->data[0], skb->data[1], skb->data[2], 173 skb->data[3], skb->data[4], skb->data[5]);
171 skb->data[3], skb->data[4], skb->data[5]);
172 goto error; 174 goto error;
173 case e_vc: 175 case e_vc:
174 break; 176 break;
175 } 177 }
176 ppp_input(&pvcc->chan, skb); 178 ppp_input(&pvcc->chan, skb);
177 return; 179 return;
178 error: 180
181error:
179 kfree_skb(skb); 182 kfree_skb(skb);
180 ppp_input_error(&pvcc->chan, 0); 183 ppp_input_error(&pvcc->chan, 0);
181} 184}
@@ -194,7 +197,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
194{ 197{
195 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); 198 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
196 ATM_SKB(skb)->vcc = pvcc->atmvcc; 199 ATM_SKB(skb)->vcc = pvcc->atmvcc;
197 pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); 200 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
198 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) 201 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
199 (void) skb_pull(skb, 1); 202 (void) skb_pull(skb, 1);
200 switch (pvcc->encaps) { /* LLC encapsulation needed */ 203 switch (pvcc->encaps) { /* LLC encapsulation needed */
@@ -208,7 +211,8 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
208 goto nospace; 211 goto nospace;
209 } 212 }
210 kfree_skb(skb); 213 kfree_skb(skb);
211 if ((skb = n) == NULL) 214 skb = n;
215 if (skb == NULL)
212 return DROP_PACKET; 216 return DROP_PACKET;
213 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 217 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
214 goto nospace; 218 goto nospace;
@@ -226,11 +230,11 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
226 230
227 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); 231 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
228 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; 232 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
229 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, 233 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
230 ATM_SKB(skb)->vcc->dev); 234 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
231 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) 235 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
232 ? DROP_PACKET : 1; 236 ? DROP_PACKET : 1;
233 nospace: 237nospace:
234 /* 238 /*
235 * We don't have space to send this SKB now, but we might have 239 * We don't have space to send this SKB now, but we might have
236 * already applied SC_COMP_PROT compression, so may need to undo 240 * already applied SC_COMP_PROT compression, so may need to undo
@@ -289,7 +293,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
289 (be.encaps == e_vc ? 0 : LLC_LEN); 293 (be.encaps == e_vc ? 0 : LLC_LEN);
290 pvcc->wakeup_tasklet = tasklet_proto; 294 pvcc->wakeup_tasklet = tasklet_proto;
291 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan; 295 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
292 if ((err = ppp_register_channel(&pvcc->chan)) != 0) { 296 err = ppp_register_channel(&pvcc->chan);
297 if (err != 0) {
293 kfree(pvcc); 298 kfree(pvcc);
294 return err; 299 return err;
295 } 300 }
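The pppoatm.c hunks above also untangle assignments that were buried inside if() conditions (err = ppp_register_channel(...), skb = n) into a separate assignment followed by the test, the style that checkpatch.pl enforces. A sketch of the before/after shape, using a stand-in do_register() rather than the real PPP API:

#include <linux/errno.h>

static int do_register(void *chan)      /* stand-in, not a kernel API */
{
        return chan ? 0 : -EINVAL;
}

static int demo_setup(void *chan)
{
        int err;

        /* before: if ((err = do_register(chan)) != 0) return err; */
        err = do_register(chan);
        if (err != 0)
                return err;
        return 0;
}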
diff --git a/net/atm/proc.c b/net/atm/proc.c
index ab8419a324b6..696e218436e5 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -22,17 +22,18 @@
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/atmclip.h> 23#include <linux/atmclip.h>
24#include <linux/init.h> /* for __init */ 24#include <linux/init.h> /* for __init */
25#include <linux/slab.h>
25#include <net/net_namespace.h> 26#include <net/net_namespace.h>
26#include <net/atmclip.h> 27#include <net/atmclip.h>
27#include <asm/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/param.h> /* for HZ */
28#include <asm/atomic.h> 30#include <asm/atomic.h>
29#include <asm/param.h> /* for HZ */
30#include "resources.h" 31#include "resources.h"
31#include "common.h" /* atm_proc_init prototype */ 32#include "common.h" /* atm_proc_init prototype */
32#include "signaling.h" /* to get sigd - ugly too */ 33#include "signaling.h" /* to get sigd - ugly too */
33 34
34static ssize_t proc_dev_atm_read(struct file *file,char __user *buf,size_t count, 35static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
35 loff_t *pos); 36 size_t count, loff_t *pos);
36 37
37static const struct file_operations proc_atm_dev_ops = { 38static const struct file_operations proc_atm_dev_ops = {
38 .owner = THIS_MODULE, 39 .owner = THIS_MODULE,
@@ -43,9 +44,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
43 const struct k_atm_aal_stats *stats) 44 const struct k_atm_aal_stats *stats)
44{ 45{
45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal, 46 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
46 atomic_read(&stats->tx),atomic_read(&stats->tx_err), 47 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
47 atomic_read(&stats->rx),atomic_read(&stats->rx_err), 48 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
48 atomic_read(&stats->rx_drop)); 49 atomic_read(&stats->rx_drop));
49} 50}
50 51
51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) 52static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
@@ -151,8 +152,8 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
151 152
152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) 153static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
153{ 154{
154 static const char *const class_name[] = 155 static const char *const class_name[] = {
155 {"off","UBR","CBR","VBR","ABR"}; 156 "off", "UBR", "CBR", "VBR", "ABR"};
156 static const char *const aal_name[] = { 157 static const char *const aal_name[] = {
157 "---", "1", "2", "3/4", /* 0- 3 */ 158 "---", "1", "2", "3/4", /* 0- 3 */
158 "???", "5", "???", "???", /* 4- 7 */ 159 "???", "5", "???", "???", /* 4- 7 */
@@ -160,11 +161,12 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
160 "???", "0", "???", "???"}; /* 12-15 */ 161 "???", "0", "???", "???"}; /* 12-15 */
161 162
162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", 163 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
163 vcc->dev->number,vcc->vpi,vcc->vci, 164 vcc->dev->number, vcc->vpi, vcc->vci,
164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : 165 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
165 aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr, 166 aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
166 class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr, 167 class_name[vcc->qos.rxtp.traffic_class],
167 class_name[vcc->qos.txtp.traffic_class]); 168 vcc->qos.txtp.min_pcr,
169 class_name[vcc->qos.txtp.traffic_class]);
168 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { 170 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
169 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 171 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
170 struct net_device *dev; 172 struct net_device *dev;
@@ -195,19 +197,20 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
195 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, 197 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
196 vcc->vci); 198 vcc->vci);
197 switch (sk->sk_family) { 199 switch (sk->sk_family) {
198 case AF_ATMPVC: 200 case AF_ATMPVC:
199 seq_printf(seq, "PVC"); 201 seq_printf(seq, "PVC");
200 break; 202 break;
201 case AF_ATMSVC: 203 case AF_ATMSVC:
202 seq_printf(seq, "SVC"); 204 seq_printf(seq, "SVC");
203 break; 205 break;
204 default: 206 default:
205 seq_printf(seq, "%3d", sk->sk_family); 207 seq_printf(seq, "%3d", sk->sk_family);
206 } 208 }
207 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, 209 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
208 sk_wmem_alloc_get(sk), sk->sk_sndbuf, 210 vcc->flags, sk->sk_err,
209 sk_rmem_alloc_get(sk), sk->sk_rcvbuf, 211 sk_wmem_alloc_get(sk), sk->sk_sndbuf,
210 atomic_read(&sk->sk_refcnt)); 212 sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
213 atomic_read(&sk->sk_refcnt));
211} 214}
212 215
213static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) 216static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
@@ -236,7 +239,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
236 "Itf Type ESI/\"MAC\"addr " 239 "Itf Type ESI/\"MAC\"addr "
237 "AAL(TX,err,RX,err,drop) ... [refcnt]\n"; 240 "AAL(TX,err,RX,err,drop) ... [refcnt]\n";
238 241
239 if (v == SEQ_START_TOKEN) 242 if (v == &atm_devs)
240 seq_puts(seq, atm_dev_banner); 243 seq_puts(seq, atm_dev_banner);
241 else { 244 else {
242 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); 245 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
@@ -376,32 +379,35 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
376 unsigned long page; 379 unsigned long page;
377 int length; 380 int length;
378 381
379 if (count == 0) return 0; 382 if (count == 0)
383 return 0;
380 page = get_zeroed_page(GFP_KERNEL); 384 page = get_zeroed_page(GFP_KERNEL);
381 if (!page) return -ENOMEM; 385 if (!page)
386 return -ENOMEM;
382 dev = PDE(file->f_path.dentry->d_inode)->data; 387 dev = PDE(file->f_path.dentry->d_inode)->data;
383 if (!dev->ops->proc_read) 388 if (!dev->ops->proc_read)
384 length = -EINVAL; 389 length = -EINVAL;
385 else { 390 else {
386 length = dev->ops->proc_read(dev,pos,(char *) page); 391 length = dev->ops->proc_read(dev, pos, (char *)page);
387 if (length > count) length = -EINVAL; 392 if (length > count)
393 length = -EINVAL;
388 } 394 }
389 if (length >= 0) { 395 if (length >= 0) {
390 if (copy_to_user(buf,(char *) page,length)) length = -EFAULT; 396 if (copy_to_user(buf, (char *)page, length))
397 length = -EFAULT;
391 (*pos)++; 398 (*pos)++;
392 } 399 }
393 free_page(page); 400 free_page(page);
394 return length; 401 return length;
395} 402}
396 403
397
398struct proc_dir_entry *atm_proc_root; 404struct proc_dir_entry *atm_proc_root;
399EXPORT_SYMBOL(atm_proc_root); 405EXPORT_SYMBOL(atm_proc_root);
400 406
401 407
402int atm_proc_dev_register(struct atm_dev *dev) 408int atm_proc_dev_register(struct atm_dev *dev)
403{ 409{
404 int digits,num; 410 int digits, num;
405 int error; 411 int error;
406 412
407 /* No proc info */ 413 /* No proc info */
@@ -410,26 +416,28 @@ int atm_proc_dev_register(struct atm_dev *dev)
410 416
411 error = -ENOMEM; 417 error = -ENOMEM;
412 digits = 0; 418 digits = 0;
413 for (num = dev->number; num; num /= 10) digits++; 419 for (num = dev->number; num; num /= 10)
414 if (!digits) digits++; 420 digits++;
421 if (!digits)
422 digits++;
415 423
416 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL); 424 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
417 if (!dev->proc_name) 425 if (!dev->proc_name)
418 goto err_out; 426 goto err_out;
419 sprintf(dev->proc_name,"%s:%d",dev->type, dev->number); 427 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
420 428
421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 429 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
422 &proc_atm_dev_ops, dev); 430 &proc_atm_dev_ops, dev);
423 if (!dev->proc_entry) 431 if (!dev->proc_entry)
424 goto err_free_name; 432 goto err_free_name;
425 return 0; 433 return 0;
434
426err_free_name: 435err_free_name:
427 kfree(dev->proc_name); 436 kfree(dev->proc_name);
428err_out: 437err_out:
429 return error; 438 return error;
430} 439}
431 440
432
433void atm_proc_dev_deregister(struct atm_dev *dev) 441void atm_proc_dev_deregister(struct atm_dev *dev)
434{ 442{
435 if (!dev->ops->proc_read) 443 if (!dev->ops->proc_read)
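The atm_dev_seq_show() change above (SEQ_START_TOKEN replaced by &atm_devs) goes together with the resources.c hunks further down, where the iterator is switched to seq_list_start_head()/seq_list_next(): with those helpers the first value handed to ->show() is the list head itself, so the header row is detected by comparing against &atm_devs. A sketch under that assumption, with a hypothetical demo list:

#include <linux/list.h>
#include <linux/seq_file.h>

static LIST_HEAD(demo_devs);

static int demo_seq_show(struct seq_file *seq, void *v)
{
        if (v == &demo_devs) {  /* header token from seq_list_start_head() */
                seq_puts(seq, "Itf Type ...\n");
                return 0;
        }
        /* otherwise v points at a list_head embedded in a real entry */
        return 0;
}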
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 8d74e62b0d79..437ee70c5e62 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -17,32 +17,35 @@
17#include "common.h" /* common for PVCs and SVCs */ 17#include "common.h" /* common for PVCs and SVCs */
18 18
19 19
20static int pvc_shutdown(struct socket *sock,int how) 20static int pvc_shutdown(struct socket *sock, int how)
21{ 21{
22 return 0; 22 return 0;
23} 23}
24 24
25 25static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
26static int pvc_bind(struct socket *sock,struct sockaddr *sockaddr, 26 int sockaddr_len)
27 int sockaddr_len)
28{ 27{
29 struct sock *sk = sock->sk; 28 struct sock *sk = sock->sk;
30 struct sockaddr_atmpvc *addr; 29 struct sockaddr_atmpvc *addr;
31 struct atm_vcc *vcc; 30 struct atm_vcc *vcc;
32 int error; 31 int error;
33 32
34 if (sockaddr_len != sizeof(struct sockaddr_atmpvc)) return -EINVAL; 33 if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
35 addr = (struct sockaddr_atmpvc *) sockaddr; 34 return -EINVAL;
36 if (addr->sap_family != AF_ATMPVC) return -EAFNOSUPPORT; 35 addr = (struct sockaddr_atmpvc *)sockaddr;
36 if (addr->sap_family != AF_ATMPVC)
37 return -EAFNOSUPPORT;
37 lock_sock(sk); 38 lock_sock(sk);
38 vcc = ATM_SD(sock); 39 vcc = ATM_SD(sock);
39 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { 40 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
40 error = -EBADFD; 41 error = -EBADFD;
41 goto out; 42 goto out;
42 } 43 }
43 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) { 44 if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
44 if (vcc->vpi != ATM_VPI_UNSPEC) addr->sap_addr.vpi = vcc->vpi; 45 if (vcc->vpi != ATM_VPI_UNSPEC)
45 if (vcc->vci != ATM_VCI_UNSPEC) addr->sap_addr.vci = vcc->vci; 46 addr->sap_addr.vpi = vcc->vpi;
47 if (vcc->vci != ATM_VCI_UNSPEC)
48 addr->sap_addr.vci = vcc->vci;
46 } 49 }
47 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi, 50 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
48 addr->sap_addr.vci); 51 addr->sap_addr.vci);
@@ -51,11 +54,10 @@ out:
51 return error; 54 return error;
52} 55}
53 56
54 57static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
55static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr, 58 int sockaddr_len, int flags)
56 int sockaddr_len,int flags)
57{ 59{
58 return pvc_bind(sock,sockaddr,sockaddr_len); 60 return pvc_bind(sock, sockaddr, sockaddr_len);
59} 61}
60 62
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 63static int pvc_setsockopt(struct socket *sock, int level, int optname,
@@ -70,7 +72,6 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
70 return error; 72 return error;
71} 73}
72 74
73
74static int pvc_getsockopt(struct socket *sock, int level, int optname, 75static int pvc_getsockopt(struct socket *sock, int level, int optname,
75 char __user *optval, int __user *optlen) 76 char __user *optval, int __user *optlen)
76{ 77{
@@ -83,16 +84,16 @@ static int pvc_getsockopt(struct socket *sock, int level, int optname,
83 return error; 84 return error;
84} 85}
85 86
86 87static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
87static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr, 88 int *sockaddr_len, int peer)
88 int *sockaddr_len,int peer)
89{ 89{
90 struct sockaddr_atmpvc *addr; 90 struct sockaddr_atmpvc *addr;
91 struct atm_vcc *vcc = ATM_SD(sock); 91 struct atm_vcc *vcc = ATM_SD(sock);
92 92
93 if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN; 93 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
94 return -ENOTCONN;
94 *sockaddr_len = sizeof(struct sockaddr_atmpvc); 95 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
95 addr = (struct sockaddr_atmpvc *) sockaddr; 96 addr = (struct sockaddr_atmpvc *)sockaddr;
96 addr->sap_family = AF_ATMPVC; 97 addr->sap_family = AF_ATMPVC;
97 addr->sap_addr.itf = vcc->dev->number; 98 addr->sap_addr.itf = vcc->dev->number;
98 addr->sap_addr.vpi = vcc->vpi; 99 addr->sap_addr.vpi = vcc->vpi;
@@ -100,7 +101,6 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
100 return 0; 101 return 0;
101} 102}
102 103
103
104static const struct proto_ops pvc_proto_ops = { 104static const struct proto_ops pvc_proto_ops = {
105 .family = PF_ATMPVC, 105 .family = PF_ATMPVC,
106 .owner = THIS_MODULE, 106 .owner = THIS_MODULE,
@@ -137,7 +137,6 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
137 return vcc_create(net, sock, protocol, PF_ATMPVC); 137 return vcc_create(net, sock, protocol, PF_ATMPVC);
138} 138}
139 139
140
141static const struct net_proto_family pvc_family_ops = { 140static const struct net_proto_family pvc_family_ops = {
142 .family = PF_ATMPVC, 141 .family = PF_ATMPVC,
143 .create = pvc_create, 142 .create = pvc_create,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index cbfcc71a17b1..b4f7b9ff3c74 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/atmdev.h> 8#include <linux/atmdev.h>
@@ -9,6 +10,7 @@
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
11#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/slab.h>
12 14
13#include "common.h" 15#include "common.h"
14#include "protocols.h" 16#include "protocols.h"
@@ -17,7 +19,7 @@
17 * SKB == NULL indicates that the link is being closed 19 * SKB == NULL indicates that the link is being closed
18 */ 20 */
19 21
20static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb) 22static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
21{ 23{
22 if (skb) { 24 if (skb) {
23 struct sock *sk = sk_atm(vcc); 25 struct sock *sk = sk_atm(vcc);
@@ -27,36 +29,33 @@ static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
27 } 29 }
28} 30}
29 31
30 32static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
31static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
32{ 33{
33 struct sock *sk = sk_atm(vcc); 34 struct sock *sk = sk_atm(vcc);
34 35
35 pr_debug("APopR (%d) %d -= %d\n", vcc->vci, 36 pr_debug("(%d) %d -= %d\n",
36 sk_wmem_alloc_get(sk), skb->truesize); 37 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); 38 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
38 dev_kfree_skb_any(skb); 39 dev_kfree_skb_any(skb);
39 sk->sk_write_space(sk); 40 sk->sk_write_space(sk);
40} 41}
41 42
42 43static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
43static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
44{ 44{
45 /* 45 /*
46 * Note that if vpi/vci are _ANY or _UNSPEC the below will 46 * Note that if vpi/vci are _ANY or _UNSPEC the below will
47 * still work 47 * still work
48 */ 48 */
49 if (!capable(CAP_NET_ADMIN) && 49 if (!capable(CAP_NET_ADMIN) &&
50 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != 50 (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
51 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) 51 ((vcc->vpi << ATM_HDR_VPI_SHIFT) |
52 { 52 (vcc->vci << ATM_HDR_VCI_SHIFT))) {
53 kfree_skb(skb); 53 kfree_skb(skb);
54 return -EADDRNOTAVAIL; 54 return -EADDRNOTAVAIL;
55 } 55 }
56 return vcc->dev->ops->send(vcc,skb); 56 return vcc->dev->ops->send(vcc, skb);
57} 57}
58 58
59
60int atm_init_aal0(struct atm_vcc *vcc) 59int atm_init_aal0(struct atm_vcc *vcc)
61{ 60{
62 vcc->push = atm_push_raw; 61 vcc->push = atm_push_raw;
@@ -66,7 +65,6 @@ int atm_init_aal0(struct atm_vcc *vcc)
66 return 0; 65 return 0;
67} 66}
68 67
69
70int atm_init_aal34(struct atm_vcc *vcc) 68int atm_init_aal34(struct atm_vcc *vcc)
71{ 69{
72 vcc->push = atm_push_raw; 70 vcc->push = atm_push_raw;
@@ -76,7 +74,6 @@ int atm_init_aal34(struct atm_vcc *vcc)
76 return 0; 74 return 0;
77} 75}
78 76
79
80int atm_init_aal5(struct atm_vcc *vcc) 77int atm_init_aal5(struct atm_vcc *vcc)
81{ 78{
82 vcc->push = atm_push_raw; 79 vcc->push = atm_push_raw;
@@ -85,6 +82,4 @@ int atm_init_aal5(struct atm_vcc *vcc)
85 vcc->send = vcc->dev->ops->send; 82 vcc->send = vcc->dev->ops->send;
86 return 0; 83 return 0;
87} 84}
88
89
90EXPORT_SYMBOL(atm_init_aal5); 85EXPORT_SYMBOL(atm_init_aal5);
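Both raw.c above and resources.c below move EXPORT_SYMBOL() so that it sits directly after the function it exports, instead of being separated by blank lines or grouped at the end of the file. A minimal illustration with a made-up demo_init(), not an ATM interface:

#include <linux/module.h>

int demo_init(void)
{
        return 0;
}
EXPORT_SYMBOL(demo_init);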
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 56b7322ff461..d29e58261511 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -7,6 +7,7 @@
7 * 2002/01 - don't free the whole struct sock on sk->destruct time, 7 * 2002/01 - don't free the whole struct sock on sk->destruct time,
8 * use the default destruct function initialized by sock_init_data */ 8 * use the default destruct function initialized by sock_init_data */
9 9
10#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10 11
11#include <linux/ctype.h> 12#include <linux/ctype.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -18,6 +19,7 @@
18#include <linux/capability.h> 19#include <linux/capability.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/slab.h>
21 23
22#include <net/sock.h> /* for struct sock */ 24#include <net/sock.h> /* for struct sock */
23 25
@@ -70,7 +72,7 @@ struct atm_dev *atm_dev_lookup(int number)
70 mutex_unlock(&atm_dev_mutex); 72 mutex_unlock(&atm_dev_mutex);
71 return dev; 73 return dev;
72} 74}
73 75EXPORT_SYMBOL(atm_dev_lookup);
74 76
75struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, 77struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
76 int number, unsigned long *flags) 78 int number, unsigned long *flags)
@@ -79,13 +81,13 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
79 81
80 dev = __alloc_atm_dev(type); 82 dev = __alloc_atm_dev(type);
81 if (!dev) { 83 if (!dev) {
82 printk(KERN_ERR "atm_dev_register: no space for dev %s\n", 84 pr_err("no space for dev %s\n", type);
83 type);
84 return NULL; 85 return NULL;
85 } 86 }
86 mutex_lock(&atm_dev_mutex); 87 mutex_lock(&atm_dev_mutex);
87 if (number != -1) { 88 if (number != -1) {
88 if ((inuse = __atm_dev_lookup(number))) { 89 inuse = __atm_dev_lookup(number);
90 if (inuse) {
89 atm_dev_put(inuse); 91 atm_dev_put(inuse);
90 mutex_unlock(&atm_dev_mutex); 92 mutex_unlock(&atm_dev_mutex);
91 kfree(dev); 93 kfree(dev);
@@ -109,16 +111,12 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
109 atomic_set(&dev->refcnt, 1); 111 atomic_set(&dev->refcnt, 1);
110 112
111 if (atm_proc_dev_register(dev) < 0) { 113 if (atm_proc_dev_register(dev) < 0) {
112 printk(KERN_ERR "atm_dev_register: " 114 pr_err("atm_proc_dev_register failed for dev %s\n", type);
113 "atm_proc_dev_register failed for dev %s\n",
114 type);
115 goto out_fail; 115 goto out_fail;
116 } 116 }
117 117
118 if (atm_register_sysfs(dev) < 0) { 118 if (atm_register_sysfs(dev) < 0) {
119 printk(KERN_ERR "atm_dev_register: " 119 pr_err("atm_register_sysfs failed for dev %s\n", type);
120 "atm_register_sysfs failed for dev %s\n",
121 type);
122 atm_proc_dev_deregister(dev); 120 atm_proc_dev_deregister(dev);
123 goto out_fail; 121 goto out_fail;
124 } 122 }
@@ -134,7 +132,7 @@ out_fail:
134 dev = NULL; 132 dev = NULL;
135 goto out; 133 goto out;
136} 134}
137 135EXPORT_SYMBOL(atm_dev_register);
138 136
139void atm_dev_deregister(struct atm_dev *dev) 137void atm_dev_deregister(struct atm_dev *dev)
140{ 138{
@@ -156,7 +154,7 @@ void atm_dev_deregister(struct atm_dev *dev)
156 154
157 atm_dev_put(dev); 155 atm_dev_put(dev);
158} 156}
159 157EXPORT_SYMBOL(atm_dev_deregister);
160 158
161static void copy_aal_stats(struct k_atm_aal_stats *from, 159static void copy_aal_stats(struct k_atm_aal_stats *from,
162 struct atm_aal_stats *to) 160 struct atm_aal_stats *to)
@@ -166,7 +164,6 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
166#undef __HANDLE_ITEM 164#undef __HANDLE_ITEM
167} 165}
168 166
169
170static void subtract_aal_stats(struct k_atm_aal_stats *from, 167static void subtract_aal_stats(struct k_atm_aal_stats *from,
171 struct atm_aal_stats *to) 168 struct atm_aal_stats *to)
172{ 169{
@@ -175,8 +172,8 @@ static void subtract_aal_stats(struct k_atm_aal_stats *from,
175#undef __HANDLE_ITEM 172#undef __HANDLE_ITEM
176} 173}
177 174
178 175static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
179static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, int zero) 176 int zero)
180{ 177{
181 struct atm_dev_stats tmp; 178 struct atm_dev_stats tmp;
182 int error = 0; 179 int error = 0;
@@ -194,7 +191,6 @@ static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, in
194 return error ? -EFAULT : 0; 191 return error ? -EFAULT : 0;
195} 192}
196 193
197
198int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat) 194int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
199{ 195{
200 void __user *buf; 196 void __user *buf;
@@ -210,50 +206,49 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
210#endif 206#endif
211 207
212 switch (cmd) { 208 switch (cmd) {
213 case ATM_GETNAMES: 209 case ATM_GETNAMES:
214 210 if (compat) {
215 if (compat) {
216#ifdef CONFIG_COMPAT 211#ifdef CONFIG_COMPAT
217 struct compat_atm_iobuf __user *ciobuf = arg; 212 struct compat_atm_iobuf __user *ciobuf = arg;
218 compat_uptr_t cbuf; 213 compat_uptr_t cbuf;
219 iobuf_len = &ciobuf->length; 214 iobuf_len = &ciobuf->length;
220 if (get_user(cbuf, &ciobuf->buffer)) 215 if (get_user(cbuf, &ciobuf->buffer))
221 return -EFAULT; 216 return -EFAULT;
222 buf = compat_ptr(cbuf); 217 buf = compat_ptr(cbuf);
223#endif 218#endif
224 } else { 219 } else {
225 struct atm_iobuf __user *iobuf = arg; 220 struct atm_iobuf __user *iobuf = arg;
226 iobuf_len = &iobuf->length; 221 iobuf_len = &iobuf->length;
227 if (get_user(buf, &iobuf->buffer)) 222 if (get_user(buf, &iobuf->buffer))
228 return -EFAULT;
229 }
230 if (get_user(len, iobuf_len))
231 return -EFAULT; 223 return -EFAULT;
232 mutex_lock(&atm_dev_mutex); 224 }
233 list_for_each(p, &atm_devs) 225 if (get_user(len, iobuf_len))
234 size += sizeof(int); 226 return -EFAULT;
235 if (size > len) { 227 mutex_lock(&atm_dev_mutex);
236 mutex_unlock(&atm_dev_mutex); 228 list_for_each(p, &atm_devs)
237 return -E2BIG; 229 size += sizeof(int);
238 } 230 if (size > len) {
239 tmp_buf = kmalloc(size, GFP_ATOMIC); 231 mutex_unlock(&atm_dev_mutex);
240 if (!tmp_buf) { 232 return -E2BIG;
241 mutex_unlock(&atm_dev_mutex); 233 }
242 return -ENOMEM; 234 tmp_buf = kmalloc(size, GFP_ATOMIC);
243 } 235 if (!tmp_buf) {
244 tmp_p = tmp_buf;
245 list_for_each(p, &atm_devs) {
246 dev = list_entry(p, struct atm_dev, dev_list);
247 *tmp_p++ = dev->number;
248 }
249 mutex_unlock(&atm_dev_mutex); 236 mutex_unlock(&atm_dev_mutex);
250 error = ((copy_to_user(buf, tmp_buf, size)) || 237 return -ENOMEM;
251 put_user(size, iobuf_len)) 238 }
252 ? -EFAULT : 0; 239 tmp_p = tmp_buf;
253 kfree(tmp_buf); 240 list_for_each(p, &atm_devs) {
254 return error; 241 dev = list_entry(p, struct atm_dev, dev_list);
255 default: 242 *tmp_p++ = dev->number;
256 break; 243 }
244 mutex_unlock(&atm_dev_mutex);
245 error = ((copy_to_user(buf, tmp_buf, size)) ||
246 put_user(size, iobuf_len))
247 ? -EFAULT : 0;
248 kfree(tmp_buf);
249 return error;
250 default:
251 break;
257 } 252 }
258 253
259 if (compat) { 254 if (compat) {
@@ -282,166 +277,167 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
282 if (get_user(number, &sioc->number)) 277 if (get_user(number, &sioc->number))
283 return -EFAULT; 278 return -EFAULT;
284 } 279 }
285 if (!(dev = try_then_request_module(atm_dev_lookup(number), 280
286 "atm-device-%d", number))) 281 dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
282 number);
283 if (!dev)
287 return -ENODEV; 284 return -ENODEV;
288 285
289 switch (cmd) { 286 switch (cmd) {
290 case ATM_GETTYPE: 287 case ATM_GETTYPE:
291 size = strlen(dev->type) + 1; 288 size = strlen(dev->type) + 1;
292 if (copy_to_user(buf, dev->type, size)) { 289 if (copy_to_user(buf, dev->type, size)) {
293 error = -EFAULT; 290 error = -EFAULT;
294 goto done; 291 goto done;
295 } 292 }
296 break; 293 break;
297 case ATM_GETESI: 294 case ATM_GETESI:
298 size = ESI_LEN; 295 size = ESI_LEN;
299 if (copy_to_user(buf, dev->esi, size)) { 296 if (copy_to_user(buf, dev->esi, size)) {
300 error = -EFAULT; 297 error = -EFAULT;
301 goto done; 298 goto done;
302 } 299 }
303 break; 300 break;
304 case ATM_SETESI: 301 case ATM_SETESI:
305 { 302 {
306 int i; 303 int i;
307 304
308 for (i = 0; i < ESI_LEN; i++) 305 for (i = 0; i < ESI_LEN; i++)
309 if (dev->esi[i]) { 306 if (dev->esi[i]) {
310 error = -EEXIST; 307 error = -EEXIST;
311 goto done;
312 }
313 }
314 /* fall through */
315 case ATM_SETESIF:
316 {
317 unsigned char esi[ESI_LEN];
318
319 if (!capable(CAP_NET_ADMIN)) {
320 error = -EPERM;
321 goto done;
322 }
323 if (copy_from_user(esi, buf, ESI_LEN)) {
324 error = -EFAULT;
325 goto done;
326 }
327 memcpy(dev->esi, esi, ESI_LEN);
328 error = ESI_LEN;
329 goto done;
330 }
331 case ATM_GETSTATZ:
332 if (!capable(CAP_NET_ADMIN)) {
333 error = -EPERM;
334 goto done;
335 }
336 /* fall through */
337 case ATM_GETSTAT:
338 size = sizeof(struct atm_dev_stats);
339 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
340 if (error)
341 goto done;
342 break;
343 case ATM_GETCIRANGE:
344 size = sizeof(struct atm_cirange);
345 if (copy_to_user(buf, &dev->ci_range, size)) {
346 error = -EFAULT;
347 goto done;
348 }
349 break;
350 case ATM_GETLINKRATE:
351 size = sizeof(int);
352 if (copy_to_user(buf, &dev->link_rate, size)) {
353 error = -EFAULT;
354 goto done;
355 }
356 break;
357 case ATM_RSTADDR:
358 if (!capable(CAP_NET_ADMIN)) {
359 error = -EPERM;
360 goto done;
361 }
362 atm_reset_addr(dev, ATM_ADDR_LOCAL);
363 break;
364 case ATM_ADDADDR:
365 case ATM_DELADDR:
366 case ATM_ADDLECSADDR:
367 case ATM_DELLECSADDR:
368 if (!capable(CAP_NET_ADMIN)) {
369 error = -EPERM;
370 goto done;
371 }
372 {
373 struct sockaddr_atmsvc addr;
374
375 if (copy_from_user(&addr, buf, sizeof(addr))) {
376 error = -EFAULT;
377 goto done;
378 }
379 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
380 error = atm_add_addr(dev, &addr,
381 (cmd == ATM_ADDADDR ?
382 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
383 else
384 error = atm_del_addr(dev, &addr,
385 (cmd == ATM_DELADDR ?
386 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
387 goto done; 308 goto done;
388 } 309 }
389 case ATM_GETADDR: 310 }
390 case ATM_GETLECSADDR: 311 /* fall through */
391 error = atm_get_addr(dev, buf, len, 312 case ATM_SETESIF:
392 (cmd == ATM_GETADDR ? 313 {
314 unsigned char esi[ESI_LEN];
315
316 if (!capable(CAP_NET_ADMIN)) {
317 error = -EPERM;
318 goto done;
319 }
320 if (copy_from_user(esi, buf, ESI_LEN)) {
321 error = -EFAULT;
322 goto done;
323 }
324 memcpy(dev->esi, esi, ESI_LEN);
325 error = ESI_LEN;
326 goto done;
327 }
328 case ATM_GETSTATZ:
329 if (!capable(CAP_NET_ADMIN)) {
330 error = -EPERM;
331 goto done;
332 }
333 /* fall through */
334 case ATM_GETSTAT:
335 size = sizeof(struct atm_dev_stats);
336 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
337 if (error)
338 goto done;
339 break;
340 case ATM_GETCIRANGE:
341 size = sizeof(struct atm_cirange);
342 if (copy_to_user(buf, &dev->ci_range, size)) {
343 error = -EFAULT;
344 goto done;
345 }
346 break;
347 case ATM_GETLINKRATE:
348 size = sizeof(int);
349 if (copy_to_user(buf, &dev->link_rate, size)) {
350 error = -EFAULT;
351 goto done;
352 }
353 break;
354 case ATM_RSTADDR:
355 if (!capable(CAP_NET_ADMIN)) {
356 error = -EPERM;
357 goto done;
358 }
359 atm_reset_addr(dev, ATM_ADDR_LOCAL);
360 break;
361 case ATM_ADDADDR:
362 case ATM_DELADDR:
363 case ATM_ADDLECSADDR:
364 case ATM_DELLECSADDR:
365 {
366 struct sockaddr_atmsvc addr;
367
368 if (!capable(CAP_NET_ADMIN)) {
369 error = -EPERM;
370 goto done;
371 }
372
373 if (copy_from_user(&addr, buf, sizeof(addr))) {
374 error = -EFAULT;
375 goto done;
376 }
377 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
378 error = atm_add_addr(dev, &addr,
379 (cmd == ATM_ADDADDR ?
393 ATM_ADDR_LOCAL : ATM_ADDR_LECS)); 380 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
394 if (error < 0) 381 else
395 goto done; 382 error = atm_del_addr(dev, &addr,
396 size = error; 383 (cmd == ATM_DELADDR ?
397 /* may return 0, but later on size == 0 means "don't 384 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
398 write the length" */ 385 goto done;
399 error = put_user(size, sioc_len) 386 }
400 ? -EFAULT : 0; 387 case ATM_GETADDR:
388 case ATM_GETLECSADDR:
389 error = atm_get_addr(dev, buf, len,
390 (cmd == ATM_GETADDR ?
391 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
392 if (error < 0)
393 goto done;
394 size = error;
395 /* may return 0, but later on size == 0 means "don't
396 write the length" */
397 error = put_user(size, sioc_len) ? -EFAULT : 0;
398 goto done;
399 case ATM_SETLOOP:
400 if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
401 __ATM_LM_XTLOC((int) (unsigned long) buf) >
402 __ATM_LM_XTRMT((int) (unsigned long) buf)) {
403 error = -EINVAL;
404 goto done;
405 }
406 /* fall through */
407 case ATM_SETCIRANGE:
408 case SONET_GETSTATZ:
409 case SONET_SETDIAG:
410 case SONET_CLRDIAG:
411 case SONET_SETFRAMING:
412 if (!capable(CAP_NET_ADMIN)) {
413 error = -EPERM;
401 goto done; 414 goto done;
402 case ATM_SETLOOP: 415 }
403 if (__ATM_LM_XTRMT((int) (unsigned long) buf) && 416 /* fall through */
404 __ATM_LM_XTLOC((int) (unsigned long) buf) > 417 default:
405 __ATM_LM_XTRMT((int) (unsigned long) buf)) { 418 if (compat) {
419#ifdef CONFIG_COMPAT
420 if (!dev->ops->compat_ioctl) {
406 error = -EINVAL; 421 error = -EINVAL;
407 goto done; 422 goto done;
408 } 423 }
409 /* fall through */ 424 size = dev->ops->compat_ioctl(dev, cmd, buf);
410 case ATM_SETCIRANGE:
411 case SONET_GETSTATZ:
412 case SONET_SETDIAG:
413 case SONET_CLRDIAG:
414 case SONET_SETFRAMING:
415 if (!capable(CAP_NET_ADMIN)) {
416 error = -EPERM;
417 goto done;
418 }
419 /* fall through */
420 default:
421 if (compat) {
422#ifdef CONFIG_COMPAT
423 if (!dev->ops->compat_ioctl) {
424 error = -EINVAL;
425 goto done;
426 }
427 size = dev->ops->compat_ioctl(dev, cmd, buf);
428#endif 425#endif
429 } else { 426 } else {
430 if (!dev->ops->ioctl) { 427 if (!dev->ops->ioctl) {
431 error = -EINVAL; 428 error = -EINVAL;
432 goto done;
433 }
434 size = dev->ops->ioctl(dev, cmd, buf);
435 }
436 if (size < 0) {
437 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
438 goto done; 429 goto done;
439 } 430 }
431 size = dev->ops->ioctl(dev, cmd, buf);
432 }
433 if (size < 0) {
434 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
435 goto done;
436 }
440 } 437 }
441 438
442 if (size) 439 if (size)
443 error = put_user(size, sioc_len) 440 error = put_user(size, sioc_len) ? -EFAULT : 0;
444 ? -EFAULT : 0;
445 else 441 else
446 error = 0; 442 error = 0;
447done: 443done:
@@ -449,21 +445,10 @@ done:
449 return error; 445 return error;
450} 446}
451 447
452static __inline__ void *dev_get_idx(loff_t left)
453{
454 struct list_head *p;
455
456 list_for_each(p, &atm_devs) {
457 if (!--left)
458 break;
459 }
460 return (p != &atm_devs) ? p : NULL;
461}
462
463void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) 448void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
464{ 449{
465 mutex_lock(&atm_dev_mutex); 450 mutex_lock(&atm_dev_mutex);
466 return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN; 451 return seq_list_start_head(&atm_devs, *pos);
467} 452}
468 453
469void atm_dev_seq_stop(struct seq_file *seq, void *v) 454void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -473,13 +458,5 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
473 458
474void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 459void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
475{ 460{
476 ++*pos; 461 return seq_list_next(v, &atm_devs, pos);
477 v = (v == SEQ_START_TOKEN)
478 ? atm_devs.next : ((struct list_head *)v)->next;
479 return (v == &atm_devs) ? NULL : v;
480} 462}
481
482
483EXPORT_SYMBOL(atm_dev_register);
484EXPORT_SYMBOL(atm_dev_deregister);
485EXPORT_SYMBOL(atm_dev_lookup);
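The resources.c hunks above drop the hand-rolled dev_get_idx() cursor and the manual ->next walk in favour of the generic seq_file list helpers: seq_list_start_head() returns the list head as a header token when *pos is 0 and resumes at the saved position on later calls, while seq_list_next() advances to the next entry or returns NULL at the end, all under whatever lock the caller already holds. A compact sketch of that trio, using a hypothetical demo_devs list and demo_lock in place of atm_devs/atm_dev_mutex:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static LIST_HEAD(demo_devs);
static DEFINE_MUTEX(demo_lock);

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
        mutex_lock(&demo_lock);
        return seq_list_start_head(&demo_devs, *pos);   /* &demo_devs at *pos == 0 */
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &demo_devs, pos);       /* NULL past the last entry */
}

static void demo_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&demo_lock);
}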
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 229921400522..6ba6e466ee54 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/errno.h> /* error codes */ 7#include <linux/errno.h> /* error codes */
7#include <linux/kernel.h> /* printk */ 8#include <linux/kernel.h> /* printk */
@@ -13,11 +14,11 @@
13#include <linux/atmsvc.h> 14#include <linux/atmsvc.h>
14#include <linux/atmdev.h> 15#include <linux/atmdev.h>
15#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/slab.h>
16 18
17#include "resources.h" 19#include "resources.h"
18#include "signaling.h" 20#include "signaling.h"
19 21
20
21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets 22#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
22 should block until the demon runs. 23 should block until the demon runs.
23 Danger: may cause nasty hangs if the demon 24 Danger: may cause nasty hangs if the demon
@@ -28,60 +29,59 @@ struct atm_vcc *sigd = NULL;
28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); 29static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
29#endif 30#endif
30 31
31
32static void sigd_put_skb(struct sk_buff *skb) 32static void sigd_put_skb(struct sk_buff *skb)
33{ 33{
34#ifdef WAIT_FOR_DEMON 34#ifdef WAIT_FOR_DEMON
35 DECLARE_WAITQUEUE(wait,current); 35 DECLARE_WAITQUEUE(wait, current);
36 36
37 add_wait_queue(&sigd_sleep,&wait); 37 add_wait_queue(&sigd_sleep, &wait);
38 while (!sigd) { 38 while (!sigd) {
39 set_current_state(TASK_UNINTERRUPTIBLE); 39 set_current_state(TASK_UNINTERRUPTIBLE);
40 pr_debug("atmsvc: waiting for signaling demon...\n"); 40 pr_debug("atmsvc: waiting for signaling daemon...\n");
41 schedule(); 41 schedule();
42 } 42 }
43 current->state = TASK_RUNNING; 43 current->state = TASK_RUNNING;
44 remove_wait_queue(&sigd_sleep,&wait); 44 remove_wait_queue(&sigd_sleep, &wait);
45#else 45#else
46 if (!sigd) { 46 if (!sigd) {
47 pr_debug("atmsvc: no signaling demon\n"); 47 pr_debug("atmsvc: no signaling daemon\n");
48 kfree_skb(skb); 48 kfree_skb(skb);
49 return; 49 return;
50 } 50 }
51#endif 51#endif
52 atm_force_charge(sigd,skb->truesize); 52 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue,skb); 53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
55} 55}
56 56
57 57static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
58static void modify_qos(struct atm_vcc *vcc,struct atmsvc_msg *msg)
59{ 58{
60 struct sk_buff *skb; 59 struct sk_buff *skb;
61 60
62 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 61 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
63 !test_bit(ATM_VF_READY,&vcc->flags)) 62 !test_bit(ATM_VF_READY, &vcc->flags))
64 return; 63 return;
65 msg->type = as_error; 64 msg->type = as_error;
66 if (!vcc->dev->ops->change_qos) msg->reply = -EOPNOTSUPP; 65 if (!vcc->dev->ops->change_qos)
66 msg->reply = -EOPNOTSUPP;
67 else { 67 else {
68 /* should lock VCC */ 68 /* should lock VCC */
69 msg->reply = vcc->dev->ops->change_qos(vcc,&msg->qos, 69 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
70 msg->reply); 70 msg->reply);
71 if (!msg->reply) msg->type = as_okay; 71 if (!msg->reply)
72 msg->type = as_okay;
72 } 73 }
73 /* 74 /*
74 * Should probably just turn around the old skb. But the, the buffer 75 * Should probably just turn around the old skb. But the, the buffer
75 * space accounting needs to follow the change too. Maybe later. 76 * space accounting needs to follow the change too. Maybe later.
76 */ 77 */
77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 78 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
78 schedule(); 79 schedule();
79 *(struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)) = *msg; 80 *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
80 sigd_put_skb(skb); 81 sigd_put_skb(skb);
81} 82}
82 83
83 84static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
84static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
85{ 85{
86 struct atmsvc_msg *msg; 86 struct atmsvc_msg *msg;
87 struct atm_vcc *session_vcc; 87 struct atm_vcc *session_vcc;
@@ -90,69 +90,68 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
 	msg = (struct atmsvc_msg *) skb->data;
 	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	vcc = *(struct atm_vcc **) &msg->vcc;
-	pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type,
-		(unsigned long) vcc);
+	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
 	sk = sk_atm(vcc);
 
 	switch (msg->type) {
 	case as_okay:
 		sk->sk_err = -msg->reply;
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
-		if (!*vcc->local.sas_addr.prv &&
-		    !*vcc->local.sas_addr.pub) {
+		if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
 			vcc->local.sas_family = AF_ATMSVC;
 			memcpy(vcc->local.sas_addr.prv,
-			       msg->local.sas_addr.prv,ATM_ESA_LEN);
+			       msg->local.sas_addr.prv, ATM_ESA_LEN);
 			memcpy(vcc->local.sas_addr.pub,
-			       msg->local.sas_addr.pub,ATM_E164_LEN+1);
+			       msg->local.sas_addr.pub, ATM_E164_LEN + 1);
 		}
 		session_vcc = vcc->session ? vcc->session : vcc;
-		if (session_vcc->vpi || session_vcc->vci) break;
+		if (session_vcc->vpi || session_vcc->vci)
+			break;
 		session_vcc->itf = msg->pvc.sap_addr.itf;
 		session_vcc->vpi = msg->pvc.sap_addr.vpi;
 		session_vcc->vci = msg->pvc.sap_addr.vci;
 		if (session_vcc->vpi || session_vcc->vci)
 			session_vcc->qos = msg->qos;
 		break;
 	case as_error:
-		clear_bit(ATM_VF_REGIS,&vcc->flags);
-		clear_bit(ATM_VF_READY,&vcc->flags);
+		clear_bit(ATM_VF_REGIS, &vcc->flags);
+		clear_bit(ATM_VF_READY, &vcc->flags);
 		sk->sk_err = -msg->reply;
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
 		break;
 	case as_indicate:
-		vcc = *(struct atm_vcc **) &msg->listen_vcc;
+		vcc = *(struct atm_vcc **)&msg->listen_vcc;
 		sk = sk_atm(vcc);
 		pr_debug("as_indicate!!!\n");
 		lock_sock(sk);
 		if (sk_acceptq_is_full(sk)) {
-			sigd_enq(NULL,as_reject,vcc,NULL,NULL);
+			sigd_enq(NULL, as_reject, vcc, NULL, NULL);
 			dev_kfree_skb(skb);
 			goto as_indicate_complete;
 		}
 		sk->sk_ack_backlog++;
 		skb_queue_tail(&sk->sk_receive_queue, skb);
 		pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
 		sk->sk_state_change(sk);
 as_indicate_complete:
 		release_sock(sk);
 		return 0;
 	case as_close:
-		set_bit(ATM_VF_RELEASED,&vcc->flags);
+		set_bit(ATM_VF_RELEASED, &vcc->flags);
 		vcc_release_async(vcc, msg->reply);
 		goto out;
 	case as_modify:
-		modify_qos(vcc,msg);
+		modify_qos(vcc, msg);
 		break;
 	case as_addparty:
 	case as_dropparty:
-		sk->sk_err_soft = msg->reply; /* < 0 failure, otherwise ep_ref */
+		sk->sk_err_soft = msg->reply;
+		/* < 0 failure, otherwise ep_ref */
 		clear_bit(ATM_VF_WAITING, &vcc->flags);
 		break;
 	default:
-		printk(KERN_ALERT "sigd_send: bad message type %d\n",
-			(int) msg->type);
+		pr_alert("bad message type %d\n", (int)msg->type);
 		return -EINVAL;
 	}
 	sk->sk_state_change(sk);
 out:
@@ -160,48 +159,52 @@ out:
 	return 0;
 }
 
-
-void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type,
-	struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc,
-	const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply)
+void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
+	       struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
+	       const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
+	       int reply)
 {
 	struct sk_buff *skb;
 	struct atmsvc_msg *msg;
 	static unsigned session = 0;
 
-	pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc);
-	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL)))
+	pr_debug("%d (0x%p)\n", (int)type, vcc);
+	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
 		schedule();
-	msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg));
-	memset(msg,0,sizeof(*msg));
+	msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
+	memset(msg, 0, sizeof(*msg));
 	msg->type = type;
 	*(struct atm_vcc **) &msg->vcc = vcc;
 	*(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
 	msg->reply = reply;
-	if (qos) msg->qos = *qos;
-	if (vcc) msg->sap = vcc->sap;
-	if (svc) msg->svc = *svc;
-	if (vcc) msg->local = vcc->local;
-	if (pvc) msg->pvc = *pvc;
+	if (qos)
+		msg->qos = *qos;
+	if (vcc)
+		msg->sap = vcc->sap;
+	if (svc)
+		msg->svc = *svc;
+	if (vcc)
+		msg->local = vcc->local;
+	if (pvc)
+		msg->pvc = *pvc;
 	if (vcc) {
 		if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
 			msg->session = ++session;
 			/* every new pmp connect gets the next session number */
 	}
 	sigd_put_skb(skb);
-	if (vcc) set_bit(ATM_VF_REGIS,&vcc->flags);
+	if (vcc)
+		set_bit(ATM_VF_REGIS, &vcc->flags);
 }
 
-
-void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type,
-	struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc,
-	const struct sockaddr_atmsvc *svc)
+void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
+	      struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
+	      const struct sockaddr_atmsvc *svc)
 {
-	sigd_enq2(vcc,type,listen_vcc,pvc,svc,vcc ? &vcc->qos : NULL,0);
+	sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
 	/* other ISP applications may use "reply" */
 }
 
-
 static void purge_vcc(struct atm_vcc *vcc)
 {
 	if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
@@ -212,21 +215,20 @@ static void purge_vcc(struct atm_vcc *vcc)
 	}
 }
 
-
 static void sigd_close(struct atm_vcc *vcc)
 {
 	struct hlist_node *node;
 	struct sock *s;
 	int i;
 
-	pr_debug("sigd_close\n");
+	pr_debug("\n");
 	sigd = NULL;
 	if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
-		printk(KERN_ERR "sigd_close: closing with requests pending\n");
+		pr_err("closing with requests pending\n");
 	skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
 
 	read_lock(&vcc_sklist_lock);
-	for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
+	for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
 		struct hlist_head *head = &vcc_hash[i];
 
 		sk_for_each(s, node, head) {
@@ -238,13 +240,11 @@ static void sigd_close(struct atm_vcc *vcc)
 	read_unlock(&vcc_sklist_lock);
 }
 
-
 static struct atmdev_ops sigd_dev_ops = {
 	.close = sigd_close,
 	.send = sigd_send
 };
 
-
 static struct atm_dev sigd_dev = {
 	.ops = &sigd_dev_ops,
 	.type = "sig",
@@ -252,16 +252,16 @@ static struct atm_dev sigd_dev = {
 	.lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
 };
 
-
 int sigd_attach(struct atm_vcc *vcc)
 {
-	if (sigd) return -EADDRINUSE;
-	pr_debug("sigd_attach\n");
+	if (sigd)
+		return -EADDRINUSE;
+	pr_debug("\n");
 	sigd = vcc;
 	vcc->dev = &sigd_dev;
 	vcc_insert_socket(sk_atm(vcc));
-	set_bit(ATM_VF_META,&vcc->flags);
-	set_bit(ATM_VF_READY,&vcc->flags);
+	set_bit(ATM_VF_META, &vcc->flags);
+	set_bit(ATM_VF_READY, &vcc->flags);
 #ifdef WAIT_FOR_DEMON
 	wake_up(&sigd_sleep);
 #endif
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 66e1d9b3e5de..3ba9a45a51ac 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/string.h> 7#include <linux/string.h>
7#include <linux/net.h> /* struct socket, struct proto_ops */ 8#include <linux/net.h> /* struct socket, struct proto_ops */
@@ -18,14 +19,15 @@
18#include <linux/atmdev.h> 19#include <linux/atmdev.h>
19#include <linux/bitops.h> 20#include <linux/bitops.h>
20#include <net/sock.h> /* for sock_no_* */ 21#include <net/sock.h> /* for sock_no_* */
21#include <asm/uaccess.h> 22#include <linux/uaccess.h>
22 23
23#include "resources.h" 24#include "resources.h"
24#include "common.h" /* common for PVCs and SVCs */ 25#include "common.h" /* common for PVCs and SVCs */
25#include "signaling.h" 26#include "signaling.h"
26#include "addr.h" 27#include "addr.h"
27 28
28static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); 29static int svc_create(struct net *net, struct socket *sock, int protocol,
30 int kern);
29 31
30/* 32/*
31 * Note: since all this is still nicely synchronized with the signaling demon, 33 * Note: since all this is still nicely synchronized with the signaling demon,
@@ -34,25 +36,25 @@ static int svc_create(struct net *net, struct socket *sock, int protocol, int ke
34 */ 36 */
35 37
36 38
37static int svc_shutdown(struct socket *sock,int how) 39static int svc_shutdown(struct socket *sock, int how)
38{ 40{
39 return 0; 41 return 0;
40} 42}
41 43
42
43static void svc_disconnect(struct atm_vcc *vcc) 44static void svc_disconnect(struct atm_vcc *vcc)
44{ 45{
45 DEFINE_WAIT(wait); 46 DEFINE_WAIT(wait);
46 struct sk_buff *skb; 47 struct sk_buff *skb;
47 struct sock *sk = sk_atm(vcc); 48 struct sock *sk = sk_atm(vcc);
48 49
49 pr_debug("svc_disconnect %p\n",vcc); 50 pr_debug("%p\n", vcc);
50 if (test_bit(ATM_VF_REGIS,&vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
51 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
52 sigd_enq(vcc,as_close,NULL,NULL,NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
53 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
54 schedule(); 55 schedule();
55 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 56 prepare_to_wait(sk->sk_sleep, &wait,
57 TASK_UNINTERRUPTIBLE);
56 } 58 }
57 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk->sk_sleep, &wait);
58 } 60 }
@@ -61,35 +63,35 @@ static void svc_disconnect(struct atm_vcc *vcc)
61 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 63 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
62 atm_return(vcc, skb->truesize); 64 atm_return(vcc, skb->truesize);
63 pr_debug("LISTEN REL\n"); 65 pr_debug("LISTEN REL\n");
64 sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0); 66 sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
65 dev_kfree_skb(skb); 67 dev_kfree_skb(skb);
66 } 68 }
67 clear_bit(ATM_VF_REGIS, &vcc->flags); 69 clear_bit(ATM_VF_REGIS, &vcc->flags);
68 /* ... may retry later */ 70 /* ... may retry later */
69} 71}
70 72
71
72static int svc_release(struct socket *sock) 73static int svc_release(struct socket *sock)
73{ 74{
74 struct sock *sk = sock->sk; 75 struct sock *sk = sock->sk;
75 struct atm_vcc *vcc; 76 struct atm_vcc *vcc;
76 77
77 if (sk) { 78 if (sk) {
78 vcc = ATM_SD(sock); 79 vcc = ATM_SD(sock);
79 pr_debug("svc_release %p\n", vcc); 80 pr_debug("%p\n", vcc);
80 clear_bit(ATM_VF_READY, &vcc->flags); 81 clear_bit(ATM_VF_READY, &vcc->flags);
81 /* VCC pointer is used as a reference, so we must not free it 82 /*
82 (thereby subjecting it to re-use) before all pending connections 83 * VCC pointer is used as a reference,
83 are closed */ 84 * so we must not free it (thereby subjecting it to re-use)
85 * before all pending connections are closed
86 */
84 svc_disconnect(vcc); 87 svc_disconnect(vcc);
85 vcc_release(sock); 88 vcc_release(sock);
86 } 89 }
87 return 0; 90 return 0;
88} 91}
89 92
90 93static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
91static int svc_bind(struct socket *sock,struct sockaddr *sockaddr, 94 int sockaddr_len)
92 int sockaddr_len)
93{ 95{
94 DEFINE_WAIT(wait); 96 DEFINE_WAIT(wait);
95 struct sock *sk = sock->sk; 97 struct sock *sk = sock->sk;
@@ -114,38 +116,37 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
114 error = -EAFNOSUPPORT; 116 error = -EAFNOSUPPORT;
115 goto out; 117 goto out;
116 } 118 }
117 clear_bit(ATM_VF_BOUND,&vcc->flags); 119 clear_bit(ATM_VF_BOUND, &vcc->flags);
118 /* failing rebind will kill old binding */ 120 /* failing rebind will kill old binding */
119 /* @@@ check memory (de)allocation on rebind */ 121 /* @@@ check memory (de)allocation on rebind */
120 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) { 122 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
121 error = -EBADFD; 123 error = -EBADFD;
122 goto out; 124 goto out;
123 } 125 }
124 vcc->local = *addr; 126 vcc->local = *addr;
125 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
126 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
127 sigd_enq(vcc,as_bind,NULL,NULL,&vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
128 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
129 schedule(); 131 schedule();
130 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
131 } 133 }
132 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk->sk_sleep, &wait);
133 clear_bit(ATM_VF_REGIS,&vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
134 if (!sigd) { 136 if (!sigd) {
135 error = -EUNATCH; 137 error = -EUNATCH;
136 goto out; 138 goto out;
137 } 139 }
138 if (!sk->sk_err) 140 if (!sk->sk_err)
139 set_bit(ATM_VF_BOUND,&vcc->flags); 141 set_bit(ATM_VF_BOUND, &vcc->flags);
140 error = -sk->sk_err; 142 error = -sk->sk_err;
141out: 143out:
142 release_sock(sk); 144 release_sock(sk);
143 return error; 145 return error;
144} 146}
145 147
146 148static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
147static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, 149 int sockaddr_len, int flags)
148 int sockaddr_len,int flags)
149{ 150{
150 DEFINE_WAIT(wait); 151 DEFINE_WAIT(wait);
151 struct sock *sk = sock->sk; 152 struct sock *sk = sock->sk;
@@ -153,7 +154,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
153 struct atm_vcc *vcc = ATM_SD(sock); 154 struct atm_vcc *vcc = ATM_SD(sock);
154 int error; 155 int error;
155 156
156 pr_debug("svc_connect %p\n",vcc); 157 pr_debug("%p\n", vcc);
157 lock_sock(sk); 158 lock_sock(sk);
158 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { 159 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
159 error = -EINVAL; 160 error = -EINVAL;
@@ -201,7 +202,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
201 vcc->remote = *addr; 202 vcc->remote = *addr;
202 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
203 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
204 sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
205 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
206 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk->sk_sleep, &wait);
207 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
@@ -212,7 +213,8 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
212 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
213 schedule(); 214 schedule();
214 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
215 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 216 prepare_to_wait(sk->sk_sleep, &wait,
217 TASK_INTERRUPTIBLE);
216 continue; 218 continue;
217 } 219 }
218 pr_debug("*ABORT*\n"); 220 pr_debug("*ABORT*\n");
@@ -228,20 +230,22 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
228 * Kernel <--okay---- Demon 230 * Kernel <--okay---- Demon
229 * Kernel <--close--- Demon 231 * Kernel <--close--- Demon
230 */ 232 */
231 sigd_enq(vcc,as_close,NULL,NULL,NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
232 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
233 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 235 prepare_to_wait(sk->sk_sleep, &wait,
236 TASK_INTERRUPTIBLE);
234 schedule(); 237 schedule();
235 } 238 }
236 if (!sk->sk_err) 239 if (!sk->sk_err)
237 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
238 && sigd) { 241 sigd) {
239 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 242 prepare_to_wait(sk->sk_sleep, &wait,
243 TASK_INTERRUPTIBLE);
240 schedule(); 244 schedule();
241 } 245 }
242 clear_bit(ATM_VF_REGIS,&vcc->flags); 246 clear_bit(ATM_VF_REGIS, &vcc->flags);
243 clear_bit(ATM_VF_RELEASED,&vcc->flags); 247 clear_bit(ATM_VF_RELEASED, &vcc->flags);
244 clear_bit(ATM_VF_CLOSE,&vcc->flags); 248 clear_bit(ATM_VF_CLOSE, &vcc->flags);
245 /* we're gone now but may connect later */ 249 /* we're gone now but may connect later */
246 error = -EINTR; 250 error = -EINTR;
247 break; 251 break;
@@ -269,37 +273,37 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
269/* 273/*
270 * #endif 274 * #endif
271 */ 275 */
272 if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci))) 276 error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
277 if (!error)
273 sock->state = SS_CONNECTED; 278 sock->state = SS_CONNECTED;
274 else 279 else
275 (void) svc_disconnect(vcc); 280 (void)svc_disconnect(vcc);
276out: 281out:
277 release_sock(sk); 282 release_sock(sk);
278 return error; 283 return error;
279} 284}
280 285
281 286static int svc_listen(struct socket *sock, int backlog)
282static int svc_listen(struct socket *sock,int backlog)
283{ 287{
284 DEFINE_WAIT(wait); 288 DEFINE_WAIT(wait);
285 struct sock *sk = sock->sk; 289 struct sock *sk = sock->sk;
286 struct atm_vcc *vcc = ATM_SD(sock); 290 struct atm_vcc *vcc = ATM_SD(sock);
287 int error; 291 int error;
288 292
289 pr_debug("svc_listen %p\n",vcc); 293 pr_debug("%p\n", vcc);
290 lock_sock(sk); 294 lock_sock(sk);
291 /* let server handle listen on unbound sockets */ 295 /* let server handle listen on unbound sockets */
292 if (test_bit(ATM_VF_SESSION,&vcc->flags)) { 296 if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
293 error = -EINVAL; 297 error = -EINVAL;
294 goto out; 298 goto out;
295 } 299 }
296 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { 300 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
297 error = -EADDRINUSE; 301 error = -EADDRINUSE;
298 goto out; 302 goto out;
299 } 303 }
300 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
301 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
302 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
303 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
304 schedule(); 308 schedule();
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
@@ -309,7 +313,7 @@ static int svc_listen(struct socket *sock,int backlog)
309 error = -EUNATCH; 313 error = -EUNATCH;
310 goto out; 314 goto out;
311 } 315 }
312 set_bit(ATM_VF_LISTEN,&vcc->flags); 316 set_bit(ATM_VF_LISTEN, &vcc->flags);
313 vcc_insert_socket(sk); 317 vcc_insert_socket(sk);
314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; 318 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
315 error = -sk->sk_err; 319 error = -sk->sk_err;
@@ -318,8 +322,7 @@ out:
318 return error; 322 return error;
319} 323}
320 324
321 325static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
322static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
323{ 326{
324 struct sock *sk = sock->sk; 327 struct sock *sk = sock->sk;
325 struct sk_buff *skb; 328 struct sk_buff *skb;
@@ -336,15 +339,16 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
336 339
337 new_vcc = ATM_SD(newsock); 340 new_vcc = ATM_SD(newsock);
338 341
339 pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc); 342 pr_debug("%p -> %p\n", old_vcc, new_vcc);
340 while (1) { 343 while (1) {
341 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
342 345
343 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
344 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
345 sigd) { 348 sigd) {
346 if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break; 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
347 if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) { 350 break;
351 if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
348 error = -sk->sk_err; 352 error = -sk->sk_err;
349 break; 353 break;
350 } 354 }
@@ -359,7 +363,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
359 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
360 break; 364 break;
361 } 365 }
362 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 366 prepare_to_wait(sk->sk_sleep, &wait,
367 TASK_INTERRUPTIBLE);
363 } 368 }
364 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk->sk_sleep, &wait);
365 if (error) 370 if (error)
@@ -368,31 +373,34 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
368 error = -EUNATCH; 373 error = -EUNATCH;
369 goto out; 374 goto out;
370 } 375 }
371 msg = (struct atmsvc_msg *) skb->data; 376 msg = (struct atmsvc_msg *)skb->data;
372 new_vcc->qos = msg->qos; 377 new_vcc->qos = msg->qos;
373 set_bit(ATM_VF_HASQOS,&new_vcc->flags); 378 set_bit(ATM_VF_HASQOS, &new_vcc->flags);
374 new_vcc->remote = msg->svc; 379 new_vcc->remote = msg->svc;
375 new_vcc->local = msg->local; 380 new_vcc->local = msg->local;
376 new_vcc->sap = msg->sap; 381 new_vcc->sap = msg->sap;
377 error = vcc_connect(newsock, msg->pvc.sap_addr.itf, 382 error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
378 msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); 383 msg->pvc.sap_addr.vpi,
384 msg->pvc.sap_addr.vci);
379 dev_kfree_skb(skb); 385 dev_kfree_skb(skb);
380 sk->sk_ack_backlog--; 386 sk->sk_ack_backlog--;
381 if (error) { 387 if (error) {
382 sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL, 388 sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
383 &old_vcc->qos,error); 389 &old_vcc->qos, error);
384 error = error == -EAGAIN ? -EBUSY : error; 390 error = error == -EAGAIN ? -EBUSY : error;
385 goto out; 391 goto out;
386 } 392 }
387 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
388 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
389 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
390 sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
391 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
392 release_sock(sk); 399 release_sock(sk);
393 schedule(); 400 schedule();
394 lock_sock(sk); 401 lock_sock(sk);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
403 TASK_UNINTERRUPTIBLE);
396 } 404 }
397 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
398 if (!sigd) { 406 if (!sigd) {
@@ -412,39 +420,37 @@ out:
412 return error; 420 return error;
413} 421}
414 422
415 423static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
416static int svc_getname(struct socket *sock,struct sockaddr *sockaddr, 424 int *sockaddr_len, int peer)
417 int *sockaddr_len,int peer)
418{ 425{
419 struct sockaddr_atmsvc *addr; 426 struct sockaddr_atmsvc *addr;
420 427
421 *sockaddr_len = sizeof(struct sockaddr_atmsvc); 428 *sockaddr_len = sizeof(struct sockaddr_atmsvc);
422 addr = (struct sockaddr_atmsvc *) sockaddr; 429 addr = (struct sockaddr_atmsvc *) sockaddr;
423 memcpy(addr,peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, 430 memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
424 sizeof(struct sockaddr_atmsvc)); 431 sizeof(struct sockaddr_atmsvc));
425 return 0; 432 return 0;
426} 433}
427 434
428 435int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
429int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
430{ 436{
431 struct sock *sk = sk_atm(vcc); 437 struct sock *sk = sk_atm(vcc);
432 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
433 439
434 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
435 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
436 sigd_enq2(vcc,as_modify,NULL,NULL,&vcc->local,qos,0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
437 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
438 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
439 schedule(); 445 schedule();
440 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
441 } 447 }
442 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk->sk_sleep, &wait);
443 if (!sigd) return -EUNATCH; 449 if (!sigd)
450 return -EUNATCH;
444 return -sk->sk_err; 451 return -sk->sk_err;
445} 452}
446 453
447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 454static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, unsigned int optlen) 455 char __user *optval, unsigned int optlen)
450{ 456{
@@ -454,37 +460,35 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
454 460
455 lock_sock(sk); 461 lock_sock(sk);
456 switch (optname) { 462 switch (optname) {
457 case SO_ATMSAP: 463 case SO_ATMSAP:
458 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { 464 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
459 error = -EINVAL; 465 error = -EINVAL;
460 goto out; 466 goto out;
461 } 467 }
462 if (copy_from_user(&vcc->sap, optval, optlen)) { 468 if (copy_from_user(&vcc->sap, optval, optlen)) {
463 error = -EFAULT; 469 error = -EFAULT;
464 goto out; 470 goto out;
465 } 471 }
466 set_bit(ATM_VF_HASSAP, &vcc->flags); 472 set_bit(ATM_VF_HASSAP, &vcc->flags);
467 break; 473 break;
468 case SO_MULTIPOINT: 474 case SO_MULTIPOINT:
469 if (level != SOL_ATM || optlen != sizeof(int)) { 475 if (level != SOL_ATM || optlen != sizeof(int)) {
470 error = -EINVAL; 476 error = -EINVAL;
471 goto out; 477 goto out;
472 } 478 }
473 if (get_user(value, (int __user *) optval)) { 479 if (get_user(value, (int __user *)optval)) {
474 error = -EFAULT; 480 error = -EFAULT;
475 goto out; 481 goto out;
476 } 482 }
477 if (value == 1) { 483 if (value == 1)
478 set_bit(ATM_VF_SESSION, &vcc->flags); 484 set_bit(ATM_VF_SESSION, &vcc->flags);
479 } else if (value == 0) { 485 else if (value == 0)
480 clear_bit(ATM_VF_SESSION, &vcc->flags); 486 clear_bit(ATM_VF_SESSION, &vcc->flags);
481 } else { 487 else
482 error = -EINVAL; 488 error = -EINVAL;
483 } 489 break;
484 break; 490 default:
485 default: 491 error = vcc_setsockopt(sock, level, optname, optval, optlen);
486 error = vcc_setsockopt(sock, level, optname,
487 optval, optlen);
488 } 492 }
489 493
490out: 494out:
@@ -492,9 +496,8 @@ out:
492 return error; 496 return error;
493} 497}
494 498
495 499static int svc_getsockopt(struct socket *sock, int level, int optname,
496static int svc_getsockopt(struct socket *sock,int level,int optname, 500 char __user *optval, int __user *optlen)
497 char __user *optval,int __user *optlen)
498{ 501{
499 struct sock *sk = sock->sk; 502 struct sock *sk = sock->sk;
500 int error = 0, len; 503 int error = 0, len;
@@ -521,7 +524,6 @@ out:
521 return error; 524 return error;
522} 525}
523 526
524
525static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, 527static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
526 int sockaddr_len, int flags) 528 int sockaddr_len, int flags)
527{ 529{
@@ -540,7 +542,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
540 error = -EINPROGRESS; 542 error = -EINPROGRESS;
541 goto out; 543 goto out;
542 } 544 }
543 pr_debug("svc_addparty added wait queue\n"); 545 pr_debug("added wait queue\n");
544 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
545 schedule(); 547 schedule();
546 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -552,7 +554,6 @@ out:
552 return error; 554 return error;
553} 555}
554 556
555
556static int svc_dropparty(struct socket *sock, int ep_ref) 557static int svc_dropparty(struct socket *sock, int ep_ref)
557{ 558{
558 DEFINE_WAIT(wait); 559 DEFINE_WAIT(wait);
@@ -579,7 +580,6 @@ out:
579 return error; 580 return error;
580} 581}
581 582
582
583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
584{ 584{
585 int error, ep_ref; 585 int error, ep_ref;
@@ -587,29 +587,31 @@ static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
587 struct atm_vcc *vcc = ATM_SD(sock); 587 struct atm_vcc *vcc = ATM_SD(sock);
588 588
589 switch (cmd) { 589 switch (cmd) {
590 case ATM_ADDPARTY: 590 case ATM_ADDPARTY:
591 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 591 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
592 return -EINVAL; 592 return -EINVAL;
593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) 593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
594 return -EFAULT; 594 return -EFAULT;
595 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0); 595 error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
596 break; 596 0);
597 case ATM_DROPPARTY: 597 break;
598 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 598 case ATM_DROPPARTY:
599 return -EINVAL; 599 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
600 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) 600 return -EINVAL;
601 return -EFAULT; 601 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
602 error = svc_dropparty(sock, ep_ref); 602 return -EFAULT;
603 break; 603 error = svc_dropparty(sock, ep_ref);
604 default: 604 break;
605 error = vcc_ioctl(sock, cmd, arg); 605 default:
606 error = vcc_ioctl(sock, cmd, arg);
606 } 607 }
607 608
608 return error; 609 return error;
609} 610}
610 611
611#ifdef CONFIG_COMPAT 612#ifdef CONFIG_COMPAT
612static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 613static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
614 unsigned long arg)
613{ 615{
614 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. 616 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
615 But actually it takes a struct sockaddr_atmsvc, which doesn't need 617 But actually it takes a struct sockaddr_atmsvc, which doesn't need
@@ -660,13 +662,13 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
660 662
661 sock->ops = &svc_proto_ops; 663 sock->ops = &svc_proto_ops;
662 error = vcc_create(net, sock, protocol, AF_ATMSVC); 664 error = vcc_create(net, sock, protocol, AF_ATMSVC);
663 if (error) return error; 665 if (error)
666 return error;
664 ATM_SD(sock)->local.sas_family = AF_ATMSVC; 667 ATM_SD(sock)->local.sas_family = AF_ATMSVC;
665 ATM_SD(sock)->remote.sas_family = AF_ATMSVC; 668 ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
666 return 0; 669 return 0;
667} 670}
668 671
669
670static const struct net_proto_family svc_family_ops = { 672static const struct net_proto_family svc_family_ops = {
671 .family = PF_ATMSVC, 673 .family = PF_ATMSVC,
672 .create = svc_create, 674 .create = svc_create,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5588ba69c468..65c5801261f9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -25,6 +25,7 @@
 #include <linux/string.h>
 #include <linux/sockios.h>
 #include <linux/net.h>
+#include <linux/slab.h>
 #include <net/ax25.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -1863,25 +1864,13 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
 	__acquires(ax25_list_lock)
 {
-	struct ax25_cb *ax25;
-	struct hlist_node *node;
-	int i = 0;
-
 	spin_lock_bh(&ax25_list_lock);
-	ax25_for_each(ax25, node, &ax25_list) {
-		if (i == *pos)
-			return ax25;
-		++i;
-	}
-	return NULL;
+	return seq_hlist_start(&ax25_list, *pos);
 }
 
 static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	++*pos;
-
-	return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
-			struct ax25_cb, ax25_node);
+	return seq_hlist_next(v, &ax25_list, pos);
 }
 
 static void ax25_info_stop(struct seq_file *seq, void *v)
@@ -1892,7 +1881,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
 
 static int ax25_info_show(struct seq_file *seq, void *v)
 {
-	ax25_cb *ax25 = v;
+	ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
 	char buf[11];
 	int k;
 
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index a7a0e0c9698b..c1cb982f6e86 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/slab.h>
12#include <linux/in.h> 13#include <linux/in.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/timer.h> 15#include <linux/timer.h>
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c
index b5e59787be2f..85816e612dc0 100644
--- a/net/ax25/ax25_ds_subr.c
+++ b/net/ax25/ax25_ds_subr.c
@@ -17,6 +17,7 @@
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/gfp.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 71338f112108..5a0dda8df492 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/sockios.h> 18#include <linux/sockios.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/slab.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index de56d3983de0..9bb776541203 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -18,6 +18,7 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/net.h> 20#include <linux/net.h>
21#include <linux/slab.h>
21#include <net/ax25.h> 22#include <net/ax25.h>
22#include <linux/inet.h> 23#include <linux/inet.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index f047a57aa95c..cf0c47a26530 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 14912600ec57..37507d806f65 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -19,6 +19,7 @@
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/net.h> 21#include <linux/net.h>
22#include <linux/slab.h>
22#include <net/ax25.h> 23#include <net/ax25.h>
23#include <linux/inet.h> 24#include <linux/inet.h>
24#include <linux/netdevice.h> 25#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index c833ba4c45a5..7805945a5fd6 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/sockios.h> 24#include <linux/sockios.h>
25#include <linux/net.h> 25#include <linux/net.h>
26#include <linux/slab.h>
26#include <net/ax25.h> 27#include <net/ax25.h>
27#include <linux/inet.h> 28#include <linux/inet.h>
28#include <linux/netdevice.h> 29#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 034aa10a5198..c6715ee4ab8f 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -18,6 +18,7 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/net.h> 20#include <linux/net.h>
21#include <linux/slab.h>
21#include <net/ax25.h> 22#include <net/ax25.h>
22#include <linux/inet.h> 23#include <linux/inet.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 832bcf092a01..d349be9578f5 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -18,6 +18,7 @@
 #include <linux/sockios.h>
 #include <linux/net.h>
 #include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <net/ax25.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -146,31 +147,13 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(ax25_uid_lock)
 {
-	struct ax25_uid_assoc *pt;
-	struct hlist_node *node;
-	int i = 1;
-
 	read_lock(&ax25_uid_lock);
-
-	if (*pos == 0)
-		return SEQ_START_TOKEN;
-
-	ax25_uid_for_each(pt, node, &ax25_uid_list) {
-		if (i == *pos)
-			return pt;
-		++i;
-	}
-	return NULL;
+	return seq_hlist_start_head(&ax25_uid_list, *pos);
 }
 
 static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	++*pos;
-	if (v == SEQ_START_TOKEN)
-		return ax25_uid_list.first;
-	else
-		return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
-				ax25_uid_assoc, uid_node);
+	return seq_hlist_next(v, &ax25_uid_list, pos);
 }
 
 static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -186,8 +169,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
 	else {
-		struct ax25_uid_assoc *pt = v;
+		struct ax25_uid_assoc *pt;
 
+		pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
 		seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
 	}
 	return 0;
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 5159be6b2625..ebe0ef3f1d83 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com) 7 * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com)
8 */ 8 */
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/slab.h>
10#include <linux/sysctl.h> 11#include <linux/sysctl.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 087cc51f5927..404a8500fd03 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,7 +31,6 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/skbuff.h> 34#include <linux/skbuff.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/poll.h> 36#include <linux/poll.h>
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ef09c7b3a858..8062dad6d10d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -35,6 +35,7 @@
35#include <linux/freezer.h> 35#include <linux/freezer.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/socket.h> 41#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e0..5643a2391e76 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h>
29 30
30#include <linux/socket.h> 31#include <linux/socket.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -64,7 +65,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
64 struct sk_buff *skb; 65 struct sk_buff *skb;
65 int size; 66 int size;
66 67
67 BT_DBG("%s mc_count %d", dev->name, dev->mc_count); 68 BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
68 69
69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; 70 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
70 skb = alloc_skb(size, GFP_ATOMIC); 71 skb = alloc_skb(size, GFP_ATOMIC);
@@ -97,7 +98,9 @@ static void bnep_net_set_mc_list(struct net_device *dev)
97 98
98 /* FIXME: We should group addresses here. */ 99 /* FIXME: We should group addresses here. */
99 100
100 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) { 101 for (i = 0;
102 i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
103 i++) {
101 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 104 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
102 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 105 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
103 dmi = dmi->next; 106 dmi = dmi->next;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2ff6ac7b2ed4..2862f53b66b1 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -30,7 +30,6 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h> 33#include <linux/poll.h>
35#include <linux/fcntl.h> 34#include <linux/fcntl.h>
36#include <linux/skbuff.h> 35#include <linux/skbuff.h>
@@ -39,6 +38,7 @@
39#include <linux/file.h> 38#include <linux/file.h>
40#include <linux/init.h> 39#include <linux/init.h>
41#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h>
42#include <net/sock.h> 42#include <net/sock.h>
43 43
44#include <asm/system.h> 44#include <asm/system.h>
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574d..3487cfe74aec 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
 */
 
 #include <linux/module.h>
-
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
 	return "CAPI Message Transport Protocol";
 }
 
-static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl)
+static int cmtp_proc_show(struct seq_file *m, void *v)
 {
+	struct capi_ctr *ctrl = m->private;
 	struct cmtp_session *session = ctrl->driverdata;
 	struct cmtp_application *app;
 	struct list_head *p, *n;
-	int len = 0;
 
-	len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl));
-	len += sprintf(page + len, "addr %s\n", session->name);
-	len += sprintf(page + len, "ctrl %d\n", session->num);
+	seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
+	seq_printf(m, "addr %s\n", session->name);
+	seq_printf(m, "ctrl %d\n", session->num);
 
 	list_for_each_safe(p, n, &session->applications) {
 		app = list_entry(p, struct cmtp_application, list);
-		len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping);
+		seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
 	}
 
-	if (off + count >= len)
-		*eof = 1;
-
-	if (len < off)
-		return 0;
-
-	*start = page + off;
-
-	return ((count < len - off) ? count : len - off);
+	return 0;
 }
 
+static int cmtp_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cmtp_proc_show, PDE(inode)->data);
+}
 
+static const struct file_operations cmtp_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = cmtp_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 int cmtp_attach_device(struct cmtp_session *session)
 {
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
 	session->ctrl.send_message = cmtp_send_message;
 
 	session->ctrl.procinfo = cmtp_procinfo;
-	session->ctrl.ctr_read_proc = cmtp_ctr_read_proc;
+	session->ctrl.proc_fops = &cmtp_proc_fops;
 
 	if (attach_capi_ctr(&session->ctrl) < 0) {
 		BT_ERR("Can't attach new controller");
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 978cc3a718ad..7ea1979a8e4f 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -34,6 +33,7 @@
34#include <linux/ioctl.h> 33#include <linux/ioctl.h>
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h>
37#include <net/sock.h> 37#include <net/sock.h>
38 38
39#include <linux/isdn/capilli.h> 39#include <linux/isdn/capilli.h>
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 94ba34982021..4ad23192c7a5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -491,6 +491,10 @@ int hci_dev_open(__u16 dev)
491 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 491 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
492 set_bit(HCI_RAW, &hdev->flags); 492 set_bit(HCI_RAW, &hdev->flags);
493 493
494 /* Treat all non BR/EDR controllers as raw devices for now */
495 if (hdev->dev_type != HCI_BREDR)
496 set_bit(HCI_RAW, &hdev->flags);
497
494 if (hdev->open(hdev)) { 498 if (hdev->open(hdev)) {
495 ret = -EIO; 499 ret = -EIO;
496 goto done; 500 goto done;
@@ -797,7 +801,7 @@ int hci_get_dev_info(void __user *arg)
797 801
798 strcpy(di.name, hdev->name); 802 strcpy(di.name, hdev->name);
799 di.bdaddr = hdev->bdaddr; 803 di.bdaddr = hdev->bdaddr;
800 di.type = hdev->type; 804 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
801 di.flags = hdev->flags; 805 di.flags = hdev->flags;
802 di.pkt_type = hdev->pkt_type; 806 di.pkt_type = hdev->pkt_type;
803 di.acl_mtu = hdev->acl_mtu; 807 di.acl_mtu = hdev->acl_mtu;
@@ -869,8 +873,8 @@ int hci_register_dev(struct hci_dev *hdev)
869 struct list_head *head = &hci_dev_list, *p; 873 struct list_head *head = &hci_dev_list, *p;
870 int i, id = 0; 874 int i, id = 0;
871 875
872 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, 876 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
873 hdev->type, hdev->owner); 877 hdev->bus, hdev->owner);
874 878
875 if (!hdev->open || !hdev->close || !hdev->destruct) 879 if (!hdev->open || !hdev->close || !hdev->destruct)
876 return -EINVAL; 880 return -EINVAL;
@@ -946,7 +950,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
946{ 950{
947 int i; 951 int i;
948 952
949 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 953 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
950 954
951 write_lock_bh(&hci_dev_list_lock); 955 write_lock_bh(&hci_dev_list_lock);
952 list_del(&hdev->list); 956 list_del(&hdev->list);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 592da5c909c1..6c57fc71c7e2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1698,6 +1698,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1698 hci_conn_add_sysfs(conn); 1698 hci_conn_add_sysfs(conn);
1699 break; 1699 break;
1700 1700
1701 case 0x11: /* Unsupported Feature or Parameter Value */
1701 case 0x1c: /* SCO interval rejected */ 1702 case 0x1c: /* SCO interval rejected */
1702 case 0x1a: /* Unsupported Remote Feature */ 1703 case 0x1a: /* Unsupported Remote Feature */
1703 case 0x1f: /* Unspecified error */ 1704 case 0x1f: /* Unspecified error */
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 688cfebfbee0..38f08f6b86f6 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -329,6 +329,9 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
329 } 329 }
330 330
331 if (mask & HCI_CMSG_TSTAMP) { 331 if (mask & HCI_CMSG_TSTAMP) {
332#ifdef CONFIG_COMPAT
333 struct compat_timeval ctv;
334#endif
332 struct timeval tv; 335 struct timeval tv;
333 void *data; 336 void *data;
334 int len; 337 int len;
@@ -339,7 +342,6 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
339 len = sizeof(tv); 342 len = sizeof(tv);
340#ifdef CONFIG_COMPAT 343#ifdef CONFIG_COMPAT
341 if (msg->msg_flags & MSG_CMSG_COMPAT) { 344 if (msg->msg_flags & MSG_CMSG_COMPAT) {
342 struct compat_timeval ctv;
343 ctv.tv_sec = tv.tv_sec; 345 ctv.tv_sec = tv.tv_sec;
344 ctv.tv_usec = tv.tv_usec; 346 ctv.tv_usec = tv.tv_usec;
345 data = &ctv; 347 data = &ctv;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 2bc6f6a8de68..0e8e1a59856c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,13 +1,18 @@
 /* Bluetooth HCI driver model support. */
 
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-struct class *bt_class = NULL;
-EXPORT_SYMBOL_GPL(bt_class);
+static struct class *bt_class;
+
+struct dentry *bt_debugfs = NULL;
+EXPORT_SYMBOL_GPL(bt_debugfs);
 
 static struct workqueue_struct *bt_workq;
 
@@ -166,9 +171,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
166 queue_work(bt_workq, &conn->work_del); 171 queue_work(bt_workq, &conn->work_del);
167} 172}
168 173
169static inline char *host_typetostr(int type) 174static inline char *host_bustostr(int bus)
170{ 175{
171 switch (type) { 176 switch (bus) {
172 case HCI_VIRTUAL: 177 case HCI_VIRTUAL:
173 return "VIRTUAL"; 178 return "VIRTUAL";
174 case HCI_USB: 179 case HCI_USB:
@@ -188,10 +193,28 @@ static inline char *host_typetostr(int type)
188 } 193 }
189} 194}
190 195
196static inline char *host_typetostr(int type)
197{
198 switch (type) {
199 case HCI_BREDR:
200 return "BR/EDR";
201 case HCI_80211:
202 return "802.11";
203 default:
204 return "UNKNOWN";
205 }
206}
207
208static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
209{
210 struct hci_dev *hdev = dev_get_drvdata(dev);
211 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
212}
213
191static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 214static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
192{ 215{
193 struct hci_dev *hdev = dev_get_drvdata(dev); 216 struct hci_dev *hdev = dev_get_drvdata(dev);
194 return sprintf(buf, "%s\n", host_typetostr(hdev->type)); 217 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
195} 218}
196 219
197static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 220static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -251,32 +274,6 @@ static ssize_t show_hci_revision(struct device *dev, struct device_attribute *at
251 return sprintf(buf, "%d\n", hdev->hci_rev); 274 return sprintf(buf, "%d\n", hdev->hci_rev);
252} 275}
253 276
254static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
255{
256 struct hci_dev *hdev = dev_get_drvdata(dev);
257 struct inquiry_cache *cache = &hdev->inq_cache;
258 struct inquiry_entry *e;
259 int n = 0;
260
261 hci_dev_lock_bh(hdev);
262
263 for (e = cache->list; e; e = e->next) {
264 struct inquiry_data *data = &e->data;
265 bdaddr_t bdaddr;
266 baswap(&bdaddr, &data->bdaddr);
267 n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
268 batostr(&bdaddr),
269 data->pscan_rep_mode, data->pscan_period_mode,
270 data->pscan_mode, data->dev_class[2],
271 data->dev_class[1], data->dev_class[0],
272 __le16_to_cpu(data->clock_offset),
273 data->rssi, data->ssp_mode, e->timestamp);
274 }
275
276 hci_dev_unlock_bh(hdev);
277 return n;
278}
279
280static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 277static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
281{ 278{
282 struct hci_dev *hdev = dev_get_drvdata(dev); 279 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -355,6 +352,7 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
355 return count; 352 return count;
356} 353}
357 354
355static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
358static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 356static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
359static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 357static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
360static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); 358static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
@@ -363,7 +361,6 @@ static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
363static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); 361static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
364static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); 362static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
365static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 363static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
366static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
367 364
368static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 365static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
369 show_idle_timeout, store_idle_timeout); 366 show_idle_timeout, store_idle_timeout);
@@ -373,6 +370,7 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
373 show_sniff_min_interval, store_sniff_min_interval); 370 show_sniff_min_interval, store_sniff_min_interval);
374 371
375static struct attribute *bt_host_attrs[] = { 372static struct attribute *bt_host_attrs[] = {
373 &dev_attr_bus.attr,
376 &dev_attr_type.attr, 374 &dev_attr_type.attr,
377 &dev_attr_name.attr, 375 &dev_attr_name.attr,
378 &dev_attr_class.attr, 376 &dev_attr_class.attr,
@@ -381,7 +379,6 @@ static struct attribute *bt_host_attrs[] = {
381 &dev_attr_manufacturer.attr, 379 &dev_attr_manufacturer.attr,
382 &dev_attr_hci_version.attr, 380 &dev_attr_hci_version.attr,
383 &dev_attr_hci_revision.attr, 381 &dev_attr_hci_revision.attr,
384 &dev_attr_inquiry_cache.attr,
385 &dev_attr_idle_timeout.attr, 382 &dev_attr_idle_timeout.attr,
386 &dev_attr_sniff_max_interval.attr, 383 &dev_attr_sniff_max_interval.attr,
387 &dev_attr_sniff_min_interval.attr, 384 &dev_attr_sniff_min_interval.attr,
@@ -409,12 +406,50 @@ static struct device_type bt_host = {
409 .release = bt_host_release, 406 .release = bt_host_release,
410}; 407};
411 408
409static int inquiry_cache_show(struct seq_file *f, void *p)
410{
411 struct hci_dev *hdev = f->private;
412 struct inquiry_cache *cache = &hdev->inq_cache;
413 struct inquiry_entry *e;
414
415 hci_dev_lock_bh(hdev);
416
417 for (e = cache->list; e; e = e->next) {
418 struct inquiry_data *data = &e->data;
419 bdaddr_t bdaddr;
420 baswap(&bdaddr, &data->bdaddr);
421 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
422 batostr(&bdaddr),
423 data->pscan_rep_mode, data->pscan_period_mode,
424 data->pscan_mode, data->dev_class[2],
425 data->dev_class[1], data->dev_class[0],
426 __le16_to_cpu(data->clock_offset),
427 data->rssi, data->ssp_mode, e->timestamp);
428 }
429
430 hci_dev_unlock_bh(hdev);
431
432 return 0;
433}
434
435static int inquiry_cache_open(struct inode *inode, struct file *file)
436{
437 return single_open(file, inquiry_cache_show, inode->i_private);
438}
439
440static const struct file_operations inquiry_cache_fops = {
441 .open = inquiry_cache_open,
442 .read = seq_read,
443 .llseek = seq_lseek,
444 .release = single_release,
445};
446
412int hci_register_sysfs(struct hci_dev *hdev) 447int hci_register_sysfs(struct hci_dev *hdev)
413{ 448{
414 struct device *dev = &hdev->dev; 449 struct device *dev = &hdev->dev;
415 int err; 450 int err;
416 451
417 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 452 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
418 453
419 dev->type = &bt_host; 454 dev->type = &bt_host;
420 dev->class = bt_class; 455 dev->class = bt_class;
@@ -428,12 +463,24 @@ int hci_register_sysfs(struct hci_dev *hdev)
428 if (err < 0) 463 if (err < 0)
429 return err; 464 return err;
430 465
466 if (!bt_debugfs)
467 return 0;
468
469 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
470 if (!hdev->debugfs)
471 return 0;
472
473 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
474 hdev, &inquiry_cache_fops);
475
431 return 0; 476 return 0;
432} 477}
433 478
434void hci_unregister_sysfs(struct hci_dev *hdev) 479void hci_unregister_sysfs(struct hci_dev *hdev)
435{ 480{
436 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 481 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
482
483 debugfs_remove_recursive(hdev->debugfs);
437 484
438 device_del(&hdev->dev); 485 device_del(&hdev->dev);
439} 486}
@@ -444,6 +491,8 @@ int __init bt_sysfs_init(void)
444 if (!bt_workq) 491 if (!bt_workq)
445 return -ENOMEM; 492 return -ENOMEM;
446 493
494 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
495
447 bt_class = class_create(THIS_MODULE, "bluetooth"); 496 bt_class = class_create(THIS_MODULE, "bluetooth");
448 if (IS_ERR(bt_class)) { 497 if (IS_ERR(bt_class)) {
449 destroy_workqueue(bt_workq); 498 destroy_workqueue(bt_workq);
@@ -455,7 +504,9 @@ int __init bt_sysfs_init(void)
455 504
456void bt_sysfs_cleanup(void) 505void bt_sysfs_cleanup(void)
457{ 506{
458 destroy_workqueue(bt_workq);
459
460 class_destroy(bt_class); 507 class_destroy(bt_class);
508
509 debugfs_remove_recursive(bt_debugfs);
510
511 destroy_workqueue(bt_workq);
461} 512}
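The hci_sysfs.c hunk above, and the L2CAP, RFCOMM and SCO hunks further down, all replace multi-line sysfs class attributes with debugfs files driven by the seq_file single_open() helpers. Below is a minimal sketch of that pattern as a self-contained module; the names (demo_dir, demo_show, demo_fops, "demo", "state") are invented for illustration, and only the debugfs and seq_file calls themselves are the APIs the patch uses.

/*
 * Minimal sketch of the debugfs + seq_file pattern used in the hunks above.
 * demo_* names are hypothetical; debugfs_create_dir/file, single_open,
 * seq_printf, seq_read, seq_lseek and single_release are the real helpers.
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_show(struct seq_file *f, void *p)
{
	/* one seq_printf() per record replaces the old sprintf(buf + n, ...) */
	seq_printf(f, "hello from debugfs\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the data pointer given to debugfs_create_file() */
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return 0;	/* debugfs unavailable: carry on without it */

	debugfs_create_file("state", 0444, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Reading the file simply runs demo_show() once per open, which is why the patch can drop the old str += sprintf(str, ...) buffer arithmetic and its PAGE_SIZE limit.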
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fc6ec1e72652..280529ad9274 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -313,10 +313,21 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
313 return hidp_queue_report(session, buf, rsize); 313 return hidp_queue_report(session, buf, rsize);
314} 314}
315 315
316static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) 316static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
317 unsigned char report_type)
317{ 318{
318 if (hidp_send_ctrl_message(hid->driver_data, 319 switch (report_type) {
319 HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE, 320 case HID_FEATURE_REPORT:
321 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
322 break;
323 case HID_OUTPUT_REPORT:
324 report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
325 break;
326 default:
327 return -EINVAL;
328 }
329
330 if (hidp_send_ctrl_message(hid->driver_data, report_type,
320 data, count)) 331 data, count))
321 return -ENOMEM; 332 return -ENOMEM;
322 return count; 333 return count;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 9cfef68b9fec..250dfd46237d 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -35,6 +34,7 @@
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h> 38#include <net/sock.h>
39 39
40#include "hidp.h" 40#include "hidp.h"
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 400efa26ddba..9753b690a8b3 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <linux/uaccess.h> 45#include <linux/uaccess.h>
44#include <linux/crc16.h> 46#include <linux/crc16.h>
45#include <net/sock.h> 47#include <net/sock.h>
@@ -1000,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1000 1002
1001 BT_DBG("sk %p", sk); 1003 BT_DBG("sk %p", sk);
1002 1004
1003 if (!addr || addr->sa_family != AF_BLUETOOTH) 1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL; 1007 return -EINVAL;
1005 1008
1006 memset(&la, 0, sizeof(la)); 1009 memset(&la, 0, sizeof(la));
@@ -1623,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1623 /* Connectionless channel */ 1626 /* Connectionless channel */
1624 if (sk->sk_type == SOCK_DGRAM) { 1627 if (sk->sk_type == SOCK_DGRAM) {
1625 skb = l2cap_create_connless_pdu(sk, msg, len); 1628 skb = l2cap_create_connless_pdu(sk, msg, len);
1626 err = l2cap_do_send(sk, skb); 1629 if (IS_ERR(skb))
1630 err = PTR_ERR(skb);
1631 else
1632 err = l2cap_do_send(sk, skb);
1627 goto done; 1633 goto done;
1628 } 1634 }
1629 1635
@@ -2830,6 +2836,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2830 int len = cmd->len - sizeof(*rsp); 2836 int len = cmd->len - sizeof(*rsp);
2831 char req[64]; 2837 char req[64];
2832 2838
2839 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2840 l2cap_send_disconn_req(conn, sk);
2841 goto done;
2842 }
2843
2833 /* throw out any old stored conf requests */ 2844 /* throw out any old stored conf requests */
2834 result = L2CAP_CONF_SUCCESS; 2845 result = L2CAP_CONF_SUCCESS;
2835 len = l2cap_parse_conf_rsp(sk, rsp->data, 2846 len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3937,29 +3948,42 @@ drop:
3937 return 0; 3948 return 0;
3938} 3949}
3939 3950
3940static ssize_t l2cap_sysfs_show(struct class *dev, char *buf) 3951static int l2cap_debugfs_show(struct seq_file *f, void *p)
3941{ 3952{
3942 struct sock *sk; 3953 struct sock *sk;
3943 struct hlist_node *node; 3954 struct hlist_node *node;
3944 char *str = buf;
3945 3955
3946 read_lock_bh(&l2cap_sk_list.lock); 3956 read_lock_bh(&l2cap_sk_list.lock);
3947 3957
3948 sk_for_each(sk, node, &l2cap_sk_list.head) { 3958 sk_for_each(sk, node, &l2cap_sk_list.head) {
3949 struct l2cap_pinfo *pi = l2cap_pi(sk); 3959 struct l2cap_pinfo *pi = l2cap_pi(sk);
3950 3960
3951 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3961 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3952 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 3962 batostr(&bt_sk(sk)->src),
3953 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, 3963 batostr(&bt_sk(sk)->dst),
3954 pi->dcid, pi->imtu, pi->omtu, pi->sec_level); 3964 sk->sk_state, __le16_to_cpu(pi->psm),
3965 pi->scid, pi->dcid,
3966 pi->imtu, pi->omtu, pi->sec_level);
3955 } 3967 }
3956 3968
3957 read_unlock_bh(&l2cap_sk_list.lock); 3969 read_unlock_bh(&l2cap_sk_list.lock);
3958 3970
3959 return str - buf; 3971 return 0;
3960} 3972}
3961 3973
3962static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 3974static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3975{
3976 return single_open(file, l2cap_debugfs_show, inode->i_private);
3977}
3978
3979static const struct file_operations l2cap_debugfs_fops = {
3980 .open = l2cap_debugfs_open,
3981 .read = seq_read,
3982 .llseek = seq_lseek,
3983 .release = single_release,
3984};
3985
3986static struct dentry *l2cap_debugfs;
3963 3987
3964static const struct proto_ops l2cap_sock_ops = { 3988static const struct proto_ops l2cap_sock_ops = {
3965 .family = PF_BLUETOOTH, 3989 .family = PF_BLUETOOTH,
@@ -4019,8 +4043,12 @@ static int __init l2cap_init(void)
4019 goto error; 4043 goto error;
4020 } 4044 }
4021 4045
4022 if (class_create_file(bt_class, &class_attr_l2cap) < 0) 4046 if (bt_debugfs) {
4023 BT_ERR("Failed to create L2CAP info file"); 4047 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4048 bt_debugfs, NULL, &l2cap_debugfs_fops);
4049 if (!l2cap_debugfs)
4050 BT_ERR("Failed to create L2CAP debug file");
4051 }
4024 4052
4025 BT_INFO("L2CAP ver %s", VERSION); 4053 BT_INFO("L2CAP ver %s", VERSION);
4026 BT_INFO("L2CAP socket layer initialized"); 4054 BT_INFO("L2CAP socket layer initialized");
@@ -4034,7 +4062,7 @@ error:
4034 4062
4035static void __exit l2cap_exit(void) 4063static void __exit l2cap_exit(void)
4036{ 4064{
4037 class_remove_file(bt_class, &class_attr_l2cap); 4065 debugfs_remove(l2cap_debugfs);
4038 4066
4039 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 4067 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4040 BT_ERR("L2CAP socket unregistration failed"); 4068 BT_ERR("L2CAP socket unregistration failed");
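The sendmsg hunk above makes l2cap_create_connless_pdu() failures visible through the kernel's ERR_PTR convention instead of handing a bad pointer to l2cap_do_send(). As a reminder of how that convention is consumed, here is a small sketch fragment with invented names (demo_alloc, demo_user); only the <linux/err.h> helpers ERR_PTR/IS_ERR/PTR_ERR and kmalloc/kfree are real APIs, and the fragment is meant to sit inside a module, not to stand alone.

#include <linux/err.h>
#include <linux/slab.h>

static void *demo_alloc(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */

	return p;
}

static int demo_user(void)
{
	void *p = demo_alloc(128);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* recover the errno; never dereference */

	kfree(p);
	return 0;
}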
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 89f4a59eb82b..7dca91bb8c57 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,9 +33,12 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
36#include <linux/net.h> 38#include <linux/net.h>
37#include <linux/mutex.h> 39#include <linux/mutex.h>
38#include <linux/kthread.h> 40#include <linux/kthread.h>
41#include <linux/slab.h>
39 42
40#include <net/sock.h> 43#include <net/sock.h>
41#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -2098,11 +2101,10 @@ static struct hci_cb rfcomm_cb = {
2098 .security_cfm = rfcomm_security_cfm 2101 .security_cfm = rfcomm_security_cfm
2099}; 2102};
2100 2103
2101static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) 2104static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2102{ 2105{
2103 struct rfcomm_session *s; 2106 struct rfcomm_session *s;
2104 struct list_head *pp, *p; 2107 struct list_head *pp, *p;
2105 char *str = buf;
2106 2108
2107 rfcomm_lock(); 2109 rfcomm_lock();
2108 2110
@@ -2112,18 +2114,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
2112 struct sock *sk = s->sock->sk; 2114 struct sock *sk = s->sock->sk;
2113 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); 2115 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2114 2116
2115 str += sprintf(str, "%s %s %ld %d %d %d %d\n", 2117 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2116 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2118 batostr(&bt_sk(sk)->src),
2117 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); 2119 batostr(&bt_sk(sk)->dst),
2120 d->state, d->dlci, d->mtu,
2121 d->rx_credits, d->tx_credits);
2118 } 2122 }
2119 } 2123 }
2120 2124
2121 rfcomm_unlock(); 2125 rfcomm_unlock();
2122 2126
2123 return (str - buf); 2127 return 0;
2124} 2128}
2125 2129
2126static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); 2130static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
2131{
2132 return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
2133}
2134
2135static const struct file_operations rfcomm_dlc_debugfs_fops = {
2136 .open = rfcomm_dlc_debugfs_open,
2137 .read = seq_read,
2138 .llseek = seq_lseek,
2139 .release = single_release,
2140};
2141
2142static struct dentry *rfcomm_dlc_debugfs;
2127 2143
2128/* ---- Initialization ---- */ 2144/* ---- Initialization ---- */
2129static int __init rfcomm_init(void) 2145static int __init rfcomm_init(void)
@@ -2140,8 +2156,12 @@ static int __init rfcomm_init(void)
2140 goto unregister; 2156 goto unregister;
2141 } 2157 }
2142 2158
2143 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2159 if (bt_debugfs) {
2144 BT_ERR("Failed to create RFCOMM info file"); 2160 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2161 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2162 if (!rfcomm_dlc_debugfs)
2163 BT_ERR("Failed to create RFCOMM debug file");
2164 }
2145 2165
2146 err = rfcomm_init_ttys(); 2166 err = rfcomm_init_ttys();
2147 if (err < 0) 2167 if (err < 0)
@@ -2169,7 +2189,7 @@ unregister:
2169 2189
2170static void __exit rfcomm_exit(void) 2190static void __exit rfcomm_exit(void)
2171{ 2191{
2172 class_remove_file(bt_class, &class_attr_rfcomm_dlc); 2192 debugfs_remove(rfcomm_dlc_debugfs);
2173 2193
2174 hci_unregister_cb(&rfcomm_cb); 2194 hci_unregister_cb(&rfcomm_cb);
2175 2195
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4b5968dda673..8ed3c37684fa 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <net/sock.h> 45#include <net/sock.h>
44 46
45#include <asm/system.h> 47#include <asm/system.h>
@@ -395,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
395 397
396 BT_DBG("sk %p", sk); 398 BT_DBG("sk %p", sk);
397 399
398 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) 400 if (alen < sizeof(struct sockaddr_rc) ||
401 addr->sa_family != AF_BLUETOOTH)
399 return -EINVAL; 402 return -EINVAL;
400 403
401 lock_sock(sk); 404 lock_sock(sk);
@@ -1061,26 +1064,38 @@ done:
1061 return result; 1064 return result;
1062} 1065}
1063 1066
1064static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf) 1067static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
1065{ 1068{
1066 struct sock *sk; 1069 struct sock *sk;
1067 struct hlist_node *node; 1070 struct hlist_node *node;
1068 char *str = buf;
1069 1071
1070 read_lock_bh(&rfcomm_sk_list.lock); 1072 read_lock_bh(&rfcomm_sk_list.lock);
1071 1073
1072 sk_for_each(sk, node, &rfcomm_sk_list.head) { 1074 sk_for_each(sk, node, &rfcomm_sk_list.head) {
1073 str += sprintf(str, "%s %s %d %d\n", 1075 seq_printf(f, "%s %s %d %d\n",
1074 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 1076 batostr(&bt_sk(sk)->src),
1077 batostr(&bt_sk(sk)->dst),
1075 sk->sk_state, rfcomm_pi(sk)->channel); 1078 sk->sk_state, rfcomm_pi(sk)->channel);
1076 } 1079 }
1077 1080
1078 read_unlock_bh(&rfcomm_sk_list.lock); 1081 read_unlock_bh(&rfcomm_sk_list.lock);
1079 1082
1080 return (str - buf); 1083 return 0;
1081} 1084}
1082 1085
1083static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); 1086static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
1087{
1088 return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
1089}
1090
1091static const struct file_operations rfcomm_sock_debugfs_fops = {
1092 .open = rfcomm_sock_debugfs_open,
1093 .read = seq_read,
1094 .llseek = seq_lseek,
1095 .release = single_release,
1096};
1097
1098static struct dentry *rfcomm_sock_debugfs;
1084 1099
1085static const struct proto_ops rfcomm_sock_ops = { 1100static const struct proto_ops rfcomm_sock_ops = {
1086 .family = PF_BLUETOOTH, 1101 .family = PF_BLUETOOTH,
@@ -1120,8 +1135,12 @@ int __init rfcomm_init_sockets(void)
1120 if (err < 0) 1135 if (err < 0)
1121 goto error; 1136 goto error;
1122 1137
1123 if (class_create_file(bt_class, &class_attr_rfcomm) < 0) 1138 if (bt_debugfs) {
1124 BT_ERR("Failed to create RFCOMM info file"); 1139 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1140 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1141 if (!rfcomm_sock_debugfs)
1142 BT_ERR("Failed to create RFCOMM debug file");
1143 }
1125 1144
1126 BT_INFO("RFCOMM socket layer initialized"); 1145 BT_INFO("RFCOMM socket layer initialized");
1127 1146
@@ -1135,7 +1154,7 @@ error:
1135 1154
1136void rfcomm_cleanup_sockets(void) 1155void rfcomm_cleanup_sockets(void)
1137{ 1156{
1138 class_remove_file(bt_class, &class_attr_rfcomm); 1157 debugfs_remove(rfcomm_sock_debugfs);
1139 1158
1140 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1159 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
1141 BT_ERR("RFCOMM socket layer unregistration failed"); 1160 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dd8f6ec57dce..ca6b2ad1c3fc 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -38,6 +38,8 @@
38#include <linux/socket.h> 38#include <linux/socket.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <net/sock.h> 44#include <net/sock.h>
43 45
@@ -497,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
497 499
498 BT_DBG("sk %p", sk); 500 BT_DBG("sk %p", sk);
499 501
500 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) 502 if (alen < sizeof(struct sockaddr_sco) ||
503 addr->sa_family != AF_BLUETOOTH)
501 return -EINVAL; 504 return -EINVAL;
502 505
503 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 506 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
@@ -953,26 +956,36 @@ drop:
953 return 0; 956 return 0;
954} 957}
955 958
956static ssize_t sco_sysfs_show(struct class *dev, char *buf) 959static int sco_debugfs_show(struct seq_file *f, void *p)
957{ 960{
958 struct sock *sk; 961 struct sock *sk;
959 struct hlist_node *node; 962 struct hlist_node *node;
960 char *str = buf;
961 963
962 read_lock_bh(&sco_sk_list.lock); 964 read_lock_bh(&sco_sk_list.lock);
963 965
964 sk_for_each(sk, node, &sco_sk_list.head) { 966 sk_for_each(sk, node, &sco_sk_list.head) {
965 str += sprintf(str, "%s %s %d\n", 967 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
966 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 968 batostr(&bt_sk(sk)->dst), sk->sk_state);
967 sk->sk_state);
968 } 969 }
969 970
970 read_unlock_bh(&sco_sk_list.lock); 971 read_unlock_bh(&sco_sk_list.lock);
971 972
972 return (str - buf); 973 return 0;
973} 974}
974 975
975static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); 976static int sco_debugfs_open(struct inode *inode, struct file *file)
977{
978 return single_open(file, sco_debugfs_show, inode->i_private);
979}
980
981static const struct file_operations sco_debugfs_fops = {
982 .open = sco_debugfs_open,
983 .read = seq_read,
984 .llseek = seq_lseek,
985 .release = single_release,
986};
987
988static struct dentry *sco_debugfs;
976 989
977static const struct proto_ops sco_sock_ops = { 990static const struct proto_ops sco_sock_ops = {
978 .family = PF_BLUETOOTH, 991 .family = PF_BLUETOOTH,
@@ -1030,8 +1043,12 @@ static int __init sco_init(void)
1030 goto error; 1043 goto error;
1031 } 1044 }
1032 1045
1033 if (class_create_file(bt_class, &class_attr_sco) < 0) 1046 if (bt_debugfs) {
1034 BT_ERR("Failed to create SCO info file"); 1047 sco_debugfs = debugfs_create_file("sco", 0444,
1048 bt_debugfs, NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file");
1051 }
1035 1052
1036 BT_INFO("SCO (Voice Link) ver %s", VERSION); 1053 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1037 BT_INFO("SCO socket layer initialized"); 1054 BT_INFO("SCO socket layer initialized");
@@ -1045,7 +1062,7 @@ error:
1045 1062
1046static void __exit sco_exit(void) 1063static void __exit sco_exit(void)
1047{ 1064{
1048 class_remove_file(bt_class, &class_attr_sco); 1065 debugfs_remove(sco_debugfs);
1049 1066
1050 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1067 if (bt_sock_unregister(BTPROTO_SCO) < 0)
1051 BT_ERR("SCO socket unregistration failed"); 1068 BT_ERR("SCO socket unregistration failed");
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index e143ca678881..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -31,3 +31,17 @@ config BRIDGE
31 will be called bridge. 31 will be called bridge.
32 32
33 If unsure, say N. 33 If unsure, say N.
34
35config BRIDGE_IGMP_SNOOPING
36 bool "IGMP snooping"
37 depends on BRIDGE
38 depends on INET
39 default y
40 ---help---
41 If you say Y here, then the Ethernet bridge will be able selectively
42 forward multicast traffic based on IGMP traffic received from each
43 port.
44
45 Say N to exclude this support and reduce the binary size.
46
47 If unsure, say Y.
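The new option only builds br_multicast.o when it is enabled (see the Makefile hunk that follows), yet br_dev_xmit() and br_handle_frame_finish() above call br_multicast_*() unconditionally. The usual way to make that work is for the private header to supply static inline no-op stubs when the option is off; br_private.h presumably does so here, but that file is not part of this excerpt. A generic sketch of the pattern, with invented names (CONFIG_DEMO_FEATURE, demo_feature_rcv):

struct sk_buff;				/* forward declaration only */

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_rcv(struct sk_buff *skb);	/* real code, built only when the option is set */
#else
static inline int demo_feature_rcv(struct sk_buff *skb)
{
	return 0;	/* compiled out: report "nothing consumed" to the caller */
}
#endif

The Makefile line bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o in the next hunk then simply drops the object file when the option is n.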
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index f444c12cde5a..d0359ea8ee79 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,4 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
12 12
13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o 13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
16
15obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 1a99c4e04e85..90a9024e5c1e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
25 struct net_bridge *br = netdev_priv(dev); 25 struct net_bridge *br = netdev_priv(dev);
26 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
27 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
28 struct net_bridge_mdb_entry *mdst;
29
30 BR_INPUT_SKB_CB(skb)->brdev = dev;
28 31
29 dev->stats.tx_packets++; 32 dev->stats.tx_packets++;
30 dev->stats.tx_bytes += skb->len; 33 dev->stats.tx_bytes += skb->len;
@@ -32,13 +35,21 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
32 skb_reset_mac_header(skb); 35 skb_reset_mac_header(skb);
33 skb_pull(skb, ETH_HLEN); 36 skb_pull(skb, ETH_HLEN);
34 37
35 if (dest[0] & 1) 38 if (dest[0] & 1) {
36 br_flood_deliver(br, skb); 39 if (br_multicast_rcv(br, NULL, skb))
37 else if ((dst = __br_fdb_get(br, dest)) != NULL) 40 goto out;
41
42 mdst = br_mdb_get(br, skb);
43 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
44 br_multicast_deliver(mdst, skb);
45 else
46 br_flood_deliver(br, skb);
47 } else if ((dst = __br_fdb_get(br, dest)) != NULL)
38 br_deliver(dst->dst, skb); 48 br_deliver(dst->dst, skb);
39 else 49 else
40 br_flood_deliver(br, skb); 50 br_flood_deliver(br, skb);
41 51
52out:
42 return NETDEV_TX_OK; 53 return NETDEV_TX_OK;
43} 54}
44 55
@@ -49,6 +60,7 @@ static int br_dev_open(struct net_device *dev)
49 br_features_recompute(br); 60 br_features_recompute(br);
50 netif_start_queue(dev); 61 netif_start_queue(dev);
51 br_stp_enable_bridge(br); 62 br_stp_enable_bridge(br);
63 br_multicast_open(br);
52 64
53 return 0; 65 return 0;
54} 66}
@@ -59,7 +71,10 @@ static void br_dev_set_multicast_list(struct net_device *dev)
59 71
60static int br_dev_stop(struct net_device *dev) 72static int br_dev_stop(struct net_device *dev)
61{ 73{
62 br_stp_disable_bridge(netdev_priv(dev)); 74 struct net_bridge *br = netdev_priv(dev);
75
76 br_stp_disable_bridge(br);
77 br_multicast_stop(br);
63 78
64 netif_stop_queue(dev); 79 netif_stop_queue(dev);
65 80
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 3b8e038ab32c..9101a4e56201 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/jhash.h> 21#include <linux/jhash.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25#include "br_private.h" 26#include "br_private.h"
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bc1704ac6cd9..7a241c396981 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -11,6 +11,8 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/err.h>
15#include <linux/slab.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/netdevice.h> 17#include <linux/netdevice.h>
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
@@ -18,6 +20,11 @@
18#include <linux/netfilter_bridge.h> 20#include <linux/netfilter_bridge.h>
19#include "br_private.h" 21#include "br_private.h"
20 22
23static int deliver_clone(const struct net_bridge_port *prev,
24 struct sk_buff *skb,
25 void (*__packet_hook)(const struct net_bridge_port *p,
26 struct sk_buff *skb));
27
21/* Don't forward packets to originating port or forwarding diasabled */ 28/* Don't forward packets to originating port or forwarding diasabled */
22static inline int should_deliver(const struct net_bridge_port *p, 29static inline int should_deliver(const struct net_bridge_port *p,
23 const struct sk_buff *skb) 30 const struct sk_buff *skb)
@@ -93,61 +100,167 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
93} 100}
94 101
95/* called with rcu_read_lock */ 102/* called with rcu_read_lock */
96void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 103void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
97{ 104{
98 if (should_deliver(to, skb)) { 105 if (should_deliver(to, skb)) {
99 __br_forward(to, skb); 106 if (skb0)
107 deliver_clone(to, skb, __br_forward);
108 else
109 __br_forward(to, skb);
100 return; 110 return;
101 } 111 }
102 112
103 kfree_skb(skb); 113 if (!skb0)
114 kfree_skb(skb);
104} 115}
105 116
106/* called under bridge lock */ 117static int deliver_clone(const struct net_bridge_port *prev,
107static void br_flood(struct net_bridge *br, struct sk_buff *skb, 118 struct sk_buff *skb,
119 void (*__packet_hook)(const struct net_bridge_port *p,
120 struct sk_buff *skb))
121{
122 skb = skb_clone(skb, GFP_ATOMIC);
123 if (!skb) {
124 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
125
126 dev->stats.tx_dropped++;
127 return -ENOMEM;
128 }
129
130 __packet_hook(prev, skb);
131 return 0;
132}
133
134static struct net_bridge_port *maybe_deliver(
135 struct net_bridge_port *prev, struct net_bridge_port *p,
136 struct sk_buff *skb,
108 void (*__packet_hook)(const struct net_bridge_port *p, 137 void (*__packet_hook)(const struct net_bridge_port *p,
109 struct sk_buff *skb)) 138 struct sk_buff *skb))
110{ 139{
140 int err;
141
142 if (!should_deliver(p, skb))
143 return prev;
144
145 if (!prev)
146 goto out;
147
148 err = deliver_clone(prev, skb, __packet_hook);
149 if (err)
150 return ERR_PTR(err);
151
152out:
153 return p;
154}
155
156/* called under bridge lock */
157static void br_flood(struct net_bridge *br, struct sk_buff *skb,
158 struct sk_buff *skb0,
159 void (*__packet_hook)(const struct net_bridge_port *p,
160 struct sk_buff *skb))
161{
111 struct net_bridge_port *p; 162 struct net_bridge_port *p;
112 struct net_bridge_port *prev; 163 struct net_bridge_port *prev;
113 164
114 prev = NULL; 165 prev = NULL;
115 166
116 list_for_each_entry_rcu(p, &br->port_list, list) { 167 list_for_each_entry_rcu(p, &br->port_list, list) {
117 if (should_deliver(p, skb)) { 168 prev = maybe_deliver(prev, p, skb, __packet_hook);
118 if (prev != NULL) { 169 if (IS_ERR(prev))
119 struct sk_buff *skb2; 170 goto out;
120
121 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
122 br->dev->stats.tx_dropped++;
123 kfree_skb(skb);
124 return;
125 }
126
127 __packet_hook(prev, skb2);
128 }
129
130 prev = p;
131 }
132 } 171 }
133 172
134 if (prev != NULL) { 173 if (!prev)
174 goto out;
175
176 if (skb0)
177 deliver_clone(prev, skb, __packet_hook);
178 else
135 __packet_hook(prev, skb); 179 __packet_hook(prev, skb);
136 return; 180 return;
137 }
138 181
139 kfree_skb(skb); 182out:
183 if (!skb0)
184 kfree_skb(skb);
140} 185}
141 186
142 187
143/* called with rcu_read_lock */ 188/* called with rcu_read_lock */
144void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) 189void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
145{ 190{
146 br_flood(br, skb, __br_deliver); 191 br_flood(br, skb, NULL, __br_deliver);
147} 192}
148 193
149/* called under bridge lock */ 194/* called under bridge lock */
150void br_flood_forward(struct net_bridge *br, struct sk_buff *skb) 195void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
196 struct sk_buff *skb2)
197{
198 br_flood(br, skb, skb2, __br_forward);
199}
200
201#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
202/* called with rcu_read_lock */
203static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
204 struct sk_buff *skb, struct sk_buff *skb0,
205 void (*__packet_hook)(
206 const struct net_bridge_port *p,
207 struct sk_buff *skb))
208{
209 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
210 struct net_bridge *br = netdev_priv(dev);
211 struct net_bridge_port *port;
212 struct net_bridge_port *lport, *rport;
213 struct net_bridge_port *prev;
214 struct net_bridge_port_group *p;
215 struct hlist_node *rp;
216
217 prev = NULL;
218
219 rp = br->router_list.first;
220 p = mdst ? mdst->ports : NULL;
221 while (p || rp) {
222 lport = p ? p->port : NULL;
223 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
224 NULL;
225
226 port = (unsigned long)lport > (unsigned long)rport ?
227 lport : rport;
228
229 prev = maybe_deliver(prev, port, skb, __packet_hook);
230 if (IS_ERR(prev))
231 goto out;
232
233 if ((unsigned long)lport >= (unsigned long)port)
234 p = p->next;
235 if ((unsigned long)rport >= (unsigned long)port)
236 rp = rp->next;
237 }
238
239 if (!prev)
240 goto out;
241
242 if (skb0)
243 deliver_clone(prev, skb, __packet_hook);
244 else
245 __packet_hook(prev, skb);
246 return;
247
248out:
249 if (!skb0)
250 kfree_skb(skb);
251}
252
253/* called with rcu_read_lock */
254void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
255 struct sk_buff *skb)
256{
257 br_multicast_flood(mdst, skb, NULL, __br_deliver);
258}
259
260/* called with rcu_read_lock */
261void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
262 struct sk_buff *skb, struct sk_buff *skb2)
151{ 263{
152 br_flood(br, skb, __br_forward); 264 br_multicast_flood(mdst, skb, skb2, __br_forward);
153} 265}
266#endif
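The rewritten flood path above defers cloning: maybe_deliver() remembers the previous eligible port and only clones once a second recipient turns up, so the last recipient consumes the original skb (and deliver_clone() is used one extra time when skb0 signals that the caller still needs the buffer). Below is a small userspace illustration of that "clone for all but the last recipient" bookkeeping, with invented names and plain malloc'd strings standing in for skbs.

/*
 * Userspace sketch of the deferred-clone fan-out used by br_flood() above.
 * The buffer is only duplicated once a second recipient is found, so a
 * single (or zero) recipient costs no copy at all.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void deliver(int port, char *buf)
{
	printf("port %d got \"%s\" (%p)\n", port, buf, (void *)buf);
	free(buf);			/* each recipient owns what it receives */
}

int main(void)
{
	char *skb = strdup("frame");		/* the original buffer */
	int eligible[] = { 1, 0, 1, 1 };	/* which ports may receive it */
	int prev = -1;				/* last eligible port, not yet served */
	int i;

	for (i = 0; i < 4; i++) {
		if (!eligible[i])
			continue;
		if (prev >= 0)
			deliver(prev, strdup(skb));	/* clone only now */
		prev = i;
	}

	if (prev >= 0)
		deliver(prev, skb);	/* last recipient takes the original */
	else
		free(skb);		/* nobody wanted it */

	return 0;
}

With exactly one eligible port the loop never copies anything, which is the common bridging case this rewrite is optimizing.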
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e65..0b6b1f2ff7ac 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h>
22#include <net/sock.h> 23#include <net/sock.h>
23 24
24#include "br_private.h" 25#include "br_private.h"
@@ -147,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
147 148
148 rcu_assign_pointer(dev->br_port, NULL); 149 rcu_assign_pointer(dev->br_port, NULL);
149 150
151 br_multicast_del_port(p);
152
150 kobject_uevent(&p->kobj, KOBJ_REMOVE); 153 kobject_uevent(&p->kobj, KOBJ_REMOVE);
151 kobject_del(&p->kobj); 154 kobject_del(&p->kobj);
152 155
@@ -206,9 +209,8 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
206 209
207 br_netfilter_rtable_init(br); 210 br_netfilter_rtable_init(br);
208 211
209 INIT_LIST_HEAD(&br->age_list);
210
211 br_stp_timer_init(br); 212 br_stp_timer_init(br);
213 br_multicast_init(br);
212 214
213 return dev; 215 return dev;
214} 216}
@@ -260,6 +262,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
260 br_init_port(p); 262 br_init_port(p);
261 p->state = BR_STATE_DISABLED; 263 p->state = BR_STATE_DISABLED;
262 br_stp_port_timer_init(p); 264 br_stp_port_timer_init(p);
265 br_multicast_add_port(p);
263 266
264 return p; 267 return p;
265} 268}
@@ -467,7 +470,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
467 return 0; 470 return 0;
468} 471}
469 472
470void br_net_exit(struct net *net) 473void __net_exit br_net_exit(struct net *net)
471{ 474{
472 struct net_device *dev; 475 struct net_device *dev;
473 LIST_HEAD(list); 476 LIST_HEAD(list);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ee1a3682bf2..a82dde2d2ead 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -11,6 +11,7 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/slab.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -20,9 +21,9 @@
20/* Bridge group multicast address 802.1d (pg 51). */ 21/* Bridge group multicast address 802.1d (pg 51). */
21const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
22 23
23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 24static int br_pass_frame_up(struct sk_buff *skb)
24{ 25{
25 struct net_device *indev, *brdev = br->dev; 26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
26 27
27 brdev->stats.rx_packets++; 28 brdev->stats.rx_packets++;
28 brdev->stats.rx_bytes += skb->len; 29 brdev->stats.rx_bytes += skb->len;
@@ -30,8 +31,8 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
30 indev = skb->dev; 31 indev = skb->dev;
31 skb->dev = brdev; 32 skb->dev = brdev;
32 33
33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 34 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
34 netif_receive_skb); 35 netif_receive_skb);
35} 36}
36 37
37/* note: already called with rcu_read_lock (preempt_disabled) */ 38/* note: already called with rcu_read_lock (preempt_disabled) */
@@ -41,6 +42,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
41 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); 42 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
42 struct net_bridge *br; 43 struct net_bridge *br;
43 struct net_bridge_fdb_entry *dst; 44 struct net_bridge_fdb_entry *dst;
45 struct net_bridge_mdb_entry *mdst;
44 struct sk_buff *skb2; 46 struct sk_buff *skb2;
45 47
46 if (!p || p->state == BR_STATE_DISABLED) 48 if (!p || p->state == BR_STATE_DISABLED)
@@ -50,9 +52,15 @@ int br_handle_frame_finish(struct sk_buff *skb)
50 br = p->br; 52 br = p->br;
51 br_fdb_update(br, p, eth_hdr(skb)->h_source); 53 br_fdb_update(br, p, eth_hdr(skb)->h_source);
52 54
55 if (is_multicast_ether_addr(dest) &&
56 br_multicast_rcv(br, p, skb))
57 goto drop;
58
53 if (p->state == BR_STATE_LEARNING) 59 if (p->state == BR_STATE_LEARNING)
54 goto drop; 60 goto drop;
55 61
62 BR_INPUT_SKB_CB(skb)->brdev = br->dev;
63
56 /* The packet skb2 goes to the local host (NULL to skip). */ 64 /* The packet skb2 goes to the local host (NULL to skip). */
57 skb2 = NULL; 65 skb2 = NULL;
58 66
@@ -62,27 +70,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
62 dst = NULL; 70 dst = NULL;
63 71
64 if (is_multicast_ether_addr(dest)) { 72 if (is_multicast_ether_addr(dest)) {
73 mdst = br_mdb_get(br, skb);
74 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
75 if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
76 br_multicast_is_router(br))
77 skb2 = skb;
78 br_multicast_forward(mdst, skb, skb2);
79 skb = NULL;
80 if (!skb2)
81 goto out;
82 } else
83 skb2 = skb;
84
65 br->dev->stats.multicast++; 85 br->dev->stats.multicast++;
66 skb2 = skb;
67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 86 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
68 skb2 = skb; 87 skb2 = skb;
69 /* Do not forward the packet since it's local. */ 88 /* Do not forward the packet since it's local. */
70 skb = NULL; 89 skb = NULL;
71 } 90 }
72 91
73 if (skb2 == skb)
74 skb2 = skb_clone(skb, GFP_ATOMIC);
75
76 if (skb2)
77 br_pass_frame_up(br, skb2);
78
79 if (skb) { 92 if (skb) {
80 if (dst) 93 if (dst)
81 br_forward(dst->dst, skb); 94 br_forward(dst->dst, skb, skb2);
82 else 95 else
83 br_flood_forward(br, skb); 96 br_flood_forward(br, skb, skb2);
84 } 97 }
85 98
99 if (skb2)
100 return br_pass_frame_up(skb2);
101
86out: 102out:
87 return 0; 103 return 0;
88drop: 104drop:
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 2af6e4a90262..995afc4b04dc 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/slab.h>
18#include <linux/times.h> 19#include <linux/times.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
new file mode 100644
index 000000000000..eaa0e1bae49b
--- /dev/null
+++ b/net/bridge/br_multicast.c
@@ -0,0 +1,1309 @@
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/if_ether.h>
15#include <linux/igmp.h>
16#include <linux/jhash.h>
17#include <linux/kernel.h>
18#include <linux/log2.h>
19#include <linux/netdevice.h>
20#include <linux/netfilter_bridge.h>
21#include <linux/random.h>
22#include <linux/rculist.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25#include <linux/timer.h>
26#include <net/ip.h>
27
28#include "br_private.h"
29
30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
31{
32 return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
33}
34
35static struct net_bridge_mdb_entry *__br_mdb_ip_get(
36 struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
37{
38 struct net_bridge_mdb_entry *mp;
39 struct hlist_node *p;
40
41 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
42 if (dst == mp->addr)
43 return mp;
44 }
45
46 return NULL;
47}
48
49static struct net_bridge_mdb_entry *br_mdb_ip_get(
50 struct net_bridge_mdb_htable *mdb, __be32 dst)
51{
52 if (!mdb)
53 return NULL;
54
55 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
56}
57
58struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
59 struct sk_buff *skb)
60{
61 if (br->multicast_disabled)
62 return NULL;
63
64 switch (skb->protocol) {
65 case htons(ETH_P_IP):
66 if (BR_INPUT_SKB_CB(skb)->igmp)
67 break;
68 return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
69 }
70
71 return NULL;
72}
73
74static void br_mdb_free(struct rcu_head *head)
75{
76 struct net_bridge_mdb_htable *mdb =
77 container_of(head, struct net_bridge_mdb_htable, rcu);
78 struct net_bridge_mdb_htable *old = mdb->old;
79
80 mdb->old = NULL;
81 kfree(old->mhash);
82 kfree(old);
83}
84
85static int br_mdb_copy(struct net_bridge_mdb_htable *new,
86 struct net_bridge_mdb_htable *old,
87 int elasticity)
88{
89 struct net_bridge_mdb_entry *mp;
90 struct hlist_node *p;
91 int maxlen;
92 int len;
93 int i;
94
95 for (i = 0; i < old->max; i++)
96 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
97 hlist_add_head(&mp->hlist[new->ver],
98 &new->mhash[br_ip_hash(new, mp->addr)]);
99
100 if (!elasticity)
101 return 0;
102
103 maxlen = 0;
104 for (i = 0; i < new->max; i++) {
105 len = 0;
106 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
107 len++;
108 if (len > maxlen)
109 maxlen = len;
110 }
111
112 return maxlen > elasticity ? -EINVAL : 0;
113}
114
115static void br_multicast_free_pg(struct rcu_head *head)
116{
117 struct net_bridge_port_group *p =
118 container_of(head, struct net_bridge_port_group, rcu);
119
120 kfree(p);
121}
122
123static void br_multicast_free_group(struct rcu_head *head)
124{
125 struct net_bridge_mdb_entry *mp =
126 container_of(head, struct net_bridge_mdb_entry, rcu);
127
128 kfree(mp);
129}
130
131static void br_multicast_group_expired(unsigned long data)
132{
133 struct net_bridge_mdb_entry *mp = (void *)data;
134 struct net_bridge *br = mp->br;
135 struct net_bridge_mdb_htable *mdb;
136
137 spin_lock(&br->multicast_lock);
138 if (!netif_running(br->dev) || timer_pending(&mp->timer))
139 goto out;
140
141 if (!hlist_unhashed(&mp->mglist))
142 hlist_del_init(&mp->mglist);
143
144 if (mp->ports)
145 goto out;
146
147 mdb = br->mdb;
148 hlist_del_rcu(&mp->hlist[mdb->ver]);
149 mdb->size--;
150
151 del_timer(&mp->query_timer);
152 call_rcu_bh(&mp->rcu, br_multicast_free_group);
153
154out:
155 spin_unlock(&br->multicast_lock);
156}
157
158static void br_multicast_del_pg(struct net_bridge *br,
159 struct net_bridge_port_group *pg)
160{
161 struct net_bridge_mdb_htable *mdb = br->mdb;
162 struct net_bridge_mdb_entry *mp;
163 struct net_bridge_port_group *p;
164 struct net_bridge_port_group **pp;
165
166 mp = br_mdb_ip_get(mdb, pg->addr);
167 if (WARN_ON(!mp))
168 return;
169
170 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
171 if (p != pg)
172 continue;
173
174 *pp = p->next;
175 hlist_del_init(&p->mglist);
176 del_timer(&p->timer);
177 del_timer(&p->query_timer);
178 call_rcu_bh(&p->rcu, br_multicast_free_pg);
179
180 if (!mp->ports && hlist_unhashed(&mp->mglist) &&
181 netif_running(br->dev))
182 mod_timer(&mp->timer, jiffies);
183
184 return;
185 }
186
187 WARN_ON(1);
188}
189
190static void br_multicast_port_group_expired(unsigned long data)
191{
192 struct net_bridge_port_group *pg = (void *)data;
193 struct net_bridge *br = pg->port->br;
194
195 spin_lock(&br->multicast_lock);
196 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
197 hlist_unhashed(&pg->mglist))
198 goto out;
199
200 br_multicast_del_pg(br, pg);
201
202out:
203 spin_unlock(&br->multicast_lock);
204}
205
206static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
207 int elasticity)
208{
209 struct net_bridge_mdb_htable *old = *mdbp;
210 struct net_bridge_mdb_htable *mdb;
211 int err;
212
213 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
214 if (!mdb)
215 return -ENOMEM;
216
217 mdb->max = max;
218 mdb->old = old;
219
220 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
221 if (!mdb->mhash) {
222 kfree(mdb);
223 return -ENOMEM;
224 }
225
226 mdb->size = old ? old->size : 0;
227 mdb->ver = old ? old->ver ^ 1 : 0;
228
229 if (!old || elasticity)
230 get_random_bytes(&mdb->secret, sizeof(mdb->secret));
231 else
232 mdb->secret = old->secret;
233
234 if (!old)
235 goto out;
236
237 err = br_mdb_copy(mdb, old, elasticity);
238 if (err) {
239 kfree(mdb->mhash);
240 kfree(mdb);
241 return err;
242 }
243
244 call_rcu_bh(&mdb->rcu, br_mdb_free);
245
246out:
247 rcu_assign_pointer(*mdbp, mdb);
248
249 return 0;
250}
251
252static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
253 __be32 group)
254{
255 struct sk_buff *skb;
256 struct igmphdr *ih;
257 struct ethhdr *eth;
258 struct iphdr *iph;
259
260 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
261 sizeof(*ih) + 4);
262 if (!skb)
263 goto out;
264
265 skb->protocol = htons(ETH_P_IP);
266
267 skb_reset_mac_header(skb);
268 eth = eth_hdr(skb);
269
270 memcpy(eth->h_source, br->dev->dev_addr, 6);
271 eth->h_dest[0] = 1;
272 eth->h_dest[1] = 0;
273 eth->h_dest[2] = 0x5e;
274 eth->h_dest[3] = 0;
275 eth->h_dest[4] = 0;
276 eth->h_dest[5] = 1;
277 eth->h_proto = htons(ETH_P_IP);
278 skb_put(skb, sizeof(*eth));
279
280 skb_set_network_header(skb, skb->len);
281 iph = ip_hdr(skb);
282
283 iph->version = 4;
284 iph->ihl = 6;
285 iph->tos = 0xc0;
286 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
287 iph->id = 0;
288 iph->frag_off = htons(IP_DF);
289 iph->ttl = 1;
290 iph->protocol = IPPROTO_IGMP;
291 iph->saddr = 0;
292 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
293 ((u8 *)&iph[1])[0] = IPOPT_RA;
294 ((u8 *)&iph[1])[1] = 4;
295 ((u8 *)&iph[1])[2] = 0;
296 ((u8 *)&iph[1])[3] = 0;
297 ip_send_check(iph);
298 skb_put(skb, 24);
299
300 skb_set_transport_header(skb, skb->len);
301 ih = igmp_hdr(skb);
302 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
303 ih->code = (group ? br->multicast_last_member_interval :
304 br->multicast_query_response_interval) /
305 (HZ / IGMP_TIMER_SCALE);
306 ih->group = group;
307 ih->csum = 0;
308 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
309 skb_put(skb, sizeof(*ih));
310
311 __skb_pull(skb, sizeof(*eth));
312
313out:
314 return skb;
315}
316
317static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
318{
319 struct net_bridge *br = mp->br;
320 struct sk_buff *skb;
321
322 skb = br_multicast_alloc_query(br, mp->addr);
323 if (!skb)
324 goto timer;
325
326 netif_rx(skb);
327
328timer:
329 if (++mp->queries_sent < br->multicast_last_member_count)
330 mod_timer(&mp->query_timer,
331 jiffies + br->multicast_last_member_interval);
332}
333
334static void br_multicast_group_query_expired(unsigned long data)
335{
336 struct net_bridge_mdb_entry *mp = (void *)data;
337 struct net_bridge *br = mp->br;
338
339 spin_lock(&br->multicast_lock);
340 if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
341 mp->queries_sent >= br->multicast_last_member_count)
342 goto out;
343
344 br_multicast_send_group_query(mp);
345
346out:
347 spin_unlock(&br->multicast_lock);
348}
349
350static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
351{
352 struct net_bridge_port *port = pg->port;
353 struct net_bridge *br = port->br;
354 struct sk_buff *skb;
355
356 skb = br_multicast_alloc_query(br, pg->addr);
357 if (!skb)
358 goto timer;
359
360 br_deliver(port, skb);
361
362timer:
363 if (++pg->queries_sent < br->multicast_last_member_count)
364 mod_timer(&pg->query_timer,
365 jiffies + br->multicast_last_member_interval);
366}
367
368static void br_multicast_port_group_query_expired(unsigned long data)
369{
370 struct net_bridge_port_group *pg = (void *)data;
371 struct net_bridge_port *port = pg->port;
372 struct net_bridge *br = port->br;
373
374 spin_lock(&br->multicast_lock);
375 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
376 pg->queries_sent >= br->multicast_last_member_count)
377 goto out;
378
379 br_multicast_send_port_group_query(pg);
380
381out:
382 spin_unlock(&br->multicast_lock);
383}
384
385static struct net_bridge_mdb_entry *br_multicast_get_group(
386 struct net_bridge *br, struct net_bridge_port *port, __be32 group,
387 int hash)
388{
389 struct net_bridge_mdb_htable *mdb = br->mdb;
390 struct net_bridge_mdb_entry *mp;
391 struct hlist_node *p;
392 unsigned count = 0;
393 unsigned max;
394 int elasticity;
395 int err;
396
397 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
398 count++;
399 if (unlikely(group == mp->addr)) {
400 return mp;
401 }
402 }
403
404 elasticity = 0;
405 max = mdb->max;
406
407 if (unlikely(count > br->hash_elasticity && count)) {
408 if (net_ratelimit())
409 printk(KERN_INFO "%s: Multicast hash table "
410 "chain limit reached: %s\n",
411 br->dev->name, port ? port->dev->name :
412 br->dev->name);
413
414 elasticity = br->hash_elasticity;
415 }
416
417 if (mdb->size >= max) {
418 max *= 2;
419 if (unlikely(max >= br->hash_max)) {
420 printk(KERN_WARNING "%s: Multicast hash table maximum "
421 "reached, disabling snooping: %s, %d\n",
422 br->dev->name, port ? port->dev->name :
423 br->dev->name,
424 max);
425 err = -E2BIG;
426disable:
427 br->multicast_disabled = 1;
428 goto err;
429 }
430 }
431
432 if (max > mdb->max || elasticity) {
433 if (mdb->old) {
434 if (net_ratelimit())
435 printk(KERN_INFO "%s: Multicast hash table "
436 "on fire: %s\n",
437 br->dev->name, port ? port->dev->name :
438 br->dev->name);
439 err = -EEXIST;
440 goto err;
441 }
442
443 err = br_mdb_rehash(&br->mdb, max, elasticity);
444 if (err) {
445 printk(KERN_WARNING "%s: Cannot rehash multicast "
446 "hash table, disabling snooping: "
447 "%s, %d, %d\n",
448 br->dev->name, port ? port->dev->name :
449 br->dev->name,
450 mdb->size, err);
451 goto disable;
452 }
453
454 err = -EAGAIN;
455 goto err;
456 }
457
458 return NULL;
459
460err:
461 mp = ERR_PTR(err);
462 return mp;
463}
464
465static struct net_bridge_mdb_entry *br_multicast_new_group(
466 struct net_bridge *br, struct net_bridge_port *port, __be32 group)
467{
468 struct net_bridge_mdb_htable *mdb = br->mdb;
469 struct net_bridge_mdb_entry *mp;
470 int hash;
471
472 if (!mdb) {
473 if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
474 return NULL;
475 goto rehash;
476 }
477
478 hash = br_ip_hash(mdb, group);
479 mp = br_multicast_get_group(br, port, group, hash);
480 switch (PTR_ERR(mp)) {
481 case 0:
482 break;
483
484 case -EAGAIN:
485rehash:
486 mdb = br->mdb;
487 hash = br_ip_hash(mdb, group);
488 break;
489
490 default:
491 goto out;
492 }
493
494 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
495 if (unlikely(!mp))
496 goto out;
497
498 mp->br = br;
499 mp->addr = group;
500 setup_timer(&mp->timer, br_multicast_group_expired,
501 (unsigned long)mp);
502 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
503 (unsigned long)mp);
504
505 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
506 mdb->size++;
507
508out:
509 return mp;
510}
511
512static int br_multicast_add_group(struct net_bridge *br,
513 struct net_bridge_port *port, __be32 group)
514{
515 struct net_bridge_mdb_entry *mp;
516 struct net_bridge_port_group *p;
517 struct net_bridge_port_group **pp;
518 unsigned long now = jiffies;
519 int err;
520
521 if (ipv4_is_local_multicast(group))
522 return 0;
523
524 spin_lock(&br->multicast_lock);
525 if (!netif_running(br->dev) ||
526 (port && port->state == BR_STATE_DISABLED))
527 goto out;
528
529 mp = br_multicast_new_group(br, port, group);
530 err = PTR_ERR(mp);
531 if (unlikely(IS_ERR(mp) || !mp))
532 goto err;
533
534 if (!port) {
535 hlist_add_head(&mp->mglist, &br->mglist);
536 mod_timer(&mp->timer, now + br->multicast_membership_interval);
537 goto out;
538 }
539
540 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
541 if (p->port == port)
542 goto found;
543 if ((unsigned long)p->port < (unsigned long)port)
544 break;
545 }
546
547 p = kzalloc(sizeof(*p), GFP_ATOMIC);
548 err = -ENOMEM;
549 if (unlikely(!p))
550 goto err;
551
552 p->addr = group;
553 p->port = port;
554 p->next = *pp;
555 hlist_add_head(&p->mglist, &port->mglist);
556 setup_timer(&p->timer, br_multicast_port_group_expired,
557 (unsigned long)p);
558 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
559 (unsigned long)p);
560
561 rcu_assign_pointer(*pp, p);
562
563found:
564 mod_timer(&p->timer, now + br->multicast_membership_interval);
565out:
566 err = 0;
567
568err:
569 spin_unlock(&br->multicast_lock);
570 return err;
571}
572
573static void br_multicast_router_expired(unsigned long data)
574{
575 struct net_bridge_port *port = (void *)data;
576 struct net_bridge *br = port->br;
577
578 spin_lock(&br->multicast_lock);
579 if (port->multicast_router != 1 ||
580 timer_pending(&port->multicast_router_timer) ||
581 hlist_unhashed(&port->rlist))
582 goto out;
583
584 hlist_del_init_rcu(&port->rlist);
585
586out:
587 spin_unlock(&br->multicast_lock);
588}
589
590static void br_multicast_local_router_expired(unsigned long data)
591{
592}
593
594static void br_multicast_send_query(struct net_bridge *br,
595 struct net_bridge_port *port, u32 sent)
596{
597 unsigned long time;
598 struct sk_buff *skb;
599
600 if (!netif_running(br->dev) || br->multicast_disabled ||
601 timer_pending(&br->multicast_querier_timer))
602 return;
603
604 skb = br_multicast_alloc_query(br, 0);
605 if (!skb)
606 goto timer;
607
608 if (port) {
609 __skb_push(skb, sizeof(struct ethhdr));
610 skb->dev = port->dev;
611 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
612 dev_queue_xmit);
613 } else
614 netif_rx(skb);
615
616timer:
617 time = jiffies;
618 time += sent < br->multicast_startup_query_count ?
619 br->multicast_startup_query_interval :
620 br->multicast_query_interval;
621 mod_timer(port ? &port->multicast_query_timer :
622 &br->multicast_query_timer, time);
623}
624
625static void br_multicast_port_query_expired(unsigned long data)
626{
627 struct net_bridge_port *port = (void *)data;
628 struct net_bridge *br = port->br;
629
630 spin_lock(&br->multicast_lock);
631 if (port->state == BR_STATE_DISABLED ||
632 port->state == BR_STATE_BLOCKING)
633 goto out;
634
635 if (port->multicast_startup_queries_sent <
636 br->multicast_startup_query_count)
637 port->multicast_startup_queries_sent++;
638
639 br_multicast_send_query(port->br, port,
640 port->multicast_startup_queries_sent);
641
642out:
643 spin_unlock(&br->multicast_lock);
644}
645
646void br_multicast_add_port(struct net_bridge_port *port)
647{
648 port->multicast_router = 1;
649
650 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
651 (unsigned long)port);
652 setup_timer(&port->multicast_query_timer,
653 br_multicast_port_query_expired, (unsigned long)port);
654}
655
656void br_multicast_del_port(struct net_bridge_port *port)
657{
658 del_timer_sync(&port->multicast_router_timer);
659}
660
661static void __br_multicast_enable_port(struct net_bridge_port *port)
662{
663 port->multicast_startup_queries_sent = 0;
664
665 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
666 del_timer(&port->multicast_query_timer))
667 mod_timer(&port->multicast_query_timer, jiffies);
668}
669
670void br_multicast_enable_port(struct net_bridge_port *port)
671{
672 struct net_bridge *br = port->br;
673
674 spin_lock(&br->multicast_lock);
675 if (br->multicast_disabled || !netif_running(br->dev))
676 goto out;
677
678 __br_multicast_enable_port(port);
679
680out:
681 spin_unlock(&br->multicast_lock);
682}
683
684void br_multicast_disable_port(struct net_bridge_port *port)
685{
686 struct net_bridge *br = port->br;
687 struct net_bridge_port_group *pg;
688 struct hlist_node *p, *n;
689
690 spin_lock(&br->multicast_lock);
691 hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
692 br_multicast_del_pg(br, pg);
693
694 if (!hlist_unhashed(&port->rlist))
695 hlist_del_init_rcu(&port->rlist);
696 del_timer(&port->multicast_router_timer);
697 del_timer(&port->multicast_query_timer);
698 spin_unlock(&br->multicast_lock);
699}
700
701static int br_multicast_igmp3_report(struct net_bridge *br,
702 struct net_bridge_port *port,
703 struct sk_buff *skb)
704{
705 struct igmpv3_report *ih;
706 struct igmpv3_grec *grec;
707 int i;
708 int len;
709 int num;
710 int type;
711 int err = 0;
712 __be32 group;
713
714 if (!pskb_may_pull(skb, sizeof(*ih)))
715 return -EINVAL;
716
717 ih = igmpv3_report_hdr(skb);
718 num = ntohs(ih->ngrec);
719 len = sizeof(*ih);
720
721 for (i = 0; i < num; i++) {
722 len += sizeof(*grec);
723 if (!pskb_may_pull(skb, len))
724 return -EINVAL;
725
726 grec = (void *)(skb->data + len - sizeof(*grec));
727 group = grec->grec_mca;
728 type = grec->grec_type;
729
730 len += ntohs(grec->grec_nsrcs) * 4;
731 if (!pskb_may_pull(skb, len))
732 return -EINVAL;
733
734 /* We treat this as an IGMPv2 report for now. */
735 switch (type) {
736 case IGMPV3_MODE_IS_INCLUDE:
737 case IGMPV3_MODE_IS_EXCLUDE:
738 case IGMPV3_CHANGE_TO_INCLUDE:
739 case IGMPV3_CHANGE_TO_EXCLUDE:
740 case IGMPV3_ALLOW_NEW_SOURCES:
741 case IGMPV3_BLOCK_OLD_SOURCES:
742 break;
743
744 default:
745 continue;
746 }
747
748 err = br_multicast_add_group(br, port, group);
749 if (err)
750 break;
751 }
752
753 return err;
754}
755
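The IGMPv3 handling above walks the report as a sequence of fixed 8-byte group records, each followed by four bytes per listed source, and (as the comment in the loop says) treats every record type as a plain join for grec_mca. A self-contained userspace sketch of that walk, with the record header re-declared locally rather than taken from kernel headers, looks like this:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace walk of IGMPv3 group records, mirroring the loop above: each
 * record is an 8-byte header followed by nsrcs 4-byte source addresses,
 * and every record type is reported as a join for the group address.
 * Structures are local so the sketch builds without kernel headers.
 */
struct grec_hdr {
	uint8_t  type;
	uint8_t  auxwords;
	uint16_t nsrcs;
	uint32_t mca;		/* group address, network byte order */
} __attribute__((packed));

static void walk_report(const uint8_t *buf, size_t len, unsigned int ngrec)
{
	size_t off = 0;

	for (unsigned int i = 0; i < ngrec; i++) {
		struct grec_hdr g;

		if (off + sizeof(g) > len)
			return;
		memcpy(&g, buf + off, sizeof(g));
		off += sizeof(g) + ntohs(g.nsrcs) * 4;
		if (off > len)
			return;

		struct in_addr grp = { .s_addr = g.mca };
		printf("record %u: type %u group %s\n", i, g.type,
		       inet_ntoa(grp));
	}
}

int main(void)
{
	/* one MODE_IS_EXCLUDE record for 239.1.2.3, no sources */
	uint8_t rec[8] = { 2, 0, 0, 0, 239, 1, 2, 3 };

	walk_report(rec, sizeof(rec), 1);
	return 0;
}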
756static void br_multicast_add_router(struct net_bridge *br,
757 struct net_bridge_port *port)
758{
759 struct hlist_node *p;
760 struct hlist_node **h;
761
762 for (h = &br->router_list.first;
763 (p = *h) &&
764 (unsigned long)container_of(p, struct net_bridge_port, rlist) >
765 (unsigned long)port;
766 h = &p->next)
767 ;
768
769 port->rlist.pprev = h;
770 port->rlist.next = p;
771 rcu_assign_pointer(*h, &port->rlist);
772 if (p)
773 p->pprev = &port->rlist.next;
774}
775
776static void br_multicast_mark_router(struct net_bridge *br,
777 struct net_bridge_port *port)
778{
779 unsigned long now = jiffies;
780
781 if (!port) {
782 if (br->multicast_router == 1)
783 mod_timer(&br->multicast_router_timer,
784 now + br->multicast_querier_interval);
785 return;
786 }
787
788 if (port->multicast_router != 1)
789 return;
790
791 if (!hlist_unhashed(&port->rlist))
792 goto timer;
793
794 br_multicast_add_router(br, port);
795
796timer:
797 mod_timer(&port->multicast_router_timer,
798 now + br->multicast_querier_interval);
799}
800
801static void br_multicast_query_received(struct net_bridge *br,
802 struct net_bridge_port *port,
803 __be32 saddr)
804{
805 if (saddr)
806 mod_timer(&br->multicast_querier_timer,
807 jiffies + br->multicast_querier_interval);
808 else if (timer_pending(&br->multicast_querier_timer))
809 return;
810
811 br_multicast_mark_router(br, port);
812}
813
814static int br_multicast_query(struct net_bridge *br,
815 struct net_bridge_port *port,
816 struct sk_buff *skb)
817{
818 struct iphdr *iph = ip_hdr(skb);
819 struct igmphdr *ih = igmp_hdr(skb);
820 struct net_bridge_mdb_entry *mp;
821 struct igmpv3_query *ih3;
822 struct net_bridge_port_group *p;
823 struct net_bridge_port_group **pp;
824 unsigned long max_delay;
825 unsigned long now = jiffies;
826 __be32 group;
827 int err = 0;
828
829 spin_lock(&br->multicast_lock);
830 if (!netif_running(br->dev) ||
831 (port && port->state == BR_STATE_DISABLED))
832 goto out;
833
834 br_multicast_query_received(br, port, iph->saddr);
835
836 group = ih->group;
837
838 if (skb->len == sizeof(*ih)) {
839 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
840
841 if (!max_delay) {
842 max_delay = 10 * HZ;
843 group = 0;
844 }
845 } else {
846 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
847 err = -EINVAL;
848 goto out;
849 }
850
851 ih3 = igmpv3_query_hdr(skb);
852 if (ih3->nsrcs)
853 goto out;
854
855 max_delay = ih3->code ?
856 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
857 }
858
859 if (!group)
860 goto out;
861
862 mp = br_mdb_ip_get(br->mdb, group);
863 if (!mp)
864 goto out;
865
866 max_delay *= br->multicast_last_member_count;
867
868 if (!hlist_unhashed(&mp->mglist) &&
869 (timer_pending(&mp->timer) ?
870 time_after(mp->timer.expires, now + max_delay) :
871 try_to_del_timer_sync(&mp->timer) >= 0))
872 mod_timer(&mp->timer, now + max_delay);
873
874 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
875 if (timer_pending(&p->timer) ?
876 time_after(p->timer.expires, now + max_delay) :
877 try_to_del_timer_sync(&p->timer) >= 0)
878			mod_timer(&p->timer, now + max_delay);
879 }
880
881out:
882 spin_unlock(&br->multicast_lock);
883 return err;
884}
885
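br_multicast_query(), like br_multicast_add_group() above and br_multicast_leave_group() below, refreshes entry timers with one rule: a pending timer is only ever shortened (the time_after() check on its expiry), and an idle timer is re-armed only when try_to_del_timer_sync() confirms its handler is not running. A small userspace model of that decision, with plain longs standing in for jiffies and should_rearm() as an illustrative name, is:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the refresh rule used above: an entry timer is re-armed only
 * when that makes it fire sooner, so a burst of reports or queries can
 * never push an expiry further into the future.  The kernel's extra
 * try_to_del_timer_sync() condition for idle timers is collapsed here to
 * "idle means arm".
 */
static bool should_rearm(bool pending, long expires, long new_expiry)
{
	if (pending)
		return expires > new_expiry;	/* time_after(expires, new) */
	return true;				/* idle timer: just arm it  */
}

int main(void)
{
	/* pending timer that would fire later than the new deadline: shorten */
	assert(should_rearm(true, 1000, 600));
	/* pending timer already firing sooner: leave it alone */
	assert(!should_rearm(true, 500, 600));
	/* idle timer: arm */
	assert(should_rearm(false, 0, 600));
	puts("refresh rule ok");
	return 0;
}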
886static void br_multicast_leave_group(struct net_bridge *br,
887 struct net_bridge_port *port,
888 __be32 group)
889{
890 struct net_bridge_mdb_htable *mdb;
891 struct net_bridge_mdb_entry *mp;
892 struct net_bridge_port_group *p;
893 unsigned long now;
894 unsigned long time;
895
896 if (ipv4_is_local_multicast(group))
897 return;
898
899 spin_lock(&br->multicast_lock);
900 if (!netif_running(br->dev) ||
901 (port && port->state == BR_STATE_DISABLED) ||
902 timer_pending(&br->multicast_querier_timer))
903 goto out;
904
905 mdb = br->mdb;
906 mp = br_mdb_ip_get(mdb, group);
907 if (!mp)
908 goto out;
909
910 now = jiffies;
911 time = now + br->multicast_last_member_count *
912 br->multicast_last_member_interval;
913
914 if (!port) {
915 if (!hlist_unhashed(&mp->mglist) &&
916 (timer_pending(&mp->timer) ?
917 time_after(mp->timer.expires, time) :
918 try_to_del_timer_sync(&mp->timer) >= 0)) {
919 mod_timer(&mp->timer, time);
920
921 mp->queries_sent = 0;
922 mod_timer(&mp->query_timer, now);
923 }
924
925 goto out;
926 }
927
928 for (p = mp->ports; p; p = p->next) {
929 if (p->port != port)
930 continue;
931
932 if (!hlist_unhashed(&p->mglist) &&
933 (timer_pending(&p->timer) ?
934 time_after(p->timer.expires, time) :
935 try_to_del_timer_sync(&p->timer) >= 0)) {
936 mod_timer(&p->timer, time);
937
938 p->queries_sent = 0;
939 mod_timer(&p->query_timer, now);
940 }
941
942 break;
943 }
944
945out:
946 spin_unlock(&br->multicast_lock);
947}
948
949static int br_multicast_ipv4_rcv(struct net_bridge *br,
950 struct net_bridge_port *port,
951 struct sk_buff *skb)
952{
953 struct sk_buff *skb2 = skb;
954 struct iphdr *iph;
955 struct igmphdr *ih;
956 unsigned len;
957 unsigned offset;
958 int err;
959
960 /* We treat OOM as packet loss for now. */
961 if (!pskb_may_pull(skb, sizeof(*iph)))
962 return -EINVAL;
963
964 iph = ip_hdr(skb);
965
966 if (iph->ihl < 5 || iph->version != 4)
967 return -EINVAL;
968
969 if (!pskb_may_pull(skb, ip_hdrlen(skb)))
970 return -EINVAL;
971
972 iph = ip_hdr(skb);
973
974 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
975 return -EINVAL;
976
977 if (iph->protocol != IPPROTO_IGMP)
978 return 0;
979
980 len = ntohs(iph->tot_len);
981 if (skb->len < len || len < ip_hdrlen(skb))
982 return -EINVAL;
983
984 if (skb->len > len) {
985 skb2 = skb_clone(skb, GFP_ATOMIC);
986 if (!skb2)
987 return -ENOMEM;
988
989 err = pskb_trim_rcsum(skb2, len);
990 if (err)
991 goto err_out;
992 }
993
994 len -= ip_hdrlen(skb2);
995 offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
996 __skb_pull(skb2, offset);
997 skb_reset_transport_header(skb2);
998
999 err = -EINVAL;
1000 if (!pskb_may_pull(skb2, sizeof(*ih)))
1001 goto out;
1002
1003 iph = ip_hdr(skb2);
1004
1005 switch (skb2->ip_summed) {
1006 case CHECKSUM_COMPLETE:
1007 if (!csum_fold(skb2->csum))
1008 break;
1009 /* fall through */
1010 case CHECKSUM_NONE:
1011 skb2->csum = 0;
1012 if (skb_checksum_complete(skb2))
1013 goto out;
1014 }
1015
1016 err = 0;
1017
1018 BR_INPUT_SKB_CB(skb)->igmp = 1;
1019 ih = igmp_hdr(skb2);
1020
1021 switch (ih->type) {
1022 case IGMP_HOST_MEMBERSHIP_REPORT:
1023 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1024 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
1025 err = br_multicast_add_group(br, port, ih->group);
1026 break;
1027 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1028 err = br_multicast_igmp3_report(br, port, skb2);
1029 break;
1030 case IGMP_HOST_MEMBERSHIP_QUERY:
1031 err = br_multicast_query(br, port, skb2);
1032 break;
1033 case IGMP_HOST_LEAVE_MESSAGE:
1034 br_multicast_leave_group(br, port, ih->group);
1035 break;
1036 }
1037
1038out:
1039 __skb_push(skb2, offset);
1040err_out:
1041 if (skb2 != skb)
1042 kfree_skb(skb2);
1043 return err;
1044}
1045
1046int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1047 struct sk_buff *skb)
1048{
1049 BR_INPUT_SKB_CB(skb)->igmp = 0;
1050 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1051
1052 if (br->multicast_disabled)
1053 return 0;
1054
1055 switch (skb->protocol) {
1056 case htons(ETH_P_IP):
1057 return br_multicast_ipv4_rcv(br, port, skb);
1058 }
1059
1060 return 0;
1061}
1062
1063static void br_multicast_query_expired(unsigned long data)
1064{
1065 struct net_bridge *br = (void *)data;
1066
1067 spin_lock(&br->multicast_lock);
1068 if (br->multicast_startup_queries_sent <
1069 br->multicast_startup_query_count)
1070 br->multicast_startup_queries_sent++;
1071
1072 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
1073
1074 spin_unlock(&br->multicast_lock);
1075}
1076
1077void br_multicast_init(struct net_bridge *br)
1078{
1079 br->hash_elasticity = 4;
1080 br->hash_max = 512;
1081
1082 br->multicast_router = 1;
1083 br->multicast_last_member_count = 2;
1084 br->multicast_startup_query_count = 2;
1085
1086 br->multicast_last_member_interval = HZ;
1087 br->multicast_query_response_interval = 10 * HZ;
1088 br->multicast_startup_query_interval = 125 * HZ / 4;
1089 br->multicast_query_interval = 125 * HZ;
1090 br->multicast_querier_interval = 255 * HZ;
1091 br->multicast_membership_interval = 260 * HZ;
1092
1093 spin_lock_init(&br->multicast_lock);
1094 setup_timer(&br->multicast_router_timer,
1095 br_multicast_local_router_expired, 0);
1096 setup_timer(&br->multicast_querier_timer,
1097 br_multicast_local_router_expired, 0);
1098 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1099 (unsigned long)br);
1100}
1101
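The defaults chosen in br_multicast_init() follow the IGMPv2 timing rules from RFC 2236: membership interval = robustness * query interval + query response interval (2 * 125 s + 10 s = 260 s), other-querier-present interval = robustness * query interval + half the response interval (255 s), and the startup query interval is a quarter of the query interval. A quick standalone check of that arithmetic (HZ taken as 1 so the numbers read directly in seconds):

#include <assert.h>
#include <stdio.h>

/* Recompute the br_multicast_init() defaults from the RFC 2236 formulas.
 * HZ is taken as 1 here; in the kernel every constant is scaled by HZ.
 */
int main(void)
{
	const unsigned long robustness     = 2;		/* multicast_last_member_count       */
	const unsigned long query_interval = 125;	/* multicast_query_interval          */
	const unsigned long query_response = 10;	/* multicast_query_response_interval */

	unsigned long membership = robustness * query_interval + query_response;
	unsigned long querier    = robustness * query_interval + query_response / 2;

	assert(membership == 260);	/* multicast_membership_interval */
	assert(querier == 255);		/* multicast_querier_interval    */
	printf("membership=%lus querier=%lus\n", membership, querier);
	return 0;
}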
1102void br_multicast_open(struct net_bridge *br)
1103{
1104 br->multicast_startup_queries_sent = 0;
1105
1106 if (br->multicast_disabled)
1107 return;
1108
1109 mod_timer(&br->multicast_query_timer, jiffies);
1110}
1111
1112void br_multicast_stop(struct net_bridge *br)
1113{
1114 struct net_bridge_mdb_htable *mdb;
1115 struct net_bridge_mdb_entry *mp;
1116 struct hlist_node *p, *n;
1117 u32 ver;
1118 int i;
1119
1120 del_timer_sync(&br->multicast_router_timer);
1121 del_timer_sync(&br->multicast_querier_timer);
1122 del_timer_sync(&br->multicast_query_timer);
1123
1124 spin_lock_bh(&br->multicast_lock);
1125 mdb = br->mdb;
1126 if (!mdb)
1127 goto out;
1128
1129 br->mdb = NULL;
1130
1131 ver = mdb->ver;
1132 for (i = 0; i < mdb->max; i++) {
1133 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
1134 hlist[ver]) {
1135 del_timer(&mp->timer);
1136 del_timer(&mp->query_timer);
1137 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1138 }
1139 }
1140
1141 if (mdb->old) {
1142 spin_unlock_bh(&br->multicast_lock);
1143 rcu_barrier_bh();
1144 spin_lock_bh(&br->multicast_lock);
1145 WARN_ON(mdb->old);
1146 }
1147
1148 mdb->old = mdb;
1149 call_rcu_bh(&mdb->rcu, br_mdb_free);
1150
1151out:
1152 spin_unlock_bh(&br->multicast_lock);
1153}
1154
1155int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1156{
1157 int err = -ENOENT;
1158
1159 spin_lock_bh(&br->multicast_lock);
1160 if (!netif_running(br->dev))
1161 goto unlock;
1162
1163 switch (val) {
1164 case 0:
1165 case 2:
1166 del_timer(&br->multicast_router_timer);
1167 /* fall through */
1168 case 1:
1169 br->multicast_router = val;
1170 err = 0;
1171 break;
1172
1173 default:
1174 err = -EINVAL;
1175 break;
1176 }
1177
1178unlock:
1179 spin_unlock_bh(&br->multicast_lock);
1180
1181 return err;
1182}
1183
1184int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1185{
1186 struct net_bridge *br = p->br;
1187 int err = -ENOENT;
1188
1189 spin_lock(&br->multicast_lock);
1190 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
1191 goto unlock;
1192
1193 switch (val) {
1194 case 0:
1195 case 1:
1196 case 2:
1197 p->multicast_router = val;
1198 err = 0;
1199
1200 if (val < 2 && !hlist_unhashed(&p->rlist))
1201 hlist_del_init_rcu(&p->rlist);
1202
1203 if (val == 1)
1204 break;
1205
1206 del_timer(&p->multicast_router_timer);
1207
1208 if (val == 0)
1209 break;
1210
1211 br_multicast_add_router(br, p);
1212 break;
1213
1214 default:
1215 err = -EINVAL;
1216 break;
1217 }
1218
1219unlock:
1220 spin_unlock(&br->multicast_lock);
1221
1222 return err;
1223}
1224
1225int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1226{
1227 struct net_bridge_port *port;
1228 int err = -ENOENT;
1229
1230 spin_lock(&br->multicast_lock);
1231 if (!netif_running(br->dev))
1232 goto unlock;
1233
1234 err = 0;
1235 if (br->multicast_disabled == !val)
1236 goto unlock;
1237
1238 br->multicast_disabled = !val;
1239 if (br->multicast_disabled)
1240 goto unlock;
1241
1242 if (br->mdb) {
1243 if (br->mdb->old) {
1244 err = -EEXIST;
1245rollback:
1246 br->multicast_disabled = !!val;
1247 goto unlock;
1248 }
1249
1250 err = br_mdb_rehash(&br->mdb, br->mdb->max,
1251 br->hash_elasticity);
1252 if (err)
1253 goto rollback;
1254 }
1255
1256 br_multicast_open(br);
1257 list_for_each_entry(port, &br->port_list, list) {
1258 if (port->state == BR_STATE_DISABLED ||
1259 port->state == BR_STATE_BLOCKING)
1260 continue;
1261
1262 __br_multicast_enable_port(port);
1263 }
1264
1265unlock:
1266 spin_unlock(&br->multicast_lock);
1267
1268 return err;
1269}
1270
1271int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1272{
1273 int err = -ENOENT;
1274 u32 old;
1275
1276 spin_lock(&br->multicast_lock);
1277 if (!netif_running(br->dev))
1278 goto unlock;
1279
1280 err = -EINVAL;
1281 if (!is_power_of_2(val))
1282 goto unlock;
1283 if (br->mdb && val < br->mdb->size)
1284 goto unlock;
1285
1286 err = 0;
1287
1288 old = br->hash_max;
1289 br->hash_max = val;
1290
1291 if (br->mdb) {
1292 if (br->mdb->old) {
1293 err = -EEXIST;
1294rollback:
1295 br->hash_max = old;
1296 goto unlock;
1297 }
1298
1299 err = br_mdb_rehash(&br->mdb, br->hash_max,
1300 br->hash_elasticity);
1301 if (err)
1302 goto rollback;
1303 }
1304
1305unlock:
1306 spin_unlock(&br->multicast_lock);
1307
1308 return err;
1309}
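br_multicast_toggle() and br_multicast_set_hash_max() above both goto a rollback: label that sits inside an if block; since C labels have function scope this is legal, and the jump simply re-enters the block to undo the speculative assignment before unlocking. A compilable toy with made-up names showing the same control flow:

#include <stdio.h>

/* Toy version of the rollback pattern used in br_multicast_toggle(): the
 * label lives inside the if block, and a later failure jumps back into it
 * to restore the old value.  All names and the fail flag are illustrative.
 */
static int set_value(int *slot, int val, int fail_later)
{
	int old = *slot;
	int err = 0;

	*slot = val;

	if (fail_later) {
		if (fail_later == 1) {
			err = -1;
rollback:
			*slot = old;
			goto out;
		}

		err = -2;
		goto rollback;	/* jumps back into the if block above */
	}

out:
	return err;
}

int main(void)
{
	int v = 7;

	printf("%d %d\n", set_value(&v, 9, 0), v);	/* 0 9  */
	printf("%d %d\n", set_value(&v, 5, 2), v);	/* -2 9 */
	return 0;
}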
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 268e2e725888..4c4977d12fd6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/ip.h> 27#include <linux/ip.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fcffb3fb1177..aa56ac2c8829 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <net/rtnetlink.h> 15#include <net/rtnetlink.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/sock.h> 17#include <net/sock.h>
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682ea..846d7d1e2075 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -57,6 +57,41 @@ struct net_bridge_fdb_entry
57 unsigned char is_static; 57 unsigned char is_static;
58}; 58};
59 59
60struct net_bridge_port_group {
61 struct net_bridge_port *port;
62 struct net_bridge_port_group *next;
63 struct hlist_node mglist;
64 struct rcu_head rcu;
65 struct timer_list timer;
66 struct timer_list query_timer;
67 __be32 addr;
68 u32 queries_sent;
69};
70
71struct net_bridge_mdb_entry
72{
73 struct hlist_node hlist[2];
74 struct hlist_node mglist;
75 struct net_bridge *br;
76 struct net_bridge_port_group *ports;
77 struct rcu_head rcu;
78 struct timer_list timer;
79 struct timer_list query_timer;
80 __be32 addr;
81 u32 queries_sent;
82};
83
84struct net_bridge_mdb_htable
85{
86 struct hlist_head *mhash;
87 struct rcu_head rcu;
88 struct net_bridge_mdb_htable *old;
89 u32 size;
90 u32 max;
91 u32 secret;
92 u32 ver;
93};
94
60struct net_bridge_port 95struct net_bridge_port
61{ 96{
62 struct net_bridge *br; 97 struct net_bridge *br;
@@ -84,6 +119,15 @@ struct net_bridge_port
84 119
85 unsigned long flags; 120 unsigned long flags;
86#define BR_HAIRPIN_MODE 0x00000001 121#define BR_HAIRPIN_MODE 0x00000001
122
123#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
124 u32 multicast_startup_queries_sent;
125 unsigned char multicast_router;
126 struct timer_list multicast_router_timer;
127 struct timer_list multicast_query_timer;
128 struct hlist_head mglist;
129 struct hlist_node rlist;
130#endif
87}; 131};
88 132
89struct net_bridge 133struct net_bridge
@@ -93,7 +137,6 @@ struct net_bridge
93 struct net_device *dev; 137 struct net_device *dev;
94 spinlock_t hash_lock; 138 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 139 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list;
97 unsigned long feature_mask; 140 unsigned long feature_mask;
98#ifdef CONFIG_BRIDGE_NETFILTER 141#ifdef CONFIG_BRIDGE_NETFILTER
99 struct rtable fake_rtable; 142 struct rtable fake_rtable;
@@ -125,6 +168,35 @@ struct net_bridge
125 unsigned char topology_change; 168 unsigned char topology_change;
126 unsigned char topology_change_detected; 169 unsigned char topology_change_detected;
127 170
171#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
172 unsigned char multicast_router;
173
174 u8 multicast_disabled:1;
175
176 u32 hash_elasticity;
177 u32 hash_max;
178
179 u32 multicast_last_member_count;
180 u32 multicast_startup_queries_sent;
181 u32 multicast_startup_query_count;
182
183 unsigned long multicast_last_member_interval;
184 unsigned long multicast_membership_interval;
185 unsigned long multicast_querier_interval;
186 unsigned long multicast_query_interval;
187 unsigned long multicast_query_response_interval;
188 unsigned long multicast_startup_query_interval;
189
190 spinlock_t multicast_lock;
191 struct net_bridge_mdb_htable *mdb;
192 struct hlist_head router_list;
193 struct hlist_head mglist;
194
195 struct timer_list multicast_router_timer;
196 struct timer_list multicast_querier_timer;
197 struct timer_list multicast_query_timer;
198#endif
199
128 struct timer_list hello_timer; 200 struct timer_list hello_timer;
129 struct timer_list tcn_timer; 201 struct timer_list tcn_timer;
130 struct timer_list topology_change_timer; 202 struct timer_list topology_change_timer;
@@ -132,6 +204,22 @@ struct net_bridge
132 struct kobject *ifobj; 204 struct kobject *ifobj;
133}; 205};
134 206
207struct br_input_skb_cb {
208 struct net_device *brdev;
209#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
210 int igmp;
211 int mrouters_only;
212#endif
213};
214
215#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
216
217#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
218# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only)
219#else
220# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0)
221#endif
222
135extern struct notifier_block br_device_notifier; 223extern struct notifier_block br_device_notifier;
136extern const u8 br_group_address[ETH_ALEN]; 224extern const u8 br_group_address[ETH_ALEN];
137 225
@@ -172,10 +260,11 @@ extern void br_deliver(const struct net_bridge_port *to,
172 struct sk_buff *skb); 260 struct sk_buff *skb);
173extern int br_dev_queue_push_xmit(struct sk_buff *skb); 261extern int br_dev_queue_push_xmit(struct sk_buff *skb);
174extern void br_forward(const struct net_bridge_port *to, 262extern void br_forward(const struct net_bridge_port *to,
175 struct sk_buff *skb); 263 struct sk_buff *skb, struct sk_buff *skb0);
176extern int br_forward_finish(struct sk_buff *skb); 264extern int br_forward_finish(struct sk_buff *skb);
177extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); 265extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
178extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb); 266extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
267 struct sk_buff *skb2);
179 268
180/* br_if.c */ 269/* br_if.c */
181extern void br_port_carrier_check(struct net_bridge_port *p); 270extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -198,6 +287,94 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
198extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 287extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
199extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg); 288extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
200 289
290/* br_multicast.c */
291#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
292extern int br_multicast_rcv(struct net_bridge *br,
293 struct net_bridge_port *port,
294 struct sk_buff *skb);
295extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
296 struct sk_buff *skb);
297extern void br_multicast_add_port(struct net_bridge_port *port);
298extern void br_multicast_del_port(struct net_bridge_port *port);
299extern void br_multicast_enable_port(struct net_bridge_port *port);
300extern void br_multicast_disable_port(struct net_bridge_port *port);
301extern void br_multicast_init(struct net_bridge *br);
302extern void br_multicast_open(struct net_bridge *br);
303extern void br_multicast_stop(struct net_bridge *br);
304extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
305 struct sk_buff *skb);
306extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
307 struct sk_buff *skb, struct sk_buff *skb2);
308extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
309extern int br_multicast_set_port_router(struct net_bridge_port *p,
310 unsigned long val);
311extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
312extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
313
314static inline bool br_multicast_is_router(struct net_bridge *br)
315{
316 return br->multicast_router == 2 ||
317 (br->multicast_router == 1 &&
318 timer_pending(&br->multicast_router_timer));
319}
320#else
321static inline int br_multicast_rcv(struct net_bridge *br,
322 struct net_bridge_port *port,
323 struct sk_buff *skb)
324{
325 return 0;
326}
327
328static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
329 struct sk_buff *skb)
330{
331 return NULL;
332}
333
334static inline void br_multicast_add_port(struct net_bridge_port *port)
335{
336}
337
338static inline void br_multicast_del_port(struct net_bridge_port *port)
339{
340}
341
342static inline void br_multicast_enable_port(struct net_bridge_port *port)
343{
344}
345
346static inline void br_multicast_disable_port(struct net_bridge_port *port)
347{
348}
349
350static inline void br_multicast_init(struct net_bridge *br)
351{
352}
353
354static inline void br_multicast_open(struct net_bridge *br)
355{
356}
357
358static inline void br_multicast_stop(struct net_bridge *br)
359{
360}
361
362static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
363 struct sk_buff *skb)
364{
365}
366
367static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
368 struct sk_buff *skb,
369 struct sk_buff *skb2)
370{
371}
372static inline bool br_multicast_is_router(struct net_bridge *br)
373{
374	return false;
375}
376#endif
377
201/* br_netfilter.c */ 378/* br_netfilter.c */
202#ifdef CONFIG_BRIDGE_NETFILTER 379#ifdef CONFIG_BRIDGE_NETFILTER
203extern int br_netfilter_init(void); 380extern int br_netfilter_init(void);
@@ -254,7 +431,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
254 431
255#ifdef CONFIG_SYSFS 432#ifdef CONFIG_SYSFS
256/* br_sysfs_if.c */ 433/* br_sysfs_if.c */
257extern struct sysfs_ops brport_sysfs_ops; 434extern const struct sysfs_ops brport_sysfs_ops;
258extern int br_sysfs_addif(struct net_bridge_port *p); 435extern int br_sysfs_addif(struct net_bridge_port *p);
259 436
260/* br_sysfs_br.c */ 437/* br_sysfs_br.c */
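The BR_INPUT_SKB_CB() macro introduced in this header overlays struct br_input_skb_cb on the 48-byte skb->cb scratch area, so per-packet IGMP state travels with the skb at no allocation cost. A userspace model of the idiom (struct fake_skb stands in for struct sk_buff, and brdev is reduced to a void pointer) is:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Model of the BR_INPUT_SKB_CB() idiom: per-packet bridge state is a small
 * struct overlaid on the 48-byte skb->cb scratch area via a cast.
 */
struct fake_skb {
	char cb[48];			/* same size as sk_buff::cb */
};

struct br_input_skb_cb {
	void *brdev;
	int igmp;
	int mrouters_only;
};

#define BR_INPUT_SKB_CB(skb) ((struct br_input_skb_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;

	/* the overlay must fit in the control block */
	assert(sizeof(struct br_input_skb_cb) <= sizeof(skb.cb));

	memset(&skb, 0, sizeof(skb));
	BR_INPUT_SKB_CB(&skb)->igmp = 1;
	BR_INPUT_SKB_CB(&skb)->mrouters_only = 1;

	printf("igmp=%d mrouters_only=%d\n",
	       BR_INPUT_SKB_CB(&skb)->igmp,
	       BR_INPUT_SKB_CB(&skb)->mrouters_only);
	return 0;
}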
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fd3f8d6c0998..edcf14b560f6 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -386,6 +386,8 @@ static void br_make_forwarding(struct net_bridge_port *p)
386 else 386 else
387 p->state = BR_STATE_LEARNING; 387 p->state = BR_STATE_LEARNING;
388 388
389 br_multicast_enable_port(p);
390
389 br_log_state(p); 391 br_log_state(p);
390 392
391 if (br->forward_delay != 0) 393 if (br->forward_delay != 0)
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 81ae40b3f655..d66cce11f3bf 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -15,6 +15,7 @@
15#include <linux/netfilter_bridge.h> 15#include <linux/netfilter_bridge.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#include <net/llc.h> 20#include <net/llc.h>
20#include <net/llc_pdu.h> 21#include <net/llc_pdu.h>
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9a52ac5b4525..d527119e9f54 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -108,6 +108,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
108 del_timer(&p->hold_timer); 108 del_timer(&p->hold_timer);
109 109
110 br_fdb_delete_by_port(br, p, 0); 110 br_fdb_delete_by_port(br, p, 0);
111 br_multicast_disable_port(p);
111 112
112 br_configuration_update(br); 113 br_configuration_update(br);
113 114
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index bee4f300d0c8..dd321e39e621 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -345,6 +345,273 @@ static ssize_t store_flush(struct device *d,
345} 345}
346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush); 346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
347 347
348#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
349static ssize_t show_multicast_router(struct device *d,
350 struct device_attribute *attr, char *buf)
351{
352 struct net_bridge *br = to_bridge(d);
353 return sprintf(buf, "%d\n", br->multicast_router);
354}
355
356static ssize_t store_multicast_router(struct device *d,
357 struct device_attribute *attr,
358 const char *buf, size_t len)
359{
360 return store_bridge_parm(d, buf, len, br_multicast_set_router);
361}
362static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
363 store_multicast_router);
364
365static ssize_t show_multicast_snooping(struct device *d,
366 struct device_attribute *attr,
367 char *buf)
368{
369 struct net_bridge *br = to_bridge(d);
370 return sprintf(buf, "%d\n", !br->multicast_disabled);
371}
372
373static ssize_t store_multicast_snooping(struct device *d,
374 struct device_attribute *attr,
375 const char *buf, size_t len)
376{
377 return store_bridge_parm(d, buf, len, br_multicast_toggle);
378}
379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
380 show_multicast_snooping, store_multicast_snooping);
381
382static ssize_t show_hash_elasticity(struct device *d,
383 struct device_attribute *attr, char *buf)
384{
385 struct net_bridge *br = to_bridge(d);
386 return sprintf(buf, "%u\n", br->hash_elasticity);
387}
388
389static int set_elasticity(struct net_bridge *br, unsigned long val)
390{
391 br->hash_elasticity = val;
392 return 0;
393}
394
395static ssize_t store_hash_elasticity(struct device *d,
396 struct device_attribute *attr,
397 const char *buf, size_t len)
398{
399 return store_bridge_parm(d, buf, len, set_elasticity);
400}
401static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
402 store_hash_elasticity);
403
404static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
405 char *buf)
406{
407 struct net_bridge *br = to_bridge(d);
408 return sprintf(buf, "%u\n", br->hash_max);
409}
410
411static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
412 const char *buf, size_t len)
413{
414 return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
415}
416static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
417 store_hash_max);
418
419static ssize_t show_multicast_last_member_count(struct device *d,
420 struct device_attribute *attr,
421 char *buf)
422{
423 struct net_bridge *br = to_bridge(d);
424 return sprintf(buf, "%u\n", br->multicast_last_member_count);
425}
426
427static int set_last_member_count(struct net_bridge *br, unsigned long val)
428{
429 br->multicast_last_member_count = val;
430 return 0;
431}
432
433static ssize_t store_multicast_last_member_count(struct device *d,
434 struct device_attribute *attr,
435 const char *buf, size_t len)
436{
437 return store_bridge_parm(d, buf, len, set_last_member_count);
438}
439static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
440 show_multicast_last_member_count,
441 store_multicast_last_member_count);
442
443static ssize_t show_multicast_startup_query_count(
444 struct device *d, struct device_attribute *attr, char *buf)
445{
446 struct net_bridge *br = to_bridge(d);
447 return sprintf(buf, "%u\n", br->multicast_startup_query_count);
448}
449
450static int set_startup_query_count(struct net_bridge *br, unsigned long val)
451{
452 br->multicast_startup_query_count = val;
453 return 0;
454}
455
456static ssize_t store_multicast_startup_query_count(
457 struct device *d, struct device_attribute *attr, const char *buf,
458 size_t len)
459{
460 return store_bridge_parm(d, buf, len, set_startup_query_count);
461}
462static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
463 show_multicast_startup_query_count,
464 store_multicast_startup_query_count);
465
466static ssize_t show_multicast_last_member_interval(
467 struct device *d, struct device_attribute *attr, char *buf)
468{
469 struct net_bridge *br = to_bridge(d);
470 return sprintf(buf, "%lu\n",
471 jiffies_to_clock_t(br->multicast_last_member_interval));
472}
473
474static int set_last_member_interval(struct net_bridge *br, unsigned long val)
475{
476 br->multicast_last_member_interval = clock_t_to_jiffies(val);
477 return 0;
478}
479
480static ssize_t store_multicast_last_member_interval(
481 struct device *d, struct device_attribute *attr, const char *buf,
482 size_t len)
483{
484 return store_bridge_parm(d, buf, len, set_last_member_interval);
485}
486static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
487 show_multicast_last_member_interval,
488 store_multicast_last_member_interval);
489
490static ssize_t show_multicast_membership_interval(
491 struct device *d, struct device_attribute *attr, char *buf)
492{
493 struct net_bridge *br = to_bridge(d);
494 return sprintf(buf, "%lu\n",
495 jiffies_to_clock_t(br->multicast_membership_interval));
496}
497
498static int set_membership_interval(struct net_bridge *br, unsigned long val)
499{
500 br->multicast_membership_interval = clock_t_to_jiffies(val);
501 return 0;
502}
503
504static ssize_t store_multicast_membership_interval(
505 struct device *d, struct device_attribute *attr, const char *buf,
506 size_t len)
507{
508 return store_bridge_parm(d, buf, len, set_membership_interval);
509}
510static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
511 show_multicast_membership_interval,
512 store_multicast_membership_interval);
513
514static ssize_t show_multicast_querier_interval(struct device *d,
515 struct device_attribute *attr,
516 char *buf)
517{
518 struct net_bridge *br = to_bridge(d);
519 return sprintf(buf, "%lu\n",
520 jiffies_to_clock_t(br->multicast_querier_interval));
521}
522
523static int set_querier_interval(struct net_bridge *br, unsigned long val)
524{
525 br->multicast_querier_interval = clock_t_to_jiffies(val);
526 return 0;
527}
528
529static ssize_t store_multicast_querier_interval(struct device *d,
530 struct device_attribute *attr,
531 const char *buf, size_t len)
532{
533 return store_bridge_parm(d, buf, len, set_querier_interval);
534}
535static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
536 show_multicast_querier_interval,
537 store_multicast_querier_interval);
538
539static ssize_t show_multicast_query_interval(struct device *d,
540 struct device_attribute *attr,
541 char *buf)
542{
543 struct net_bridge *br = to_bridge(d);
544 return sprintf(buf, "%lu\n",
545 jiffies_to_clock_t(br->multicast_query_interval));
546}
547
548static int set_query_interval(struct net_bridge *br, unsigned long val)
549{
550 br->multicast_query_interval = clock_t_to_jiffies(val);
551 return 0;
552}
553
554static ssize_t store_multicast_query_interval(struct device *d,
555 struct device_attribute *attr,
556 const char *buf, size_t len)
557{
558 return store_bridge_parm(d, buf, len, set_query_interval);
559}
560static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
561 show_multicast_query_interval,
562 store_multicast_query_interval);
563
564static ssize_t show_multicast_query_response_interval(
565 struct device *d, struct device_attribute *attr, char *buf)
566{
567 struct net_bridge *br = to_bridge(d);
568 return sprintf(
569 buf, "%lu\n",
570 jiffies_to_clock_t(br->multicast_query_response_interval));
571}
572
573static int set_query_response_interval(struct net_bridge *br, unsigned long val)
574{
575 br->multicast_query_response_interval = clock_t_to_jiffies(val);
576 return 0;
577}
578
579static ssize_t store_multicast_query_response_interval(
580 struct device *d, struct device_attribute *attr, const char *buf,
581 size_t len)
582{
583 return store_bridge_parm(d, buf, len, set_query_response_interval);
584}
585static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
586 show_multicast_query_response_interval,
587 store_multicast_query_response_interval);
588
589static ssize_t show_multicast_startup_query_interval(
590 struct device *d, struct device_attribute *attr, char *buf)
591{
592 struct net_bridge *br = to_bridge(d);
593 return sprintf(
594 buf, "%lu\n",
595 jiffies_to_clock_t(br->multicast_startup_query_interval));
596}
597
598static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
599{
600 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
601 return 0;
602}
603
604static ssize_t store_multicast_startup_query_interval(
605 struct device *d, struct device_attribute *attr, const char *buf,
606 size_t len)
607{
608 return store_bridge_parm(d, buf, len, set_startup_query_interval);
609}
610static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
611 show_multicast_startup_query_interval,
612 store_multicast_startup_query_interval);
613#endif
614
348static struct attribute *bridge_attrs[] = { 615static struct attribute *bridge_attrs[] = {
349 &dev_attr_forward_delay.attr, 616 &dev_attr_forward_delay.attr,
350 &dev_attr_hello_time.attr, 617 &dev_attr_hello_time.attr,
@@ -364,6 +631,20 @@ static struct attribute *bridge_attrs[] = {
364 &dev_attr_gc_timer.attr, 631 &dev_attr_gc_timer.attr,
365 &dev_attr_group_addr.attr, 632 &dev_attr_group_addr.attr,
366 &dev_attr_flush.attr, 633 &dev_attr_flush.attr,
634#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
635 &dev_attr_multicast_router.attr,
636 &dev_attr_multicast_snooping.attr,
637 &dev_attr_hash_elasticity.attr,
638 &dev_attr_hash_max.attr,
639 &dev_attr_multicast_last_member_count.attr,
640 &dev_attr_multicast_startup_query_count.attr,
641 &dev_attr_multicast_last_member_interval.attr,
642 &dev_attr_multicast_membership_interval.attr,
643 &dev_attr_multicast_querier_interval.attr,
644 &dev_attr_multicast_query_interval.attr,
645 &dev_attr_multicast_query_response_interval.attr,
646 &dev_attr_multicast_startup_query_interval.attr,
647#endif
367 NULL 648 NULL
368}; 649};
369 650
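The DEVICE_ATTR() entries added here surface the snooping parameters under /sys/class/net/<bridge>/bridge/. A minimal sketch of driving them from userspace, assuming a bridge named br0 exists and the caller is allowed to write the files:

#include <stdio.h>

/* Minimal sketch of poking the sysfs knobs registered above.  The bridge
 * name "br0" is an assumption; the attribute names come from the
 * DEVICE_ATTR() definitions in the hunk.
 */
static int write_attr(const char *br, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/bridge/%s", br, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* disable, then re-enable, IGMP snooping on br0 */
	if (write_attr("br0", "multicast_snooping", "0") ||
	    write_attr("br0", "multicast_snooping", "1"))
		perror("multicast_snooping");
	else
		puts("snooping toggled");
	return 0;
}

Each write is routed through store_bridge_parm() to the corresponding setter shown in the hunk.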
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 820643a3ba9c..0b9916489d6b 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -159,6 +159,21 @@ static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR, 159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
160 show_hairpin_mode, store_hairpin_mode); 160 show_hairpin_mode, store_hairpin_mode);
161 161
162#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
163static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
164{
165 return sprintf(buf, "%d\n", p->multicast_router);
166}
167
168static ssize_t store_multicast_router(struct net_bridge_port *p,
169 unsigned long v)
170{
171 return br_multicast_set_port_router(p, v);
172}
173static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
174 store_multicast_router);
175#endif
176
162static struct brport_attribute *brport_attrs[] = { 177static struct brport_attribute *brport_attrs[] = {
163 &brport_attr_path_cost, 178 &brport_attr_path_cost,
164 &brport_attr_priority, 179 &brport_attr_priority,
@@ -176,6 +191,9 @@ static struct brport_attribute *brport_attrs[] = {
176 &brport_attr_hold_timer, 191 &brport_attr_hold_timer,
177 &brport_attr_flush, 192 &brport_attr_flush,
178 &brport_attr_hairpin_mode, 193 &brport_attr_hairpin_mode,
194#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
195 &brport_attr_multicast_router,
196#endif
179 NULL 197 NULL
180}; 198};
181 199
@@ -220,7 +238,7 @@ static ssize_t brport_store(struct kobject * kobj,
220 return ret; 238 return ret;
221} 239}
222 240
223struct sysfs_ops brport_sysfs_ops = { 241const struct sysfs_ops brport_sysfs_ops = {
224 .show = brport_show, 242 .show = brport_show,
225 .store = brport_store, 243 .store = brport_store,
226}; 244};
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index bd91dc58d49b..5d1176758ca5 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
52 .family = NFPROTO_BRIDGE, 52 .family = NFPROTO_BRIDGE,
53 .match = ebt_802_3_mt, 53 .match = ebt_802_3_mt,
54 .checkentry = ebt_802_3_mt_check, 54 .checkentry = ebt_802_3_mt_check,
55 .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), 55 .matchsize = sizeof(struct ebt_802_3_info),
56 .me = THIS_MODULE, 56 .me = THIS_MODULE,
57}; 57};
58 58
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index b7ad60419f9a..e727697c5847 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
120 .family = NFPROTO_BRIDGE, 120 .family = NFPROTO_BRIDGE,
121 .match = ebt_arp_mt, 121 .match = ebt_arp_mt,
122 .checkentry = ebt_arp_mt_check, 122 .checkentry = ebt_arp_mt_check,
123 .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), 123 .matchsize = sizeof(struct ebt_arp_info),
124 .me = THIS_MODULE, 124 .me = THIS_MODULE,
125}; 125};
126 126
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 76584cd72e57..f392e9d93f53 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), 78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
79 .target = ebt_arpreply_tg, 79 .target = ebt_arpreply_tg,
80 .checkentry = ebt_arpreply_tg_check, 80 .checkentry = ebt_arpreply_tg_check,
81 .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), 81 .targetsize = sizeof(struct ebt_arpreply_info),
82 .me = THIS_MODULE, 82 .me = THIS_MODULE,
83}; 83};
84 84
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 6b49ea9e31fb..2bb40d728a35 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), 54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
55 .target = ebt_dnat_tg, 55 .target = ebt_dnat_tg,
56 .checkentry = ebt_dnat_tg_check, 56 .checkentry = ebt_dnat_tg_check,
57 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 57 .targetsize = sizeof(struct ebt_nat_info),
58 .me = THIS_MODULE, 58 .me = THIS_MODULE,
59}; 59};
60 60
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index d771bbfbcbe6..5de6df6f86b8 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
110 .family = NFPROTO_BRIDGE, 110 .family = NFPROTO_BRIDGE,
111 .match = ebt_ip_mt, 111 .match = ebt_ip_mt,
112 .checkentry = ebt_ip_mt_check, 112 .checkentry = ebt_ip_mt_check,
113 .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), 113 .matchsize = sizeof(struct ebt_ip_info),
114 .me = THIS_MODULE, 114 .me = THIS_MODULE,
115}; 115};
116 116
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 784a6573876c..bbf2534ef026 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
122 .family = NFPROTO_BRIDGE, 122 .family = NFPROTO_BRIDGE,
123 .match = ebt_ip6_mt, 123 .match = ebt_ip6_mt,
124 .checkentry = ebt_ip6_mt_check, 124 .checkentry = ebt_ip6_mt_check,
125 .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), 125 .matchsize = sizeof(struct ebt_ip6_info),
126 .me = THIS_MODULE, 126 .me = THIS_MODULE,
127}; 127};
128 128
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index f7bd9192ff0c..7a8182710eb3 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
84 return true; 84 return true;
85} 85}
86 86
87
88#ifdef CONFIG_COMPAT
89/*
90 * no conversion function needed --
91 * only avg/burst have meaningful values in userspace.
92 */
93struct ebt_compat_limit_info {
94 compat_uint_t avg, burst;
95 compat_ulong_t prev;
96 compat_uint_t credit, credit_cap, cost;
97};
98#endif
99
87static struct xt_match ebt_limit_mt_reg __read_mostly = { 100static struct xt_match ebt_limit_mt_reg __read_mostly = {
88 .name = "limit", 101 .name = "limit",
89 .revision = 0, 102 .revision = 0,
90 .family = NFPROTO_BRIDGE, 103 .family = NFPROTO_BRIDGE,
91 .match = ebt_limit_mt, 104 .match = ebt_limit_mt,
92 .checkentry = ebt_limit_mt_check, 105 .checkentry = ebt_limit_mt_check,
93 .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), 106 .matchsize = sizeof(struct ebt_limit_info),
107#ifdef CONFIG_COMPAT
108 .compatsize = sizeof(struct ebt_compat_limit_info),
109#endif
94 .me = THIS_MODULE, 110 .me = THIS_MODULE,
95}; 111};
96 112
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ea3fdd1d41..e873924ddb5d 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
195 .family = NFPROTO_BRIDGE, 195 .family = NFPROTO_BRIDGE,
196 .target = ebt_log_tg, 196 .target = ebt_log_tg,
197 .checkentry = ebt_log_tg_check, 197 .checkentry = ebt_log_tg_check,
198 .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), 198 .targetsize = sizeof(struct ebt_log_info),
199 .me = THIS_MODULE, 199 .me = THIS_MODULE,
200}; 200};
201 201
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2fee7e8e2e93..2b5ce533d6b9 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
52 return false; 52 return false;
53 return true; 53 return true;
54} 54}
55#ifdef CONFIG_COMPAT
56struct compat_ebt_mark_t_info {
57 compat_ulong_t mark;
58 compat_uint_t target;
59};
60
61static void mark_tg_compat_from_user(void *dst, const void *src)
62{
63 const struct compat_ebt_mark_t_info *user = src;
64 struct ebt_mark_t_info *kern = dst;
65
66 kern->mark = user->mark;
67 kern->target = user->target;
68}
69
70static int mark_tg_compat_to_user(void __user *dst, const void *src)
71{
72 struct compat_ebt_mark_t_info __user *user = dst;
73 const struct ebt_mark_t_info *kern = src;
74
75 if (put_user(kern->mark, &user->mark) ||
76 put_user(kern->target, &user->target))
77 return -EFAULT;
78 return 0;
79}
80#endif
55 81
56static struct xt_target ebt_mark_tg_reg __read_mostly = { 82static struct xt_target ebt_mark_tg_reg __read_mostly = {
57 .name = "mark", 83 .name = "mark",
@@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
59 .family = NFPROTO_BRIDGE, 85 .family = NFPROTO_BRIDGE,
60 .target = ebt_mark_tg, 86 .target = ebt_mark_tg,
61 .checkentry = ebt_mark_tg_check, 87 .checkentry = ebt_mark_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), 88 .targetsize = sizeof(struct ebt_mark_t_info),
89#ifdef CONFIG_COMPAT
90 .compatsize = sizeof(struct compat_ebt_mark_t_info),
91 .compat_from_user = mark_tg_compat_from_user,
92 .compat_to_user = mark_tg_compat_to_user,
93#endif
63 .me = THIS_MODULE, 94 .me = THIS_MODULE,
64}; 95};
65 96
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index ea570f214b1d..8de8c396d913 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
35 return true; 35 return true;
36} 36}
37 37
38
39#ifdef CONFIG_COMPAT
40struct compat_ebt_mark_m_info {
41 compat_ulong_t mark, mask;
42 uint8_t invert, bitmask;
43};
44
45static void mark_mt_compat_from_user(void *dst, const void *src)
46{
47 const struct compat_ebt_mark_m_info *user = src;
48 struct ebt_mark_m_info *kern = dst;
49
50 kern->mark = user->mark;
51 kern->mask = user->mask;
52 kern->invert = user->invert;
53 kern->bitmask = user->bitmask;
54}
55
56static int mark_mt_compat_to_user(void __user *dst, const void *src)
57{
58 struct compat_ebt_mark_m_info __user *user = dst;
59 const struct ebt_mark_m_info *kern = src;
60
61 if (put_user(kern->mark, &user->mark) ||
62 put_user(kern->mask, &user->mask) ||
63 put_user(kern->invert, &user->invert) ||
64 put_user(kern->bitmask, &user->bitmask))
65 return -EFAULT;
66 return 0;
67}
68#endif
69
38static struct xt_match ebt_mark_mt_reg __read_mostly = { 70static struct xt_match ebt_mark_mt_reg __read_mostly = {
39 .name = "mark_m", 71 .name = "mark_m",
40 .revision = 0, 72 .revision = 0,
41 .family = NFPROTO_BRIDGE, 73 .family = NFPROTO_BRIDGE,
42 .match = ebt_mark_mt, 74 .match = ebt_mark_mt,
43 .checkentry = ebt_mark_mt_check, 75 .checkentry = ebt_mark_mt_check,
44 .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), 76 .matchsize = sizeof(struct ebt_mark_m_info),
77#ifdef CONFIG_COMPAT
78 .compatsize = sizeof(struct compat_ebt_mark_m_info),
79 .compat_from_user = mark_mt_compat_from_user,
80 .compat_to_user = mark_mt_compat_to_user,
81#endif
45 .me = THIS_MODULE, 82 .me = THIS_MODULE,
46}; 83};
47 84
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 2a63d996dd4e..40dbd248b9ae 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
51 .family = NFPROTO_BRIDGE, 51 .family = NFPROTO_BRIDGE,
52 .target = ebt_nflog_tg, 52 .target = ebt_nflog_tg,
53 .checkentry = ebt_nflog_tg_check, 53 .checkentry = ebt_nflog_tg_check,
54 .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), 54 .targetsize = sizeof(struct ebt_nflog_info),
55 .me = THIS_MODULE, 55 .me = THIS_MODULE,
56}; 56};
57 57
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 883e96e2a542..e2a07e6cbef3 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
36 .family = NFPROTO_BRIDGE, 36 .family = NFPROTO_BRIDGE,
37 .match = ebt_pkttype_mt, 37 .match = ebt_pkttype_mt,
38 .checkentry = ebt_pkttype_mt_check, 38 .checkentry = ebt_pkttype_mt_check,
39 .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), 39 .matchsize = sizeof(struct ebt_pkttype_info),
40 .me = THIS_MODULE, 40 .me = THIS_MODULE,
41}; 41};
42 42
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index c8a49f7a57ba..9be8fbcd370b 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
59 (1 << NF_BR_BROUTING), 59 (1 << NF_BR_BROUTING),
60 .target = ebt_redirect_tg, 60 .target = ebt_redirect_tg,
61 .checkentry = ebt_redirect_tg_check, 61 .checkentry = ebt_redirect_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), 62 .targetsize = sizeof(struct ebt_redirect_info),
63 .me = THIS_MODULE, 63 .me = THIS_MODULE,
64}; 64};
65 65
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 8d04d4c302bd..9c7b520765a2 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), 67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
68 .target = ebt_snat_tg, 68 .target = ebt_snat_tg,
69 .checkentry = ebt_snat_tg_check, 69 .checkentry = ebt_snat_tg_check,
70 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 70 .targetsize = sizeof(struct ebt_nat_info),
71 .me = THIS_MODULE, 71 .me = THIS_MODULE,
72}; 72};
73 73
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 75e29a9cebda..92a93d363765 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
177 .family = NFPROTO_BRIDGE, 177 .family = NFPROTO_BRIDGE,
178 .match = ebt_stp_mt, 178 .match = ebt_stp_mt,
179 .checkentry = ebt_stp_mt_check, 179 .checkentry = ebt_stp_mt_check,
180 .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), 180 .matchsize = sizeof(struct ebt_stp_info),
181 .me = THIS_MODULE, 181 .me = THIS_MODULE,
182}; 182};
183 183
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce50688a6431..f9560f3dbdc7 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h>
32#include <linux/spinlock.h> 33#include <linux/spinlock.h>
33#include <linux/socket.h> 34#include <linux/socket.h>
34#include <linux/skbuff.h> 35#include <linux/skbuff.h>
@@ -275,7 +276,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
275 .family = NFPROTO_BRIDGE, 276 .family = NFPROTO_BRIDGE,
276 .target = ebt_ulog_tg, 277 .target = ebt_ulog_tg,
277 .checkentry = ebt_ulog_tg_check, 278 .checkentry = ebt_ulog_tg_check,
278 .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), 279 .targetsize = sizeof(struct ebt_ulog_info),
279 .me = THIS_MODULE, 280 .me = THIS_MODULE,
280}; 281};
281 282
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 3dddd489328e..be1dd2e1f615 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
163 .family = NFPROTO_BRIDGE, 163 .family = NFPROTO_BRIDGE,
164 .match = ebt_vlan_mt, 164 .match = ebt_vlan_mt,
165 .checkentry = ebt_vlan_mt_check, 165 .checkentry = ebt_vlan_mt_check,
166 .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), 166 .matchsize = sizeof(struct ebt_vlan_info),
167 .me = THIS_MODULE, 167 .me = THIS_MODULE,
168}; 168};
169 169
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d32ab13e728c..ae3f106c3908 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
71 71
72static void __net_exit broute_net_exit(struct net *net) 72static void __net_exit broute_net_exit(struct net *net)
73{ 73{
74 ebt_unregister_table(net->xt.broute_table); 74 ebt_unregister_table(net, net->xt.broute_table);
75} 75}
76 76
77static struct pernet_operations broute_net_ops = { 77static struct pernet_operations broute_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 60b1a6ca7185..42e6bd094574 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
107 107
108static void __net_exit frame_filter_net_exit(struct net *net) 108static void __net_exit frame_filter_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_filter); 110 ebt_unregister_table(net, net->xt.frame_filter);
111} 111}
112 112
113static struct pernet_operations frame_filter_net_ops = { 113static struct pernet_operations frame_filter_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4a98804203b0..6dc2f878ae05 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
107 107
108static void __net_exit frame_nat_net_exit(struct net *net) 108static void __net_exit frame_nat_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_nat); 110 ebt_unregister_table(net, net->xt.frame_nat);
111} 111}
112 112
113static struct pernet_operations frame_nat_net_ops = { 113static struct pernet_operations frame_nat_net_ops = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0b7f262cd148..f0865fd1e3ec 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -23,6 +23,7 @@
23#include <linux/netfilter_bridge/ebtables.h> 23#include <linux/netfilter_bridge/ebtables.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/slab.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <linux/smp.h> 28#include <linux/smp.h>
28#include <linux/cpumask.h> 29#include <linux/cpumask.h>
@@ -33,11 +34,6 @@
33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 34#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args) 35 "report to author: "format, ## args)
35/* #define BUGPRINT(format, args...) */ 36/* #define BUGPRINT(format, args...) */
36#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
37 ": out of memory: "format, ## args)
38/* #define MEMPRINT(format, args...) */
39
40
41 37
42/* 38/*
43 * Each cpu has its own set of counters, so there is no need for write_lock in 39 * Each cpu has its own set of counters, so there is no need for write_lock in
@@ -56,11 +52,37 @@
56 52
57static DEFINE_MUTEX(ebt_mutex); 53static DEFINE_MUTEX(ebt_mutex);
58 54
55#ifdef CONFIG_COMPAT
56static void ebt_standard_compat_from_user(void *dst, const void *src)
57{
58 int v = *(compat_int_t *)src;
59
60 if (v >= 0)
61 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 memcpy(dst, &v, sizeof(v));
63}
64
65static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66{
67 compat_int_t cv = *(int *)src;
68
69 if (cv >= 0)
70 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72}
73#endif
74
75
59static struct xt_target ebt_standard_target = { 76static struct xt_target ebt_standard_target = {
60 .name = "standard", 77 .name = "standard",
61 .revision = 0, 78 .revision = 0,
62 .family = NFPROTO_BRIDGE, 79 .family = NFPROTO_BRIDGE,
63 .targetsize = sizeof(int), 80 .targetsize = sizeof(int),
81#ifdef CONFIG_COMPAT
82 .compatsize = sizeof(compat_int_t),
83 .compat_from_user = ebt_standard_compat_from_user,
84 .compat_to_user = ebt_standard_compat_to_user,
85#endif
64}; 86};
65 87
66static inline int 88static inline int
@@ -82,7 +104,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
82 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH; 104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
83} 105}
84 106
85static inline int ebt_dev_check(char *entry, const struct net_device *device) 107static inline int
108ebt_dev_check(const char *entry, const struct net_device *device)
86{ 109{
87 int i = 0; 110 int i = 0;
88 const char *devname; 111 const char *devname;
@@ -100,8 +123,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
100 123
101#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) 124#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
102/* process standard matches */ 125/* process standard matches */
103static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, 126static inline int
104 const struct net_device *in, const struct net_device *out) 127ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
105{ 129{
106 int verdict, i; 130 int verdict, i;
107 131
@@ -156,12 +180,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
156 int i, nentries; 180 int i, nentries;
157 struct ebt_entry *point; 181 struct ebt_entry *point;
158 struct ebt_counter *counter_base, *cb_base; 182 struct ebt_counter *counter_base, *cb_base;
159 struct ebt_entry_target *t; 183 const struct ebt_entry_target *t;
160 int verdict, sp = 0; 184 int verdict, sp = 0;
161 struct ebt_chainstack *cs; 185 struct ebt_chainstack *cs;
162 struct ebt_entries *chaininfo; 186 struct ebt_entries *chaininfo;
163 char *base; 187 const char *base;
164 struct ebt_table_info *private; 188 const struct ebt_table_info *private;
165 bool hotdrop = false; 189 bool hotdrop = false;
166 struct xt_match_param mtpar; 190 struct xt_match_param mtpar;
167 struct xt_target_param tgpar; 191 struct xt_target_param tgpar;
@@ -395,7 +419,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
395 return 0; 419 return 0;
396} 420}
397 421
398static int ebt_verify_pointers(struct ebt_replace *repl, 422static int ebt_verify_pointers(const struct ebt_replace *repl,
399 struct ebt_table_info *newinfo) 423 struct ebt_table_info *newinfo)
400{ 424{
401 unsigned int limit = repl->entries_size; 425 unsigned int limit = repl->entries_size;
@@ -442,6 +466,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
442 break; 466 break;
443 if (left < e->next_offset) 467 if (left < e->next_offset)
444 break; 468 break;
469 if (e->next_offset < sizeof(struct ebt_entry))
470 return -EINVAL;
445 offset += e->next_offset; 471 offset += e->next_offset;
446 } 472 }
447 } 473 }
@@ -466,8 +492,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
466 * to parse the userspace data 492 * to parse the userspace data
467 */ 493 */
468static inline int 494static inline int
469ebt_check_entry_size_and_hooks(struct ebt_entry *e, 495ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
470 struct ebt_table_info *newinfo, 496 const struct ebt_table_info *newinfo,
471 unsigned int *n, unsigned int *cnt, 497 unsigned int *n, unsigned int *cnt,
472 unsigned int *totalcnt, unsigned int *udc_cnt) 498 unsigned int *totalcnt, unsigned int *udc_cnt)
473{ 499{
@@ -561,13 +587,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
561} 587}
562 588
563static inline int 589static inline int
564ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
565{ 591{
566 struct xt_mtdtor_param par; 592 struct xt_mtdtor_param par;
567 593
568 if (i && (*i)-- == 0) 594 if (i && (*i)-- == 0)
569 return 1; 595 return 1;
570 596
597 par.net = net;
571 par.match = m->u.match; 598 par.match = m->u.match;
572 par.matchinfo = m->data; 599 par.matchinfo = m->data;
573 par.family = NFPROTO_BRIDGE; 600 par.family = NFPROTO_BRIDGE;
@@ -578,13 +605,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
578} 605}
579 606
580static inline int 607static inline int
581ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
582{ 609{
583 struct xt_tgdtor_param par; 610 struct xt_tgdtor_param par;
584 611
585 if (i && (*i)-- == 0) 612 if (i && (*i)-- == 0)
586 return 1; 613 return 1;
587 614
615 par.net = net;
588 par.target = w->u.watcher; 616 par.target = w->u.watcher;
589 par.targinfo = w->data; 617 par.targinfo = w->data;
590 par.family = NFPROTO_BRIDGE; 618 par.family = NFPROTO_BRIDGE;
@@ -595,7 +623,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
595} 623}
596 624
597static inline int 625static inline int
598ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) 626ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
599{ 627{
600 struct xt_tgdtor_param par; 628 struct xt_tgdtor_param par;
601 struct ebt_entry_target *t; 629 struct ebt_entry_target *t;
@@ -605,10 +633,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
605 /* we're done */ 633 /* we're done */
606 if (cnt && (*cnt)-- == 0) 634 if (cnt && (*cnt)-- == 0)
607 return 1; 635 return 1;
608 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); 636 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
609 EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); 637 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
610 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 638 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
611 639
640 par.net = net;
612 par.target = t->u.target; 641 par.target = t->u.target;
613 par.targinfo = t->data; 642 par.targinfo = t->data;
614 par.family = NFPROTO_BRIDGE; 643 par.family = NFPROTO_BRIDGE;
@@ -619,7 +648,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
619} 648}
620 649
621static inline int 650static inline int
622ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, 651ebt_check_entry(struct ebt_entry *e, struct net *net,
652 const struct ebt_table_info *newinfo,
623 const char *name, unsigned int *cnt, 653 const char *name, unsigned int *cnt,
624 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 654 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
625{ 655{
@@ -671,6 +701,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
671 } 701 }
672 i = 0; 702 i = 0;
673 703
704 mtpar.net = tgpar.net = net;
674 mtpar.table = tgpar.table = name; 705 mtpar.table = tgpar.table = name;
675 mtpar.entryinfo = tgpar.entryinfo = e; 706 mtpar.entryinfo = tgpar.entryinfo = e;
676 mtpar.hook_mask = tgpar.hook_mask = hookmask; 707 mtpar.hook_mask = tgpar.hook_mask = hookmask;
@@ -726,9 +757,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
726 (*cnt)++; 757 (*cnt)++;
727 return 0; 758 return 0;
728cleanup_watchers: 759cleanup_watchers:
729 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); 760 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
730cleanup_matches: 761cleanup_matches:
731 EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); 762 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
732 return ret; 763 return ret;
733} 764}
734 765
@@ -737,12 +768,12 @@ cleanup_matches:
737 * the hook mask for udc tells us from which base chains the udc can be 768 * the hook mask for udc tells us from which base chains the udc can be
738 * accessed. This mask is a parameter to the check() functions of the extensions 769 * accessed. This mask is a parameter to the check() functions of the extensions
739 */ 770 */
740static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 771static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
741 unsigned int udc_cnt, unsigned int hooknr, char *base) 772 unsigned int udc_cnt, unsigned int hooknr, char *base)
742{ 773{
743 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 774 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
744 struct ebt_entry *e = (struct ebt_entry *)chain->data; 775 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
745 struct ebt_entry_target *t; 776 const struct ebt_entry_target *t;
746 777
747 while (pos < nentries || chain_nr != -1) { 778 while (pos < nentries || chain_nr != -1) {
748 /* end of udc, go back one 'recursion' step */ 779 /* end of udc, go back one 'recursion' step */
@@ -808,7 +839,8 @@ letscontinue:
808} 839}
809 840
810/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 841/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
811static int translate_table(char *name, struct ebt_table_info *newinfo) 842static int translate_table(struct net *net, const char *name,
843 struct ebt_table_info *newinfo)
812{ 844{
813 unsigned int i, j, k, udc_cnt; 845 unsigned int i, j, k, udc_cnt;
814 int ret; 846 int ret;
@@ -917,17 +949,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
917 /* used to know what we need to clean up if something goes wrong */ 949 /* used to know what we need to clean up if something goes wrong */
918 i = 0; 950 i = 0;
919 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 951 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
920 ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); 952 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
921 if (ret != 0) { 953 if (ret != 0) {
922 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 954 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
923 ebt_cleanup_entry, &i); 955 ebt_cleanup_entry, net, &i);
924 } 956 }
925 vfree(cl_s); 957 vfree(cl_s);
926 return ret; 958 return ret;
927} 959}
928 960
929/* called under write_lock */ 961/* called under write_lock */
930static void get_counters(struct ebt_counter *oldcounters, 962static void get_counters(const struct ebt_counter *oldcounters,
931 struct ebt_counter *counters, unsigned int nentries) 963 struct ebt_counter *counters, unsigned int nentries)
932{ 964{
933 int i, cpu; 965 int i, cpu;
@@ -949,90 +981,45 @@ static void get_counters(struct ebt_counter *oldcounters,
949 } 981 }
950} 982}
951 983
952/* replace the table */ 984static int do_replace_finish(struct net *net, struct ebt_replace *repl,
953static int do_replace(struct net *net, void __user *user, unsigned int len) 985 struct ebt_table_info *newinfo)
954{ 986{
955 int ret, i, countersize; 987 int ret, i;
956 struct ebt_table_info *newinfo;
957 struct ebt_replace tmp;
958 struct ebt_table *t;
959 struct ebt_counter *counterstmp = NULL; 988 struct ebt_counter *counterstmp = NULL;
960 /* used to be able to unlock earlier */ 989 /* used to be able to unlock earlier */
961 struct ebt_table_info *table; 990 struct ebt_table_info *table;
962 991 struct ebt_table *t;
963 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
964 return -EFAULT;
965
966 if (len != sizeof(tmp) + tmp.entries_size) {
967 BUGPRINT("Wrong len argument\n");
968 return -EINVAL;
969 }
970
971 if (tmp.entries_size == 0) {
972 BUGPRINT("Entries_size never zero\n");
973 return -EINVAL;
974 }
975 /* overflow check */
976 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
977 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
978 return -ENOMEM;
979 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
980 return -ENOMEM;
981
982 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
983 newinfo = vmalloc(sizeof(*newinfo) + countersize);
984 if (!newinfo)
985 return -ENOMEM;
986
987 if (countersize)
988 memset(newinfo->counters, 0, countersize);
989
990 newinfo->entries = vmalloc(tmp.entries_size);
991 if (!newinfo->entries) {
992 ret = -ENOMEM;
993 goto free_newinfo;
994 }
995 if (copy_from_user(
996 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
997 BUGPRINT("Couldn't copy entries from userspace\n");
998 ret = -EFAULT;
999 goto free_entries;
1000 }
1001 992
1002 /* the user wants counters back 993 /* the user wants counters back
1003 the check on the size is done later, when we have the lock */ 994 the check on the size is done later, when we have the lock */
1004 if (tmp.num_counters) { 995 if (repl->num_counters) {
1005 counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); 996 unsigned long size = repl->num_counters * sizeof(*counterstmp);
1006 if (!counterstmp) { 997 counterstmp = vmalloc(size);
1007 ret = -ENOMEM; 998 if (!counterstmp)
1008 goto free_entries; 999 return -ENOMEM;
1009 }
1010 } 1000 }
1011 else
1012 counterstmp = NULL;
1013 1001
1014 /* this can get initialized by translate_table() */
1015 newinfo->chainstack = NULL; 1002 newinfo->chainstack = NULL;
1016 ret = ebt_verify_pointers(&tmp, newinfo); 1003 ret = ebt_verify_pointers(repl, newinfo);
1017 if (ret != 0) 1004 if (ret != 0)
1018 goto free_counterstmp; 1005 goto free_counterstmp;
1019 1006
1020 ret = translate_table(tmp.name, newinfo); 1007 ret = translate_table(net, repl->name, newinfo);
1021 1008
1022 if (ret != 0) 1009 if (ret != 0)
1023 goto free_counterstmp; 1010 goto free_counterstmp;
1024 1011
1025 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 1012 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1026 if (!t) { 1013 if (!t) {
1027 ret = -ENOENT; 1014 ret = -ENOENT;
1028 goto free_iterate; 1015 goto free_iterate;
1029 } 1016 }
1030 1017
1031 /* the table doesn't like it */ 1018 /* the table doesn't like it */
1032 if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) 1019 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1033 goto free_unlock; 1020 goto free_unlock;
1034 1021
1035 if (tmp.num_counters && tmp.num_counters != t->private->nentries) { 1022 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1036 BUGPRINT("Wrong nr. of counters requested\n"); 1023 BUGPRINT("Wrong nr. of counters requested\n");
1037 ret = -EINVAL; 1024 ret = -EINVAL;
1038 goto free_unlock; 1025 goto free_unlock;
@@ -1048,7 +1035,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1048 module_put(t->me); 1035 module_put(t->me);
1049 /* we need an atomic snapshot of the counters */ 1036 /* we need an atomic snapshot of the counters */
1050 write_lock_bh(&t->lock); 1037 write_lock_bh(&t->lock);
1051 if (tmp.num_counters) 1038 if (repl->num_counters)
1052 get_counters(t->private->counters, counterstmp, 1039 get_counters(t->private->counters, counterstmp,
1053 t->private->nentries); 1040 t->private->nentries);
1054 1041
@@ -1059,10 +1046,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1059 allocation. Only reason why this is done is because this way the lock 1046 allocation. Only reason why this is done is because this way the lock
1060 is held only once, while this doesn't bring the kernel into a 1047 is held only once, while this doesn't bring the kernel into a
1061 dangerous state. */ 1048 dangerous state. */
1062 if (tmp.num_counters && 1049 if (repl->num_counters &&
1063 copy_to_user(tmp.counters, counterstmp, 1050 copy_to_user(repl->counters, counterstmp,
1064 tmp.num_counters * sizeof(struct ebt_counter))) { 1051 repl->num_counters * sizeof(struct ebt_counter))) {
1065 BUGPRINT("Couldn't copy counters to userspace\n");
1066 ret = -EFAULT; 1052 ret = -EFAULT;
1067 } 1053 }
1068 else 1054 else
@@ -1070,7 +1056,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1070 1056
1071 /* decrease module count and free resources */ 1057 /* decrease module count and free resources */
1072 EBT_ENTRY_ITERATE(table->entries, table->entries_size, 1058 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1073 ebt_cleanup_entry, NULL); 1059 ebt_cleanup_entry, net, NULL);
1074 1060
1075 vfree(table->entries); 1061 vfree(table->entries);
1076 if (table->chainstack) { 1062 if (table->chainstack) {
@@ -1087,7 +1073,7 @@ free_unlock:
1087 mutex_unlock(&ebt_mutex); 1073 mutex_unlock(&ebt_mutex);
1088free_iterate: 1074free_iterate:
1089 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1075 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1090 ebt_cleanup_entry, NULL); 1076 ebt_cleanup_entry, net, NULL);
1091free_counterstmp: 1077free_counterstmp:
1092 vfree(counterstmp); 1078 vfree(counterstmp);
1093 /* can be initialized in translate_table() */ 1079 /* can be initialized in translate_table() */
@@ -1096,6 +1082,59 @@ free_counterstmp:
1096 vfree(newinfo->chainstack[i]); 1082 vfree(newinfo->chainstack[i]);
1097 vfree(newinfo->chainstack); 1083 vfree(newinfo->chainstack);
1098 } 1084 }
1085 return ret;
1086}
1087
1088/* replace the table */
1089static int do_replace(struct net *net, const void __user *user,
1090 unsigned int len)
1091{
1092 int ret, countersize;
1093 struct ebt_table_info *newinfo;
1094 struct ebt_replace tmp;
1095
1096 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1097 return -EFAULT;
1098
1099 if (len != sizeof(tmp) + tmp.entries_size) {
1100 BUGPRINT("Wrong len argument\n");
1101 return -EINVAL;
1102 }
1103
1104 if (tmp.entries_size == 0) {
1105 BUGPRINT("Entries_size never zero\n");
1106 return -EINVAL;
1107 }
1108 /* overflow check */
1109 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1110 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1111 return -ENOMEM;
1112 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1113 return -ENOMEM;
1114
1115 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1116 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1117 if (!newinfo)
1118 return -ENOMEM;
1119
1120 if (countersize)
1121 memset(newinfo->counters, 0, countersize);
1122
1123 newinfo->entries = vmalloc(tmp.entries_size);
1124 if (!newinfo->entries) {
1125 ret = -ENOMEM;
1126 goto free_newinfo;
1127 }
1128 if (copy_from_user(
1129 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1130 BUGPRINT("Couldn't copy entries from userspace\n");
1131 ret = -EFAULT;
1132 goto free_entries;
1133 }
1134
1135 ret = do_replace_finish(net, &tmp, newinfo);
1136 if (ret == 0)
1137 return ret;
1099free_entries: 1138free_entries:
1100 vfree(newinfo->entries); 1139 vfree(newinfo->entries);
1101free_newinfo: 1140free_newinfo:
@@ -1154,7 +1193,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
1154 newinfo->hook_entry[i] = p + 1193 newinfo->hook_entry[i] = p +
1155 ((char *)repl->hook_entry[i] - repl->entries); 1194 ((char *)repl->hook_entry[i] - repl->entries);
1156 } 1195 }
1157 ret = translate_table(repl->name, newinfo); 1196 ret = translate_table(net, repl->name, newinfo);
1158 if (ret != 0) { 1197 if (ret != 0) {
1159 BUGPRINT("Translate_table failed\n"); 1198 BUGPRINT("Translate_table failed\n");
1160 goto free_chainstack; 1199 goto free_chainstack;
@@ -1204,7 +1243,7 @@ out:
1204 return ERR_PTR(ret); 1243 return ERR_PTR(ret);
1205} 1244}
1206 1245
1207void ebt_unregister_table(struct ebt_table *table) 1246void ebt_unregister_table(struct net *net, struct ebt_table *table)
1208{ 1247{
1209 int i; 1248 int i;
1210 1249
@@ -1216,7 +1255,7 @@ void ebt_unregister_table(struct ebt_table *table)
1216 list_del(&table->list); 1255 list_del(&table->list);
1217 mutex_unlock(&ebt_mutex); 1256 mutex_unlock(&ebt_mutex);
1218 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, 1257 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1219 ebt_cleanup_entry, NULL); 1258 ebt_cleanup_entry, net, NULL);
1220 if (table->private->nentries) 1259 if (table->private->nentries)
1221 module_put(table->me); 1260 module_put(table->me);
1222 vfree(table->private->entries); 1261 vfree(table->private->entries);
@@ -1230,39 +1269,33 @@ void ebt_unregister_table(struct ebt_table *table)
1230} 1269}
1231 1270
1232/* userspace just supplied us with counters */ 1271/* userspace just supplied us with counters */
1233static int update_counters(struct net *net, void __user *user, unsigned int len) 1272static int do_update_counters(struct net *net, const char *name,
1273 struct ebt_counter __user *counters,
1274 unsigned int num_counters,
1275 const void __user *user, unsigned int len)
1234{ 1276{
1235 int i, ret; 1277 int i, ret;
1236 struct ebt_counter *tmp; 1278 struct ebt_counter *tmp;
1237 struct ebt_replace hlp;
1238 struct ebt_table *t; 1279 struct ebt_table *t;
1239 1280
1240 if (copy_from_user(&hlp, user, sizeof(hlp))) 1281 if (num_counters == 0)
1241 return -EFAULT;
1242
1243 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1244 return -EINVAL;
1245 if (hlp.num_counters == 0)
1246 return -EINVAL; 1282 return -EINVAL;
1247 1283
1248 if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { 1284 tmp = vmalloc(num_counters * sizeof(*tmp));
1249 MEMPRINT("Update_counters && nomemory\n"); 1285 if (!tmp)
1250 return -ENOMEM; 1286 return -ENOMEM;
1251 }
1252 1287
1253 t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); 1288 t = find_table_lock(net, name, &ret, &ebt_mutex);
1254 if (!t) 1289 if (!t)
1255 goto free_tmp; 1290 goto free_tmp;
1256 1291
1257 if (hlp.num_counters != t->private->nentries) { 1292 if (num_counters != t->private->nentries) {
1258 BUGPRINT("Wrong nr of counters\n"); 1293 BUGPRINT("Wrong nr of counters\n");
1259 ret = -EINVAL; 1294 ret = -EINVAL;
1260 goto unlock_mutex; 1295 goto unlock_mutex;
1261 } 1296 }
1262 1297
1263 if ( copy_from_user(tmp, hlp.counters, 1298 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1264 hlp.num_counters * sizeof(struct ebt_counter)) ) {
1265 BUGPRINT("Updata_counters && !cfu\n");
1266 ret = -EFAULT; 1299 ret = -EFAULT;
1267 goto unlock_mutex; 1300 goto unlock_mutex;
1268 } 1301 }
@@ -1271,7 +1304,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len)
1271 write_lock_bh(&t->lock); 1304 write_lock_bh(&t->lock);
1272 1305
1273 /* we add to the counters of the first cpu */ 1306 /* we add to the counters of the first cpu */
1274 for (i = 0; i < hlp.num_counters; i++) { 1307 for (i = 0; i < num_counters; i++) {
1275 t->private->counters[i].pcnt += tmp[i].pcnt; 1308 t->private->counters[i].pcnt += tmp[i].pcnt;
1276 t->private->counters[i].bcnt += tmp[i].bcnt; 1309 t->private->counters[i].bcnt += tmp[i].bcnt;
1277 } 1310 }
@@ -1285,8 +1318,23 @@ free_tmp:
1285 return ret; 1318 return ret;
1286} 1319}
1287 1320
1288static inline int ebt_make_matchname(struct ebt_entry_match *m, 1321static int update_counters(struct net *net, const void __user *user,
1289 char *base, char __user *ubase) 1322 unsigned int len)
1323{
1324 struct ebt_replace hlp;
1325
1326 if (copy_from_user(&hlp, user, sizeof(hlp)))
1327 return -EFAULT;
1328
1329 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1330 return -EINVAL;
1331
1332 return do_update_counters(net, hlp.name, hlp.counters,
1333 hlp.num_counters, user, len);
1334}
1335
1336static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1337 const char *base, char __user *ubase)
1290{ 1338{
1291 char __user *hlp = ubase + ((char *)m - base); 1339 char __user *hlp = ubase + ((char *)m - base);
1292 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) 1340 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1294,8 +1342,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
1294 return 0; 1342 return 0;
1295} 1343}
1296 1344
1297static inline int ebt_make_watchername(struct ebt_entry_watcher *w, 1345static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1298 char *base, char __user *ubase) 1346 const char *base, char __user *ubase)
1299{ 1347{
1300 char __user *hlp = ubase + ((char *)w - base); 1348 char __user *hlp = ubase + ((char *)w - base);
1301 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) 1349 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1303,11 +1351,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
1303 return 0; 1351 return 0;
1304} 1352}
1305 1353
1306static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) 1354static inline int
1355ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1307{ 1356{
1308 int ret; 1357 int ret;
1309 char __user *hlp; 1358 char __user *hlp;
1310 struct ebt_entry_target *t; 1359 const struct ebt_entry_target *t;
1311 1360
1312 if (e->bitmask == 0) 1361 if (e->bitmask == 0)
1313 return 0; 1362 return 0;
@@ -1326,13 +1375,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
1326 return 0; 1375 return 0;
1327} 1376}
1328 1377
1378static int copy_counters_to_user(struct ebt_table *t,
1379 const struct ebt_counter *oldcounters,
1380 void __user *user, unsigned int num_counters,
1381 unsigned int nentries)
1382{
1383 struct ebt_counter *counterstmp;
1384 int ret = 0;
1385
1386 /* userspace might not need the counters */
1387 if (num_counters == 0)
1388 return 0;
1389
1390 if (num_counters != nentries) {
1391 BUGPRINT("Num_counters wrong\n");
1392 return -EINVAL;
1393 }
1394
1395 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1396 if (!counterstmp)
1397 return -ENOMEM;
1398
1399 write_lock_bh(&t->lock);
1400 get_counters(oldcounters, counterstmp, nentries);
1401 write_unlock_bh(&t->lock);
1402
1403 if (copy_to_user(user, counterstmp,
1404 nentries * sizeof(struct ebt_counter)))
1405 ret = -EFAULT;
1406 vfree(counterstmp);
1407 return ret;
1408}
1409
1329/* called with ebt_mutex locked */ 1410/* called with ebt_mutex locked */
1330static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1411static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1331 int *len, int cmd) 1412 const int *len, int cmd)
1332{ 1413{
1333 struct ebt_replace tmp; 1414 struct ebt_replace tmp;
1334 struct ebt_counter *counterstmp, *oldcounters; 1415 const struct ebt_counter *oldcounters;
1335 unsigned int entries_size, nentries; 1416 unsigned int entries_size, nentries;
1417 int ret;
1336 char *entries; 1418 char *entries;
1337 1419
1338 if (cmd == EBT_SO_GET_ENTRIES) { 1420 if (cmd == EBT_SO_GET_ENTRIES) {
@@ -1347,16 +1429,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1347 oldcounters = t->table->counters; 1429 oldcounters = t->table->counters;
1348 } 1430 }
1349 1431
1350 if (copy_from_user(&tmp, user, sizeof(tmp))) { 1432 if (copy_from_user(&tmp, user, sizeof(tmp)))
1351 BUGPRINT("Cfu didn't work\n");
1352 return -EFAULT; 1433 return -EFAULT;
1353 }
1354 1434
1355 if (*len != sizeof(struct ebt_replace) + entries_size + 1435 if (*len != sizeof(struct ebt_replace) + entries_size +
1356 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { 1436 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1357 BUGPRINT("Wrong size\n");
1358 return -EINVAL; 1437 return -EINVAL;
1359 }
1360 1438
1361 if (tmp.nentries != nentries) { 1439 if (tmp.nentries != nentries) {
1362 BUGPRINT("Nentries wrong\n"); 1440 BUGPRINT("Nentries wrong\n");
@@ -1368,29 +1446,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1368 return -EINVAL; 1446 return -EINVAL;
1369 } 1447 }
1370 1448
1371 /* userspace might not need the counters */ 1449 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1372 if (tmp.num_counters) { 1450 tmp.num_counters, nentries);
1373 if (tmp.num_counters != nentries) { 1451 if (ret)
1374 BUGPRINT("Num_counters wrong\n"); 1452 return ret;
1375 return -EINVAL;
1376 }
1377 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1378 if (!counterstmp) {
1379 MEMPRINT("Couldn't copy counters, out of memory\n");
1380 return -ENOMEM;
1381 }
1382 write_lock_bh(&t->lock);
1383 get_counters(oldcounters, counterstmp, nentries);
1384 write_unlock_bh(&t->lock);
1385
1386 if (copy_to_user(tmp.counters, counterstmp,
1387 nentries * sizeof(struct ebt_counter))) {
1388 BUGPRINT("Couldn't copy counters to userspace\n");
1389 vfree(counterstmp);
1390 return -EFAULT;
1391 }
1392 vfree(counterstmp);
1393 }
1394 1453
1395 if (copy_to_user(tmp.entries, entries, entries_size)) { 1454 if (copy_to_user(tmp.entries, entries, entries_size)) {
1396 BUGPRINT("Couldn't copy entries to userspace\n"); 1455 BUGPRINT("Couldn't copy entries to userspace\n");
@@ -1418,7 +1477,7 @@ static int do_ebt_set_ctl(struct sock *sk,
1418 break; 1477 break;
1419 default: 1478 default:
1420 ret = -EINVAL; 1479 ret = -EINVAL;
1421 } 1480 }
1422 return ret; 1481 return ret;
1423} 1482}
1424 1483
@@ -1478,15 +1537,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1478 return ret; 1537 return ret;
1479} 1538}
1480 1539
1540#ifdef CONFIG_COMPAT
1541/* 32 bit-userspace compatibility definitions. */
1542struct compat_ebt_replace {
1543 char name[EBT_TABLE_MAXNAMELEN];
1544 compat_uint_t valid_hooks;
1545 compat_uint_t nentries;
1546 compat_uint_t entries_size;
1547 /* start of the chains */
1548 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1549 /* nr of counters userspace expects back */
1550 compat_uint_t num_counters;
1551 /* where the kernel will put the old counters. */
1552 compat_uptr_t counters;
1553 compat_uptr_t entries;
1554};
1555
1556/* struct ebt_entry_match, _target and _watcher have same layout */
1557struct compat_ebt_entry_mwt {
1558 union {
1559 char name[EBT_FUNCTION_MAXNAMELEN];
1560 compat_uptr_t ptr;
1561 } u;
1562 compat_uint_t match_size;
1563 compat_uint_t data[0];
1564};
1565
1566/* account for possible padding between match_size and ->data */
1567static int ebt_compat_entry_padsize(void)
1568{
1569 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1570 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1571 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1572 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1573}
1574
1575static int ebt_compat_match_offset(const struct xt_match *match,
1576 unsigned int userlen)
1577{
1578 /*
1579 * ebt_among needs special handling. The kernel .matchsize is
1580 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1581 * value is expected.
1582 * Example: userspace sends 4500, ebt_among.c wants 4504.
1583 */
1584 if (unlikely(match->matchsize == -1))
1585 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1586 return xt_compat_match_offset(match);
1587}
1588
1589static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1590 unsigned int *size)
1591{
1592 const struct xt_match *match = m->u.match;
1593 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1594 int off = ebt_compat_match_offset(match, m->match_size);
1595 compat_uint_t msize = m->match_size - off;
1596
1597 BUG_ON(off >= m->match_size);
1598
1599 if (copy_to_user(cm->u.name, match->name,
1600 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1601 return -EFAULT;
1602
1603 if (match->compat_to_user) {
1604 if (match->compat_to_user(cm->data, m->data))
1605 return -EFAULT;
1606 } else if (copy_to_user(cm->data, m->data, msize))
1607 return -EFAULT;
1608
1609 *size -= ebt_compat_entry_padsize() + off;
1610 *dstptr = cm->data;
1611 *dstptr += msize;
1612 return 0;
1613}
1614
1615static int compat_target_to_user(struct ebt_entry_target *t,
1616 void __user **dstptr,
1617 unsigned int *size)
1618{
1619 const struct xt_target *target = t->u.target;
1620 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1621 int off = xt_compat_target_offset(target);
1622 compat_uint_t tsize = t->target_size - off;
1623
1624 BUG_ON(off >= t->target_size);
1625
1626 if (copy_to_user(cm->u.name, target->name,
1627 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1628 return -EFAULT;
1629
1630 if (target->compat_to_user) {
1631 if (target->compat_to_user(cm->data, t->data))
1632 return -EFAULT;
1633 } else if (copy_to_user(cm->data, t->data, tsize))
1634 return -EFAULT;
1635
1636 *size -= ebt_compat_entry_padsize() + off;
1637 *dstptr = cm->data;
1638 *dstptr += tsize;
1639 return 0;
1640}
1641
1642static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1643 void __user **dstptr,
1644 unsigned int *size)
1645{
1646 return compat_target_to_user((struct ebt_entry_target *)w,
1647 dstptr, size);
1648}
1649
1650static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1651 unsigned int *size)
1652{
1653 struct ebt_entry_target *t;
1654 struct ebt_entry __user *ce;
1655 u32 watchers_offset, target_offset, next_offset;
1656 compat_uint_t origsize;
1657 int ret;
1658
1659 if (e->bitmask == 0) {
1660 if (*size < sizeof(struct ebt_entries))
1661 return -EINVAL;
1662 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1663 return -EFAULT;
1664
1665 *dstptr += sizeof(struct ebt_entries);
1666 *size -= sizeof(struct ebt_entries);
1667 return 0;
1668 }
1669
1670 if (*size < sizeof(*ce))
1671 return -EINVAL;
1672
1673 ce = (struct ebt_entry __user *)*dstptr;
1674 if (copy_to_user(ce, e, sizeof(*ce)))
1675 return -EFAULT;
1676
1677 origsize = *size;
1678 *dstptr += sizeof(*ce);
1679
1680 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1681 if (ret)
1682 return ret;
1683 watchers_offset = e->watchers_offset - (origsize - *size);
1684
1685 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1686 if (ret)
1687 return ret;
1688 target_offset = e->target_offset - (origsize - *size);
1689
1690 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1691
1692 ret = compat_target_to_user(t, dstptr, size);
1693 if (ret)
1694 return ret;
1695 next_offset = e->next_offset - (origsize - *size);
1696
1697 if (put_user(watchers_offset, &ce->watchers_offset) ||
1698 put_user(target_offset, &ce->target_offset) ||
1699 put_user(next_offset, &ce->next_offset))
1700 return -EFAULT;
1701
1702 *size -= sizeof(*ce);
1703 return 0;
1704}
1705
1706static int compat_calc_match(struct ebt_entry_match *m, int *off)
1707{
1708 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1709 *off += ebt_compat_entry_padsize();
1710 return 0;
1711}
1712
1713static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1714{
1715 *off += xt_compat_target_offset(w->u.watcher);
1716 *off += ebt_compat_entry_padsize();
1717 return 0;
1718}
1719
1720static int compat_calc_entry(const struct ebt_entry *e,
1721 const struct ebt_table_info *info,
1722 const void *base,
1723 struct compat_ebt_replace *newinfo)
1724{
1725 const struct ebt_entry_target *t;
1726 unsigned int entry_offset;
1727 int off, ret, i;
1728
1729 if (e->bitmask == 0)
1730 return 0;
1731
1732 off = 0;
1733 entry_offset = (void *)e - base;
1734
1735 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1736 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1737
1738 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1739
1740 off += xt_compat_target_offset(t->u.target);
1741 off += ebt_compat_entry_padsize();
1742
1743 newinfo->entries_size -= off;
1744
1745 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1746 if (ret)
1747 return ret;
1748
1749 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1750 const void *hookptr = info->hook_entry[i];
1751 if (info->hook_entry[i] &&
1752 (e < (struct ebt_entry *)(base - hookptr))) {
1753 newinfo->hook_entry[i] -= off;
1754 pr_debug("0x%08X -> 0x%08X\n",
1755 newinfo->hook_entry[i] + off,
1756 newinfo->hook_entry[i]);
1757 }
1758 }
1759
1760 return 0;
1761}
1762
1763
1764static int compat_table_info(const struct ebt_table_info *info,
1765 struct compat_ebt_replace *newinfo)
1766{
1767 unsigned int size = info->entries_size;
1768 const void *entries = info->entries;
1769
1770 newinfo->entries_size = size;
1771
1772 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1773 entries, newinfo);
1774}
1775
1776static int compat_copy_everything_to_user(struct ebt_table *t,
1777 void __user *user, int *len, int cmd)
1778{
1779 struct compat_ebt_replace repl, tmp;
1780 struct ebt_counter *oldcounters;
1781 struct ebt_table_info tinfo;
1782 int ret;
1783 void __user *pos;
1784
1785 memset(&tinfo, 0, sizeof(tinfo));
1786
1787 if (cmd == EBT_SO_GET_ENTRIES) {
1788 tinfo.entries_size = t->private->entries_size;
1789 tinfo.nentries = t->private->nentries;
1790 tinfo.entries = t->private->entries;
1791 oldcounters = t->private->counters;
1792 } else {
1793 tinfo.entries_size = t->table->entries_size;
1794 tinfo.nentries = t->table->nentries;
1795 tinfo.entries = t->table->entries;
1796 oldcounters = t->table->counters;
1797 }
1798
1799 if (copy_from_user(&tmp, user, sizeof(tmp)))
1800 return -EFAULT;
1801
1802 if (tmp.nentries != tinfo.nentries ||
1803 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1804 return -EINVAL;
1805
1806 memcpy(&repl, &tmp, sizeof(repl));
1807 if (cmd == EBT_SO_GET_ENTRIES)
1808 ret = compat_table_info(t->private, &repl);
1809 else
1810 ret = compat_table_info(&tinfo, &repl);
1811 if (ret)
1812 return ret;
1813
1814 if (*len != sizeof(tmp) + repl.entries_size +
1815 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1816 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1817 *len, tinfo.entries_size, repl.entries_size);
1818 return -EINVAL;
1819 }
1820
1821 /* userspace might not need the counters */
1822 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1823 tmp.num_counters, tinfo.nentries);
1824 if (ret)
1825 return ret;
1826
1827 pos = compat_ptr(tmp.entries);
1828 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1829 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1830}
1831
1832struct ebt_entries_buf_state {
1833 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1834 u32 buf_kern_len; /* total size of kernel buffer */
1835 u32 buf_kern_offset; /* amount of data copied so far */
1836 u32 buf_user_offset; /* read position in userspace buffer */
1837};
1838
1839static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1840{
1841 state->buf_kern_offset += sz;
1842 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1843}
1844
1845static int ebt_buf_add(struct ebt_entries_buf_state *state,
1846 void *data, unsigned int sz)
1847{
1848 if (state->buf_kern_start == NULL)
1849 goto count_only;
1850
1851 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1852
1853 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1854
1855 count_only:
1856 state->buf_user_offset += sz;
1857 return ebt_buf_count(state, sz);
1858}
1859
1860static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1861{
1862 char *b = state->buf_kern_start;
1863
1864 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1865
1866 if (b != NULL && sz > 0)
1867 memset(b + state->buf_kern_offset, 0, sz);
1868 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1869 return ebt_buf_count(state, sz);
1870}
1871
1872enum compat_mwt {
1873 EBT_COMPAT_MATCH,
1874 EBT_COMPAT_WATCHER,
1875 EBT_COMPAT_TARGET,
1876};
1877
1878static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1879 enum compat_mwt compat_mwt,
1880 struct ebt_entries_buf_state *state,
1881 const unsigned char *base)
1882{
1883 char name[EBT_FUNCTION_MAXNAMELEN];
1884 struct xt_match *match;
1885 struct xt_target *wt;
1886 void *dst = NULL;
1887 int off, pad = 0, ret = 0;
1888 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1889
1890 strlcpy(name, mwt->u.name, sizeof(name));
1891
1892 if (state->buf_kern_start)
1893 dst = state->buf_kern_start + state->buf_kern_offset;
1894
1895 entry_offset = (unsigned char *) mwt - base;
1896 switch (compat_mwt) {
1897 case EBT_COMPAT_MATCH:
1898 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1899 name, 0), "ebt_%s", name);
1900 if (match == NULL)
1901 return -ENOENT;
1902 if (IS_ERR(match))
1903 return PTR_ERR(match);
1904
1905 off = ebt_compat_match_offset(match, match_size);
1906 if (dst) {
1907 if (match->compat_from_user)
1908 match->compat_from_user(dst, mwt->data);
1909 else
1910 memcpy(dst, mwt->data, match_size);
1911 }
1912
1913 size_kern = match->matchsize;
1914 if (unlikely(size_kern == -1))
1915 size_kern = match_size;
1916 module_put(match->me);
1917 break;
1918 case EBT_COMPAT_WATCHER: /* fallthrough */
1919 case EBT_COMPAT_TARGET:
1920 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1921 name, 0), "ebt_%s", name);
1922 if (wt == NULL)
1923 return -ENOENT;
1924 if (IS_ERR(wt))
1925 return PTR_ERR(wt);
1926 off = xt_compat_target_offset(wt);
1927
1928 if (dst) {
1929 if (wt->compat_from_user)
1930 wt->compat_from_user(dst, mwt->data);
1931 else
1932 memcpy(dst, mwt->data, match_size);
1933 }
1934
1935 size_kern = wt->targetsize;
1936 module_put(wt->me);
1937 break;
1938 }
1939
1940 if (!dst) {
1941 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1942 off + ebt_compat_entry_padsize());
1943 if (ret < 0)
1944 return ret;
1945 }
1946
1947 state->buf_kern_offset += match_size + off;
1948 state->buf_user_offset += match_size;
1949 pad = XT_ALIGN(size_kern) - size_kern;
1950
1951 if (pad > 0 && dst) {
1952 BUG_ON(state->buf_kern_len <= pad);
1953 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1954 memset(dst + size_kern, 0, pad);
1955 }
1956 return off + match_size;
1957}
1958
1959/*
1960 * return size of all matches, watchers or target, including necessary
1961 * alignment and padding.
1962 */
1963static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1964 unsigned int size_left, enum compat_mwt type,
1965 struct ebt_entries_buf_state *state, const void *base)
1966{
1967 int growth = 0;
1968 char *buf;
1969
1970 if (size_left == 0)
1971 return 0;
1972
1973 buf = (char *) match32;
1974
1975 while (size_left >= sizeof(*match32)) {
1976 struct ebt_entry_match *match_kern;
1977 int ret;
1978
1979 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1980 if (match_kern) {
1981 char *tmp;
1982 tmp = state->buf_kern_start + state->buf_kern_offset;
1983 match_kern = (struct ebt_entry_match *) tmp;
1984 }
1985 ret = ebt_buf_add(state, buf, sizeof(*match32));
1986 if (ret < 0)
1987 return ret;
1988 size_left -= sizeof(*match32);
1989
1990 /* add padding before match->data (if any) */
1991 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1992 if (ret < 0)
1993 return ret;
1994
1995 if (match32->match_size > size_left)
1996 return -EINVAL;
1997
1998 size_left -= match32->match_size;
1999
2000 ret = compat_mtw_from_user(match32, type, state, base);
2001 if (ret < 0)
2002 return ret;
2003
2004 BUG_ON(ret < match32->match_size);
2005 growth += ret - match32->match_size;
2006 growth += ebt_compat_entry_padsize();
2007
2008 buf += sizeof(*match32);
2009 buf += match32->match_size;
2010
2011 if (match_kern)
2012 match_kern->match_size = ret;
2013
2014 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2015 match32 = (struct compat_ebt_entry_mwt *) buf;
2016 }
2017
2018 return growth;
2019}
2020
2021#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2022({ \
2023 unsigned int __i; \
2024 int __ret = 0; \
2025 struct compat_ebt_entry_mwt *__watcher; \
2026 \
2027 for (__i = e->watchers_offset; \
2028 __i < (e)->target_offset; \
2029 __i += __watcher->watcher_size + \
2030 sizeof(struct compat_ebt_entry_mwt)) { \
2031 __watcher = (void *)(e) + __i; \
2032 __ret = fn(__watcher , ## args); \
2033 if (__ret != 0) \
2034 break; \
2035 } \
2036 if (__ret == 0) { \
2037 if (__i != (e)->target_offset) \
2038 __ret = -EINVAL; \
2039 } \
2040 __ret; \
2041})
2042
2043#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2044({ \
2045 unsigned int __i; \
2046 int __ret = 0; \
2047 struct compat_ebt_entry_mwt *__match; \
2048 \
2049 for (__i = sizeof(struct ebt_entry); \
2050 __i < (e)->watchers_offset; \
2051 __i += __match->match_size + \
2052 sizeof(struct compat_ebt_entry_mwt)) { \
2053 __match = (void *)(e) + __i; \
2054 __ret = fn(__match , ## args); \
2055 if (__ret != 0) \
2056 break; \
2057 } \
2058 if (__ret == 0) { \
2059 if (__i != (e)->watchers_offset) \
2060 __ret = -EINVAL; \
2061 } \
2062 __ret; \
2063})
2064
2065/* called for all ebt_entry structures. */
2066static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2067 unsigned int *total,
2068 struct ebt_entries_buf_state *state)
2069{
2070 unsigned int i, j, startoff, new_offset = 0;
2071 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2072 unsigned int offsets[4];
2073 unsigned int *offsets_update = NULL;
2074 int ret;
2075 char *buf_start;
2076
2077 if (*total < sizeof(struct ebt_entries))
2078 return -EINVAL;
2079
2080 if (!entry->bitmask) {
2081 *total -= sizeof(struct ebt_entries);
2082 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2083 }
2084 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2085 return -EINVAL;
2086
2087 startoff = state->buf_user_offset;
2088 /* pull in most part of ebt_entry, it does not need to be changed. */
2089 ret = ebt_buf_add(state, entry,
2090 offsetof(struct ebt_entry, watchers_offset));
2091 if (ret < 0)
2092 return ret;
2093
2094 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2095 memcpy(&offsets[1], &entry->watchers_offset,
2096 sizeof(offsets) - sizeof(offsets[0]));
2097
2098 if (state->buf_kern_start) {
2099 buf_start = state->buf_kern_start + state->buf_kern_offset;
2100 offsets_update = (unsigned int *) buf_start;
2101 }
2102 ret = ebt_buf_add(state, &offsets[1],
2103 sizeof(offsets) - sizeof(offsets[0]));
2104 if (ret < 0)
2105 return ret;
2106 buf_start = (char *) entry;
2107 /*
2108 * 0: matches offset, always follows ebt_entry.
2109 * 1: watchers offset, from ebt_entry structure
2110 * 2: target offset, from ebt_entry structure
2111 * 3: next ebt_entry offset, from ebt_entry structure
2112 *
2113 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2114 */
2115 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2116 struct compat_ebt_entry_mwt *match32;
2117 unsigned int size;
2118 char *buf = buf_start;
2119
2120 buf = buf_start + offsets[i];
2121 if (offsets[i] > offsets[j])
2122 return -EINVAL;
2123
2124 match32 = (struct compat_ebt_entry_mwt *) buf;
2125 size = offsets[j] - offsets[i];
2126 ret = ebt_size_mwt(match32, size, i, state, base);
2127 if (ret < 0)
2128 return ret;
2129 new_offset += ret;
2130 if (offsets_update && new_offset) {
2131 pr_debug("ebtables: change offset %d to %d\n",
2132 offsets_update[i], offsets[j] + new_offset);
2133 offsets_update[i] = offsets[j] + new_offset;
2134 }
2135 }
2136
2137 startoff = state->buf_user_offset - startoff;
2138
2139 BUG_ON(*total < startoff);
2140 *total -= startoff;
2141 return 0;
2142}
2143
2144/*
2145 * repl->entries_size is the size of the ebt_entry blob in userspace.
2146 * It might need more memory when copied to a 64 bit kernel in case
2147 * userspace is 32-bit. So, first task: find out how much memory is needed.
2148 *
2149 * Called before validation is performed.
2150 */
2151static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2152 struct ebt_entries_buf_state *state)
2153{
2154 unsigned int size_remaining = size_user;
2155 int ret;
2156
2157 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2158 &size_remaining, state);
2159 if (ret < 0)
2160 return ret;
2161
2162 WARN_ON(size_remaining);
2163 return state->buf_kern_offset;
2164}
2165
2166
2167static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2168 void __user *user, unsigned int len)
2169{
2170 struct compat_ebt_replace tmp;
2171 int i;
2172
2173 if (len < sizeof(tmp))
2174 return -EINVAL;
2175
2176 if (copy_from_user(&tmp, user, sizeof(tmp)))
2177 return -EFAULT;
2178
2179 if (len != sizeof(tmp) + tmp.entries_size)
2180 return -EINVAL;
2181
2182 if (tmp.entries_size == 0)
2183 return -EINVAL;
2184
2185 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2186 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2187 return -ENOMEM;
2188 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2189 return -ENOMEM;
2190
2191 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2192
2193 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2194 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2195 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2196
2197 repl->num_counters = tmp.num_counters;
2198 repl->counters = compat_ptr(tmp.counters);
2199 repl->entries = compat_ptr(tmp.entries);
2200 return 0;
2201}
2202
2203static int compat_do_replace(struct net *net, void __user *user,
2204 unsigned int len)
2205{
2206 int ret, i, countersize, size64;
2207 struct ebt_table_info *newinfo;
2208 struct ebt_replace tmp;
2209 struct ebt_entries_buf_state state;
2210 void *entries_tmp;
2211
2212 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2213 if (ret) {
2214 /* try real handler in case userland supplied needed padding */
2215 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2216 ret = 0;
2217 return ret;
2218 }
2219
2220 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2221 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2222 if (!newinfo)
2223 return -ENOMEM;
2224
2225 if (countersize)
2226 memset(newinfo->counters, 0, countersize);
2227
2228 memset(&state, 0, sizeof(state));
2229
2230 newinfo->entries = vmalloc(tmp.entries_size);
2231 if (!newinfo->entries) {
2232 ret = -ENOMEM;
2233 goto free_newinfo;
2234 }
2235 if (copy_from_user(
2236 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2237 ret = -EFAULT;
2238 goto free_entries;
2239 }
2240
2241 entries_tmp = newinfo->entries;
2242
2243 xt_compat_lock(NFPROTO_BRIDGE);
2244
2245 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2246 if (ret < 0)
2247 goto out_unlock;
2248
2249 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2250 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2251 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2252
2253 size64 = ret;
2254 newinfo->entries = vmalloc(size64);
2255 if (!newinfo->entries) {
2256 vfree(entries_tmp);
2257 ret = -ENOMEM;
2258 goto out_unlock;
2259 }
2260
2261 memset(&state, 0, sizeof(state));
2262 state.buf_kern_start = newinfo->entries;
2263 state.buf_kern_len = size64;
2264
2265 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2266 BUG_ON(ret < 0); /* parses same data again */
2267
2268 vfree(entries_tmp);
2269 tmp.entries_size = size64;
2270
2271 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2272 char __user *usrptr;
2273 if (tmp.hook_entry[i]) {
2274 unsigned int delta;
2275 usrptr = (char __user *) tmp.hook_entry[i];
2276 delta = usrptr - tmp.entries;
2277 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2278 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2279 }
2280 }
2281
2282 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2283 xt_compat_unlock(NFPROTO_BRIDGE);
2284
2285 ret = do_replace_finish(net, &tmp, newinfo);
2286 if (ret == 0)
2287 return ret;
2288free_entries:
2289 vfree(newinfo->entries);
2290free_newinfo:
2291 vfree(newinfo);
2292 return ret;
2293out_unlock:
2294 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2295 xt_compat_unlock(NFPROTO_BRIDGE);
2296 goto free_entries;
2297}
2298
2299static int compat_update_counters(struct net *net, void __user *user,
2300 unsigned int len)
2301{
2302 struct compat_ebt_replace hlp;
2303
2304 if (copy_from_user(&hlp, user, sizeof(hlp)))
2305 return -EFAULT;
2306
2307 /* try real handler in case userland supplied needed padding */
2308 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2309 return update_counters(net, user, len);
2310
2311 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2312 hlp.num_counters, user, len);
2313}
2314
2315static int compat_do_ebt_set_ctl(struct sock *sk,
2316 int cmd, void __user *user, unsigned int len)
2317{
2318 int ret;
2319
2320 if (!capable(CAP_NET_ADMIN))
2321 return -EPERM;
2322
2323 switch (cmd) {
2324 case EBT_SO_SET_ENTRIES:
2325 ret = compat_do_replace(sock_net(sk), user, len);
2326 break;
2327 case EBT_SO_SET_COUNTERS:
2328 ret = compat_update_counters(sock_net(sk), user, len);
2329 break;
2330 default:
2331 ret = -EINVAL;
2332 }
2333 return ret;
2334}
2335
2336static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2337 void __user *user, int *len)
2338{
2339 int ret;
2340 struct compat_ebt_replace tmp;
2341 struct ebt_table *t;
2342
2343 if (!capable(CAP_NET_ADMIN))
2344 return -EPERM;
2345
2346 /* try real handler in case userland supplied needed padding */
2347 if ((cmd == EBT_SO_GET_INFO ||
2348 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2349 return do_ebt_get_ctl(sk, cmd, user, len);
2350
2351 if (copy_from_user(&tmp, user, sizeof(tmp)))
2352 return -EFAULT;
2353
2354 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2355 if (!t)
2356 return ret;
2357
2358 xt_compat_lock(NFPROTO_BRIDGE);
2359 switch (cmd) {
2360 case EBT_SO_GET_INFO:
2361 tmp.nentries = t->private->nentries;
2362 ret = compat_table_info(t->private, &tmp);
2363 if (ret)
2364 goto out;
2365 tmp.valid_hooks = t->valid_hooks;
2366
2367 if (copy_to_user(user, &tmp, *len) != 0) {
2368 ret = -EFAULT;
2369 break;
2370 }
2371 ret = 0;
2372 break;
2373 case EBT_SO_GET_INIT_INFO:
2374 tmp.nentries = t->table->nentries;
2375 tmp.entries_size = t->table->entries_size;
2376 tmp.valid_hooks = t->table->valid_hooks;
2377
2378 if (copy_to_user(user, &tmp, *len) != 0) {
2379 ret = -EFAULT;
2380 break;
2381 }
2382 ret = 0;
2383 break;
2384 case EBT_SO_GET_ENTRIES:
2385 case EBT_SO_GET_INIT_ENTRIES:
2386 /*
2387 * try real handler first in case of userland-side padding.
2388 * in case we are dealing with an 'ordinary' 32 bit binary
2389 * without 64bit compatibility padding, this will fail right
2390 * after copy_from_user when the *len argument is validated.
2391 *
2392 * the compat_ variant needs to do one pass over the kernel
 2393 * data set to adjust for size differences before the check.
2394 */
2395 if (copy_everything_to_user(t, user, len, cmd) == 0)
2396 ret = 0;
2397 else
2398 ret = compat_copy_everything_to_user(t, user, len, cmd);
2399 break;
2400 default:
2401 ret = -EINVAL;
2402 }
2403 out:
2404 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2405 xt_compat_unlock(NFPROTO_BRIDGE);
2406 mutex_unlock(&ebt_mutex);
2407 return ret;
2408}
2409#endif
2410
1481static struct nf_sockopt_ops ebt_sockopts = 2411static struct nf_sockopt_ops ebt_sockopts =
1482{ 2412{
1483 .pf = PF_INET, 2413 .pf = PF_INET,
1484 .set_optmin = EBT_BASE_CTL, 2414 .set_optmin = EBT_BASE_CTL,
1485 .set_optmax = EBT_SO_SET_MAX + 1, 2415 .set_optmax = EBT_SO_SET_MAX + 1,
1486 .set = do_ebt_set_ctl, 2416 .set = do_ebt_set_ctl,
2417#ifdef CONFIG_COMPAT
2418 .compat_set = compat_do_ebt_set_ctl,
2419#endif
1487 .get_optmin = EBT_BASE_CTL, 2420 .get_optmin = EBT_BASE_CTL,
1488 .get_optmax = EBT_SO_GET_MAX + 1, 2421 .get_optmax = EBT_SO_GET_MAX + 1,
1489 .get = do_ebt_get_ctl, 2422 .get = do_ebt_get_ctl,
2423#ifdef CONFIG_COMPAT
2424 .compat_get = compat_do_ebt_get_ctl,
2425#endif
1490 .owner = THIS_MODULE, 2426 .owner = THIS_MODULE,
1491}; 2427};
1492 2428
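The ebtables.c hunks above add the CONFIG_COMPAT path: a 32-bit ebtables binary hands the 64-bit kernel structures whose per-extension sizes and alignment are smaller than the native layout, so the kernel first walks the blob to work out how much each match, watcher and target grows, records the per-entry delta with xt_compat_add_offset(), and then shifts every stored offset (watchers_offset, target_offset, next_offset, hook_entry[]) by the running total when it copies the data in. A minimal sketch of that bookkeeping, an editor's toy model rather than kernel code, with the sizes invented purely for illustration instead of the real XT_ALIGN/COMPAT_XT_ALIGN values:

/* Toy model of the offset bookkeeping done by compat_calc_entry() /
 * xt_compat_calc_jump(): sizes below are made up for the example.
 */
#include <stdio.h>

struct ext {
	unsigned int user_size;	/* size as sent by 32-bit userspace */
	unsigned int kern_size;	/* size after translation to native layout */
};

int main(void)
{
	/* one rule with a match, a watcher and a target */
	struct ext parts[] = { { 24, 32 }, { 16, 24 }, { 8, 16 } };
	unsigned int i, delta = 0;

	for (i = 0; i < 3; i++) {
		/* each extension grows by (kern_size - user_size);
		 * any offset pointing past it must grow by the running total */
		delta += parts[i].kern_size - parts[i].user_size;
		printf("after part %u: offsets beyond it grow by %u bytes\n",
		       i, delta);
	}
	/* delta plays the role of the per-entry jump returned by
	 * xt_compat_calc_jump() for entries further down the blob */
	return 0;
}

In the patch itself the same idea runs in two passes over the entry blob: compat_do_replace() calls compat_copy_entries() once with buf_kern_start == NULL to measure the native-layout size, allocates a buffer of that size, then calls it again to fill the buffer and fix up the offsets.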
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b860..702be5a2c956 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
77module_param(stats_timer, int, S_IRUGO); 77module_param(stats_timer, int, S_IRUGO);
78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); 78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
79 79
80HLIST_HEAD(can_rx_dev_list); 80/* receive filters subscribed for 'all' CAN devices */
81static struct dev_rcv_lists can_rx_alldev_list; 81struct dev_rcv_lists can_rx_alldev_list;
82static DEFINE_SPINLOCK(can_rcvlists_lock); 82static DEFINE_SPINLOCK(can_rcvlists_lock);
83 83
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
292 292
293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) 293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
294{ 294{
295 struct dev_rcv_lists *d = NULL; 295 if (!dev)
296 struct hlist_node *n; 296 return &can_rx_alldev_list;
297 297 else
298 /* 298 return (struct dev_rcv_lists *)dev->ml_priv;
299 * find receive list for this device
300 *
301 * The hlist_for_each_entry*() macros curse through the list
302 * using the pointer variable n and set d to the containing
303 * struct in each list iteration. Therefore, after list
304 * iteration, d is unmodified when the list is empty, and it
305 * points to last list element, when the list is non-empty
306 * but no match in the loop body is found. I.e. d is *not*
307 * NULL when no match is found. We can, however, use the
308 * cursor variable n to decide if a match was found.
309 */
310
311 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
312 if (d->dev == dev)
313 break;
314 }
315
316 return n ? d : NULL;
317} 299}
318 300
319/** 301/**
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
433 415
434 /* insert new receiver (dev,canid,mask) -> (func,data) */ 416 /* insert new receiver (dev,canid,mask) -> (func,data) */
435 417
418 if (dev && dev->type != ARPHRD_CAN)
419 return -ENODEV;
420
436 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); 421 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
437 if (!r) 422 if (!r)
438 return -ENOMEM; 423 return -ENOMEM;
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
468EXPORT_SYMBOL(can_rx_register); 453EXPORT_SYMBOL(can_rx_register);
469 454
470/* 455/*
471 * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
472 */
473static void can_rx_delete_device(struct rcu_head *rp)
474{
475 struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
476
477 kfree(d);
478}
479
480/*
481 * can_rx_delete_receiver - rcu callback for single receiver entry removal 456 * can_rx_delete_receiver - rcu callback for single receiver entry removal
482 */ 457 */
483static void can_rx_delete_receiver(struct rcu_head *rp) 458static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
506 struct hlist_node *next; 481 struct hlist_node *next;
507 struct dev_rcv_lists *d; 482 struct dev_rcv_lists *d;
508 483
484 if (dev && dev->type != ARPHRD_CAN)
485 return;
486
509 spin_lock(&can_rcvlists_lock); 487 spin_lock(&can_rcvlists_lock);
510 488
511 d = find_dev_rcv_lists(dev); 489 d = find_dev_rcv_lists(dev);
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
541 "dev %s, id %03X, mask %03X\n", 519 "dev %s, id %03X, mask %03X\n",
542 DNAME(dev), can_id, mask); 520 DNAME(dev), can_id, mask);
543 r = NULL; 521 r = NULL;
544 d = NULL;
545 goto out; 522 goto out;
546 } 523 }
547 524
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 can_pstats.rcv_entries--; 529 can_pstats.rcv_entries--;
553 530
554 /* remove device structure requested by NETDEV_UNREGISTER */ 531 /* remove device structure requested by NETDEV_UNREGISTER */
555 if (d->remove_on_zero_entries && !d->entries) 532 if (d->remove_on_zero_entries && !d->entries) {
556 hlist_del_rcu(&d->list); 533 kfree(d);
557 else 534 dev->ml_priv = NULL;
558 d = NULL; 535 }
559 536
560 out: 537 out:
561 spin_unlock(&can_rcvlists_lock); 538 spin_unlock(&can_rcvlists_lock);
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
563 /* schedule the receiver item for deletion */ 540 /* schedule the receiver item for deletion */
564 if (r) 541 if (r)
565 call_rcu(&r->rcu, can_rx_delete_receiver); 542 call_rcu(&r->rcu, can_rx_delete_receiver);
566
567 /* schedule the device structure for deletion */
568 if (d)
569 call_rcu(&d->rcu, can_rx_delete_device);
570} 543}
571EXPORT_SYMBOL(can_rx_unregister); 544EXPORT_SYMBOL(can_rx_unregister);
572 545
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
780 753
781 case NETDEV_REGISTER: 754 case NETDEV_REGISTER:
782 755
783 /* 756 /* create new dev_rcv_lists for this device */
784 * create new dev_rcv_lists for this device
785 *
786 * N.B. zeroing the struct is the correct initialization
787 * for the embedded hlist_head structs.
788 * Another list type, e.g. list_head, would require
789 * explicit initialization.
790 */
791
792 d = kzalloc(sizeof(*d), GFP_KERNEL); 757 d = kzalloc(sizeof(*d), GFP_KERNEL);
793 if (!d) { 758 if (!d) {
794 printk(KERN_ERR 759 printk(KERN_ERR
795 "can: allocation of receive list failed\n"); 760 "can: allocation of receive list failed\n");
796 return NOTIFY_DONE; 761 return NOTIFY_DONE;
797 } 762 }
798 d->dev = dev; 763 BUG_ON(dev->ml_priv);
799 764 dev->ml_priv = d;
800 spin_lock(&can_rcvlists_lock);
801 hlist_add_head_rcu(&d->list, &can_rx_dev_list);
802 spin_unlock(&can_rcvlists_lock);
803 765
804 break; 766 break;
805 767
806 case NETDEV_UNREGISTER: 768 case NETDEV_UNREGISTER:
807 spin_lock(&can_rcvlists_lock); 769 spin_lock(&can_rcvlists_lock);
808 770
809 d = find_dev_rcv_lists(dev); 771 d = dev->ml_priv;
810 if (d) { 772 if (d) {
811 if (d->entries) { 773 if (d->entries)
812 d->remove_on_zero_entries = 1; 774 d->remove_on_zero_entries = 1;
813 d = NULL; 775 else {
814 } else 776 kfree(d);
815 hlist_del_rcu(&d->list); 777 dev->ml_priv = NULL;
778 }
816 } else 779 } else
817 printk(KERN_ERR "can: notifier: receive list not " 780 printk(KERN_ERR "can: notifier: receive list not "
818 "found for dev %s\n", dev->name); 781 "found for dev %s\n", dev->name);
819 782
820 spin_unlock(&can_rcvlists_lock); 783 spin_unlock(&can_rcvlists_lock);
821 784
822 if (d)
823 call_rcu(&d->rcu, can_rx_delete_device);
824
825 break; 785 break;
826 } 786 }
827 787
@@ -853,21 +813,13 @@ static __init int can_init(void)
853{ 813{
854 printk(banner); 814 printk(banner);
855 815
816 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
817
856 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 818 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
857 0, 0, NULL); 819 0, 0, NULL);
858 if (!rcv_cache) 820 if (!rcv_cache)
859 return -ENOMEM; 821 return -ENOMEM;
860 822
861 /*
862 * Insert can_rx_alldev_list for reception on all devices.
863 * This struct is zero initialized which is correct for the
864 * embedded hlist heads, the dev pointer, and the entries counter.
865 */
866
867 spin_lock(&can_rcvlists_lock);
868 hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
869 spin_unlock(&can_rcvlists_lock);
870
871 if (stats_timer) { 823 if (stats_timer) {
872 /* the statistics are updated every second (timer triggered) */ 824 /* the statistics are updated every second (timer triggered) */
873 setup_timer(&can_stattimer, can_stat_update, 0); 825 setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +839,7 @@ static __init int can_init(void)
887 839
888static __exit void can_exit(void) 840static __exit void can_exit(void)
889{ 841{
890 struct dev_rcv_lists *d; 842 struct net_device *dev;
891 struct hlist_node *n, *next;
892 843
893 if (stats_timer) 844 if (stats_timer)
894 del_timer(&can_stattimer); 845 del_timer(&can_stattimer);
@@ -900,14 +851,19 @@ static __exit void can_exit(void)
900 unregister_netdevice_notifier(&can_netdev_notifier); 851 unregister_netdevice_notifier(&can_netdev_notifier);
901 sock_unregister(PF_CAN); 852 sock_unregister(PF_CAN);
902 853
903 /* remove can_rx_dev_list */ 854 /* remove created dev_rcv_lists from still registered CAN devices */
904 spin_lock(&can_rcvlists_lock); 855 rcu_read_lock();
905 hlist_del(&can_rx_alldev_list.list); 856 for_each_netdev_rcu(&init_net, dev) {
906 hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) { 857 if (dev->type == ARPHRD_CAN && dev->ml_priv){
907 hlist_del(&d->list); 858
908 kfree(d); 859 struct dev_rcv_lists *d = dev->ml_priv;
860
861 BUG_ON(d->entries);
862 kfree(d);
863 dev->ml_priv = NULL;
864 }
909 } 865 }
910 spin_unlock(&can_rcvlists_lock); 866 rcu_read_unlock();
911 867
912 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 868 rcu_barrier(); /* Wait for completion of call_rcu()'s */
913 869
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc30..34253b84e30f 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
63 63
64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
65 65
66/* per device receive filters linked at dev->ml_priv */
66struct dev_rcv_lists { 67struct dev_rcv_lists {
67 struct hlist_node list;
68 struct rcu_head rcu;
69 struct net_device *dev;
70 struct hlist_head rx[RX_MAX]; 68 struct hlist_head rx[RX_MAX];
71 struct hlist_head rx_sff[0x800]; 69 struct hlist_head rx_sff[0x800];
72 int remove_on_zero_entries; 70 int remove_on_zero_entries;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index e32af52238a2..907dc871fac8 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -56,6 +56,7 @@
56#include <linux/can.h> 56#include <linux/can.h>
57#include <linux/can/core.h> 57#include <linux/can/core.h>
58#include <linux/can/bcm.h> 58#include <linux/can/bcm.h>
59#include <linux/slab.h>
59#include <net/sock.h> 60#include <net/sock.h>
60#include <net/net_namespace.h> 61#include <net/net_namespace.h>
61 62
@@ -1478,6 +1479,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1478 struct sock *sk = sock->sk; 1479 struct sock *sk = sock->sk;
1479 struct bcm_sock *bo = bcm_sk(sk); 1480 struct bcm_sock *bo = bcm_sk(sk);
1480 1481
1482 if (len < sizeof(*addr))
1483 return -EINVAL;
1484
1481 if (bo->bound) 1485 if (bo->bound)
1482 return -EISCONN; 1486 return -EISCONN;
1483 1487
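The added length check in bcm_connect() rejects connect() calls that pass less than sizeof(struct sockaddr_can) instead of reading past the supplied buffer. A minimal userland sketch of a correctly sized CAN_BCM connect follows; it is not part of the commit, and the interface name "can0" is an assumption.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
        struct sockaddr_can addr;
        struct ifreq ifr;
        int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

        if (s < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
        if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)           /* resolve "can0" */
                return 1;

        memset(&addr, 0, sizeof(addr));
        addr.can_family = AF_CAN;
        addr.can_ifindex = ifr.ifr_ifindex;

        /* anything shorter than sizeof(addr) is now rejected with -EINVAL */
        if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return 1;

        close(s);
        return 0;
}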
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be567..f4265cc9c3fb 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/if_arp.h>
48#include <linux/can/core.h> 49#include <linux/can/core.h>
49 50
50#include "af_can.h" 51#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
84 [RX_EFF] = "rx_eff", 85 [RX_EFF] = "rx_eff",
85}; 86};
86 87
88/* receive filters subscribed for 'all' CAN devices */
89extern struct dev_rcv_lists can_rx_alldev_list;
90
87/* 91/*
88 * af_can statistics stuff 92 * af_can statistics stuff
89 */ 93 */
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
190 194
191/* 195/*
192 * proc read functions 196 * proc read functions
193 *
194 * From known use-cases we expect about 10 entries in a receive list to be
195 * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
196 *
197 */ 197 */
198 198
199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, 199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
202 struct receiver *r; 202 struct receiver *r;
203 struct hlist_node *n; 203 struct hlist_node *n;
204 204
205 rcu_read_lock();
206 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
207 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
208 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
212 (unsigned long)r->func, (unsigned long)r->data, 211 (unsigned long)r->func, (unsigned long)r->data,
213 r->matches, r->ident); 212 r->matches, r->ident);
214 } 213 }
215 rcu_read_unlock();
216} 214}
217 215
218static void can_print_recv_banner(struct seq_file *m) 216static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
346 .release = single_release, 344 .release = single_release,
347}; 345};
348 346
347static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
348 struct net_device *dev,
349 struct dev_rcv_lists *d)
350{
351 if (!hlist_empty(&d->rx[idx])) {
352 can_print_recv_banner(m);
353 can_print_rcvlist(m, &d->rx[idx], dev);
354 } else
355 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
356
357}
358
349static int can_rcvlist_proc_show(struct seq_file *m, void *v) 359static int can_rcvlist_proc_show(struct seq_file *m, void *v)
350{ 360{
351 /* double cast to prevent GCC warning */ 361 /* double cast to prevent GCC warning */
352 int idx = (int)(long)m->private; 362 int idx = (int)(long)m->private;
363 struct net_device *dev;
353 struct dev_rcv_lists *d; 364 struct dev_rcv_lists *d;
354 struct hlist_node *n;
355 365
356 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); 366 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
357 367
358 rcu_read_lock(); 368 rcu_read_lock();
359 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
360 369
361 if (!hlist_empty(&d->rx[idx])) { 370 /* receive list for 'all' CAN devices (dev == NULL) */
362 can_print_recv_banner(m); 371 d = &can_rx_alldev_list;
363 can_print_rcvlist(m, &d->rx[idx], d->dev); 372 can_rcvlist_proc_show_one(m, idx, NULL, d);
364 } else 373
365 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); 374 /* receive list for registered CAN devices */
375 for_each_netdev_rcu(&init_net, dev) {
376 if (dev->type == ARPHRD_CAN && dev->ml_priv)
377 can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
366 } 378 }
379
367 rcu_read_unlock(); 380 rcu_read_unlock();
368 381
369 seq_putc(m, '\n'); 382 seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
383 .release = single_release, 396 .release = single_release,
384}; 397};
385 398
399static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
400 struct net_device *dev,
401 struct dev_rcv_lists *d)
402{
403 int i;
404 int all_empty = 1;
405
406	 /* check whether at least one list is non-empty */
407 for (i = 0; i < 0x800; i++)
408 if (!hlist_empty(&d->rx_sff[i])) {
409 all_empty = 0;
410 break;
411 }
412
413 if (!all_empty) {
414 can_print_recv_banner(m);
415 for (i = 0; i < 0x800; i++) {
416 if (!hlist_empty(&d->rx_sff[i]))
417 can_print_rcvlist(m, &d->rx_sff[i], dev);
418 }
419 } else
420 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
421}
422
386static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) 423static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
387{ 424{
425 struct net_device *dev;
388 struct dev_rcv_lists *d; 426 struct dev_rcv_lists *d;
389 struct hlist_node *n;
390 427
391 /* RX_SFF */ 428 /* RX_SFF */
392 seq_puts(m, "\nreceive list 'rx_sff':\n"); 429 seq_puts(m, "\nreceive list 'rx_sff':\n");
393 430
394 rcu_read_lock(); 431 rcu_read_lock();
395 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 432
396 int i, all_empty = 1; 433 /* sff receive list for 'all' CAN devices (dev == NULL) */
397 /* check wether at least one list is non-empty */ 434 d = &can_rx_alldev_list;
398 for (i = 0; i < 0x800; i++) 435 can_rcvlist_sff_proc_show_one(m, NULL, d);
399 if (!hlist_empty(&d->rx_sff[i])) { 436
400 all_empty = 0; 437 /* sff receive list for registered CAN devices */
401 break; 438 for_each_netdev_rcu(&init_net, dev) {
402 } 439 if (dev->type == ARPHRD_CAN && dev->ml_priv)
403 440 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
404 if (!all_empty) {
405 can_print_recv_banner(m);
406 for (i = 0; i < 0x800; i++) {
407 if (!hlist_empty(&d->rx_sff[i]))
408 can_print_rcvlist(m, &d->rx_sff[i],
409 d->dev);
410 }
411 } else
412 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
413 } 441 }
442
414 rcu_read_unlock(); 443 rcu_read_unlock();
415 444
416 seq_putc(m, '\n'); 445 seq_putc(m, '\n');
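The proc output above is now built from can_rx_alldev_list plus each CAN netdevice's dev->ml_priv list instead of the removed global can_rx_dev_list; the file contents stay the same for userspace. A trivial sketch that just dumps one of those files follows; it is not part of the commit, and the path /proc/net/can/rcvlist_all is assumed from the conventional /proc/net/can/ layout.

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/can/rcvlist_all", "r");

        if (!f)
                return 1;

        /* one block for the 'all devices' list (dev == NULL) plus one per
         * registered CAN netdevice found via dev->ml_priv */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}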
diff --git a/net/can/raw.c b/net/can/raw.c
index abca920440b5..da99cf153b33 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47#include <linux/net.h> 47#include <linux/net.h>
48#include <linux/slab.h>
48#include <linux/netdevice.h> 49#include <linux/netdevice.h>
49#include <linux/socket.h> 50#include <linux/socket.h>
50#include <linux/if_arp.h> 51#include <linux/if_arp.h>
@@ -444,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
444 return -EFAULT; 445 return -EFAULT;
445 } 446 }
446 } else if (count == 1) { 447 } else if (count == 1) {
447 if (copy_from_user(&sfilter, optval, optlen)) 448 if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
448 return -EFAULT; 449 return -EFAULT;
449 } 450 }
450 451
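The raw_setsockopt() fix above copies exactly sizeof(struct can_filter) in the single-filter case instead of trusting the user-supplied optlen. A minimal userland sketch of that count == 1 case follows; it is not part of the commit.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
        struct can_filter flt;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        if (s < 0)
                return 1;

        flt.can_id   = 0x123;           /* accept only standard ID 0x123 */
        flt.can_mask = CAN_SFF_MASK;

        /* count == 1: the kernel now copies sizeof(flt), not optlen */
        if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &flt, sizeof(flt)) < 0)
                return 1;

        close(s);
        return 0;
}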
diff --git a/net/compat.c b/net/compat.c
index a1fb1b079a82..ec24d9edb025 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/gfp.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/file.h> 18#include <linux/file.h>
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 95c2e0840d0d..2dccd4ee591b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
48#include <linux/poll.h> 48#include <linux/poll.h>
49#include <linux/highmem.h> 49#include <linux/highmem.h>
50#include <linux/spinlock.h> 50#include <linux/spinlock.h>
51#include <linux/slab.h>
51 52
52#include <net/protocol.h> 53#include <net/protocol.h>
53#include <linux/skbuff.h> 54#include <linux/skbuff.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index ec874218b206..f769098774b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -80,6 +80,7 @@
80#include <linux/types.h> 80#include <linux/types.h>
81#include <linux/kernel.h> 81#include <linux/kernel.h>
82#include <linux/hash.h> 82#include <linux/hash.h>
83#include <linux/slab.h>
83#include <linux/sched.h> 84#include <linux/sched.h>
84#include <linux/mutex.h> 85#include <linux/mutex.h>
85#include <linux/string.h> 86#include <linux/string.h>
@@ -1113,19 +1114,7 @@ void dev_load(struct net *net, const char *name)
1113} 1114}
1114EXPORT_SYMBOL(dev_load); 1115EXPORT_SYMBOL(dev_load);
1115 1116
1116/** 1117static int __dev_open(struct net_device *dev)
1117 * dev_open - prepare an interface for use.
1118 * @dev: device to open
1119 *
1120 * Takes a device from down to up state. The device's private open
1121 * function is invoked and then the multicast lists are loaded. Finally
1122 * the device is moved into the up state and a %NETDEV_UP message is
1123 * sent to the netdev notifier chain.
1124 *
1125 * Calling this function on an active interface is a nop. On a failure
1126 * a negative errno code is returned.
1127 */
1128int dev_open(struct net_device *dev)
1129{ 1118{
1130 const struct net_device_ops *ops = dev->netdev_ops; 1119 const struct net_device_ops *ops = dev->netdev_ops;
1131 int ret; 1120 int ret;
@@ -1133,13 +1122,6 @@ int dev_open(struct net_device *dev)
1133 ASSERT_RTNL(); 1122 ASSERT_RTNL();
1134 1123
1135 /* 1124 /*
1136 * Is it already up?
1137 */
1138
1139 if (dev->flags & IFF_UP)
1140 return 0;
1141
1142 /*
1143 * Is it even present? 1125 * Is it even present?
1144 */ 1126 */
1145 if (!netif_device_present(dev)) 1127 if (!netif_device_present(dev))
@@ -1187,36 +1169,57 @@ int dev_open(struct net_device *dev)
1187 * Wakeup transmit queue engine 1169 * Wakeup transmit queue engine
1188 */ 1170 */
1189 dev_activate(dev); 1171 dev_activate(dev);
1190
1191 /*
1192 * ... and announce new interface.
1193 */
1194 call_netdevice_notifiers(NETDEV_UP, dev);
1195 } 1172 }
1196 1173
1197 return ret; 1174 return ret;
1198} 1175}
1199EXPORT_SYMBOL(dev_open);
1200 1176
1201/** 1177/**
1202 * dev_close - shutdown an interface. 1178 * dev_open - prepare an interface for use.
1203 * @dev: device to shutdown 1179 * @dev: device to open
1204 * 1180 *
1205 * This function moves an active device into down state. A 1181 * Takes a device from down to up state. The device's private open
1206 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1182 * function is invoked and then the multicast lists are loaded. Finally
1207 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1183 * the device is moved into the up state and a %NETDEV_UP message is
1208 * chain. 1184 * sent to the netdev notifier chain.
1185 *
1186 * Calling this function on an active interface is a nop. On a failure
1187 * a negative errno code is returned.
1209 */ 1188 */
1210int dev_close(struct net_device *dev) 1189int dev_open(struct net_device *dev)
1190{
1191 int ret;
1192
1193 /*
1194 * Is it already up?
1195 */
1196 if (dev->flags & IFF_UP)
1197 return 0;
1198
1199 /*
1200 * Open device
1201 */
1202 ret = __dev_open(dev);
1203 if (ret < 0)
1204 return ret;
1205
1206 /*
1207 * ... and announce new interface.
1208 */
1209 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1210 call_netdevice_notifiers(NETDEV_UP, dev);
1211
1212 return ret;
1213}
1214EXPORT_SYMBOL(dev_open);
1215
1216static int __dev_close(struct net_device *dev)
1211{ 1217{
1212 const struct net_device_ops *ops = dev->netdev_ops; 1218 const struct net_device_ops *ops = dev->netdev_ops;
1213 ASSERT_RTNL();
1214 1219
1220 ASSERT_RTNL();
1215 might_sleep(); 1221 might_sleep();
1216 1222
1217 if (!(dev->flags & IFF_UP))
1218 return 0;
1219
1220 /* 1223 /*
1221 * Tell people we are going down, so that they can 1224 * Tell people we are going down, so that they can
1222 * prepare to death, when device is still operating. 1225 * prepare to death, when device is still operating.
@@ -1252,14 +1255,34 @@ int dev_close(struct net_device *dev)
1252 dev->flags &= ~IFF_UP; 1255 dev->flags &= ~IFF_UP;
1253 1256
1254 /* 1257 /*
1255 * Tell people we are down 1258 * Shutdown NET_DMA
1256 */ 1259 */
1257 call_netdevice_notifiers(NETDEV_DOWN, dev); 1260 net_dmaengine_put();
1261
1262 return 0;
1263}
1264
1265/**
1266 * dev_close - shutdown an interface.
1267 * @dev: device to shutdown
1268 *
1269 * This function moves an active device into down state. A
1270 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1271 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1272 * chain.
1273 */
1274int dev_close(struct net_device *dev)
1275{
1276 if (!(dev->flags & IFF_UP))
1277 return 0;
1278
1279 __dev_close(dev);
1258 1280
1259 /* 1281 /*
1260 * Shutdown NET_DMA 1282 * Tell people we are down
1261 */ 1283 */
1262 net_dmaengine_put(); 1284 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1285 call_netdevice_notifiers(NETDEV_DOWN, dev);
1263 1286
1264 return 0; 1287 return 0;
1265} 1288}
@@ -1448,13 +1471,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1448 if (skb->len > (dev->mtu + dev->hard_header_len)) 1471 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP; 1472 return NET_RX_DROP;
1450 1473
1451 skb_dst_drop(skb); 1474 skb_set_dev(skb, dev);
1452 skb->tstamp.tv64 = 0; 1475 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST; 1476 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev); 1477 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb); 1478 return netif_rx(skb);
1459} 1479}
1460EXPORT_SYMBOL_GPL(dev_forward_skb); 1480EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1634,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1614 return false; 1634 return false;
1615} 1635}
1616 1636
1637/**
1638 * skb_dev_set -- assign a new device to a buffer
1639 * @skb: buffer for the new device
1640 * @dev: network device
1641 *
1642 * If an skb is owned by a device already, we have to reset
1643 * all data private to the namespace a device belongs to
1644 * before assigning it a new device.
1645 */
1646#ifdef CONFIG_NET_NS
1647void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1648{
1649 skb_dst_drop(skb);
1650 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1651 secpath_reset(skb);
1652 nf_reset(skb);
1653 skb_init_secmark(skb);
1654 skb->mark = 0;
1655 skb->priority = 0;
1656 skb->nf_trace = 0;
1657 skb->ipvs_property = 0;
1658#ifdef CONFIG_NET_SCHED
1659 skb->tc_index = 0;
1660#endif
1661 }
1662 skb->dev = dev;
1663}
1664EXPORT_SYMBOL(skb_set_dev);
1665#endif /* CONFIG_NET_NS */
1666
1617/* 1667/*
1618 * Invalidate hardware checksum when packet is to be mangled, and 1668 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path. 1669 * complete checksum manually on outgoing path.
@@ -1853,6 +1903,14 @@ gso:
1853 1903
1854 skb->next = nskb->next; 1904 skb->next = nskb->next;
1855 nskb->next = NULL; 1905 nskb->next = NULL;
1906
1907 /*
1908	 * If device doesn't need nskb->dst, release it right now while
1909	 * it's hot in this cpu cache
1910 */
1911 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1912 skb_dst_drop(nskb);
1913
1856 rc = ops->ndo_start_xmit(nskb, dev); 1914 rc = ops->ndo_start_xmit(nskb, dev);
1857 if (unlikely(rc != NETDEV_TX_OK)) { 1915 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK) 1916 if (rc & ~NETDEV_TX_MASK)
@@ -1931,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1931 if (dev->real_num_tx_queues > 1) 1989 if (dev->real_num_tx_queues > 1)
1932 queue_index = skb_tx_hash(dev, skb); 1990 queue_index = skb_tx_hash(dev, skb);
1933 1991
1934 if (sk && sk->sk_dst_cache) 1992 if (sk) {
1935 sk_tx_queue_set(sk, queue_index); 1993 struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
1994
1995 if (dst && skb_dst(skb) == dst)
1996 sk_tx_queue_set(sk, queue_index);
1997 }
1936 } 1998 }
1937 } 1999 }
1938 2000
@@ -1974,6 +2036,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1974 return rc; 2036 return rc;
1975} 2037}
1976 2038
2039/*
2040 * Returns true if either:
2041 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2042 * 2. skb is fragmented and the device does not support SG, or if
2043 * at least one of fragments is in highmem and device does not
2044 * support DMA from it.
2045 */
2046static inline int skb_needs_linearize(struct sk_buff *skb,
2047 struct net_device *dev)
2048{
2049 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2050 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2051 illegal_highdma(dev, skb)));
2052}
2053
1977/** 2054/**
1978 * dev_queue_xmit - transmit a buffer 2055 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit 2056 * @skb: buffer to transmit
@@ -2010,18 +2087,8 @@ int dev_queue_xmit(struct sk_buff *skb)
2010 if (netif_needs_gso(dev, skb)) 2087 if (netif_needs_gso(dev, skb))
2011 goto gso; 2088 goto gso;
2012 2089
2013 if (skb_has_frags(skb) && 2090 /* Convert a paged skb to linear, if required */
2014 !(dev->features & NETIF_F_FRAGLIST) && 2091 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2015 __skb_linearize(skb))
2016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019 * or if at least one of fragments is in highmem and device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
2024 __skb_linearize(skb))
2025 goto out_kfree_skb; 2092 goto out_kfree_skb;
2026 2093
2027 /* If packet is not checksummed and device does not support 2094 /* If packet is not checksummed and device does not support
@@ -2041,7 +2108,7 @@ gso:
2041 rcu_read_lock_bh(); 2108 rcu_read_lock_bh();
2042 2109
2043 txq = dev_pick_tx(dev, skb); 2110 txq = dev_pick_tx(dev, skb);
2044 q = rcu_dereference(txq->qdisc); 2111 q = rcu_dereference_bh(txq->qdisc);
2045 2112
2046#ifdef CONFIG_NET_CLS_ACT 2113#ifdef CONFIG_NET_CLS_ACT
2047 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2114 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
@@ -2421,7 +2488,9 @@ int netif_receive_skb(struct sk_buff *skb)
2421{ 2488{
2422 struct packet_type *ptype, *pt_prev; 2489 struct packet_type *ptype, *pt_prev;
2423 struct net_device *orig_dev; 2490 struct net_device *orig_dev;
2491 struct net_device *master;
2424 struct net_device *null_or_orig; 2492 struct net_device *null_or_orig;
2493 struct net_device *null_or_bond;
2425 int ret = NET_RX_DROP; 2494 int ret = NET_RX_DROP;
2426 __be16 type; 2495 __be16 type;
2427 2496
@@ -2440,11 +2509,12 @@ int netif_receive_skb(struct sk_buff *skb)
2440 2509
2441 null_or_orig = NULL; 2510 null_or_orig = NULL;
2442 orig_dev = skb->dev; 2511 orig_dev = skb->dev;
2443 if (orig_dev->master) { 2512 master = ACCESS_ONCE(orig_dev->master);
2444 if (skb_bond_should_drop(skb)) 2513 if (master) {
2514 if (skb_bond_should_drop(skb, master))
2445 null_or_orig = orig_dev; /* deliver only exact match */ 2515 null_or_orig = orig_dev; /* deliver only exact match */
2446 else 2516 else
2447 skb->dev = orig_dev->master; 2517 skb->dev = master;
2448 } 2518 }
2449 2519
2450 __get_cpu_var(netdev_rx_stat).total++; 2520 __get_cpu_var(netdev_rx_stat).total++;
@@ -2487,12 +2557,24 @@ ncls:
2487 if (!skb) 2557 if (!skb)
2488 goto out; 2558 goto out;
2489 2559
2560 /*
2561 * Make sure frames received on VLAN interfaces stacked on
2562 * bonding interfaces still make their way to any base bonding
2563 * device that may have registered for a specific ptype. The
2564 * handler may have to adjust skb->dev and orig_dev.
2565 */
2566 null_or_bond = NULL;
2567 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2568 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2569 null_or_bond = vlan_dev_real_dev(skb->dev);
2570 }
2571
2490 type = skb->protocol; 2572 type = skb->protocol;
2491 list_for_each_entry_rcu(ptype, 2573 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2574 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2493 if (ptype->type == type && 2575 if (ptype->type == type && (ptype->dev == null_or_orig ||
2494 (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2576 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2495 ptype->dev == orig_dev)) { 2577 ptype->dev == null_or_bond)) {
2496 if (pt_prev) 2578 if (pt_prev)
2497 ret = deliver_skb(skb, pt_prev, orig_dev); 2579 ret = deliver_skb(skb, pt_prev, orig_dev);
2498 pt_prev = ptype; 2580 pt_prev = ptype;
@@ -2561,7 +2643,7 @@ out:
2561 return netif_receive_skb(skb); 2643 return netif_receive_skb(skb);
2562} 2644}
2563 2645
2564void napi_gro_flush(struct napi_struct *napi) 2646static void napi_gro_flush(struct napi_struct *napi)
2565{ 2647{
2566 struct sk_buff *skb, *next; 2648 struct sk_buff *skb, *next;
2567 2649
@@ -2574,7 +2656,6 @@ void napi_gro_flush(struct napi_struct *napi)
2574 napi->gro_count = 0; 2656 napi->gro_count = 0;
2575 napi->gro_list = NULL; 2657 napi->gro_list = NULL;
2576} 2658}
2577EXPORT_SYMBOL(napi_gro_flush);
2578 2659
2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2660enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2580{ 2661{
@@ -2966,7 +3047,7 @@ static void net_rx_action(struct softirq_action *h)
2966 * entries to the tail of this list, and only ->poll() 3047 * entries to the tail of this list, and only ->poll()
2967 * calls can remove this head entry from the list. 3048 * calls can remove this head entry from the list.
2968 */ 3049 */
2969 n = list_entry(list->next, struct napi_struct, poll_list); 3050 n = list_first_entry(list, struct napi_struct, poll_list);
2970 3051
2971 have = netpoll_poll_lock(n); 3052 have = netpoll_poll_lock(n);
2972 3053
@@ -3185,7 +3266,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{ 3266{
3186 const struct net_device_stats *stats = dev_get_stats(dev); 3267 const struct net_device_stats *stats = dev_get_stats(dev);
3187 3268
3188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " 3269 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", 3270 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets, 3271 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors, 3272 stats->rx_errors,
@@ -3640,10 +3721,10 @@ void __dev_set_rx_mode(struct net_device *dev)
3640 /* Unicast addresses changes may only happen under the rtnl, 3721 /* Unicast addresses changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe. 3722 * therefore calling __dev_set_promiscuity here is safe.
3642 */ 3723 */
3643 if (dev->uc.count > 0 && !dev->uc_promisc) { 3724 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3644 __dev_set_promiscuity(dev, 1); 3725 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1; 3726 dev->uc_promisc = 1;
3646 } else if (dev->uc.count == 0 && dev->uc_promisc) { 3727 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3647 __dev_set_promiscuity(dev, -1); 3728 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0; 3729 dev->uc_promisc = 0;
3649 } 3730 }
@@ -4211,7 +4292,7 @@ static void dev_addr_discard(struct net_device *dev)
4211 netif_addr_lock_bh(dev); 4292 netif_addr_lock_bh(dev);
4212 4293
4213 __dev_addr_discard(&dev->mc_list); 4294 __dev_addr_discard(&dev->mc_list);
4214 dev->mc_count = 0; 4295 netdev_mc_count(dev) = 0;
4215 4296
4216 netif_addr_unlock_bh(dev); 4297 netif_addr_unlock_bh(dev);
4217} 4298}
@@ -4247,18 +4328,10 @@ unsigned dev_get_flags(const struct net_device *dev)
4247} 4328}
4248EXPORT_SYMBOL(dev_get_flags); 4329EXPORT_SYMBOL(dev_get_flags);
4249 4330
4250/** 4331int __dev_change_flags(struct net_device *dev, unsigned int flags)
4251 * dev_change_flags - change device settings
4252 * @dev: device
4253 * @flags: device state flags
4254 *
4255 * Change settings on device based state flags. The flags are
4256 * in the userspace exported format.
4257 */
4258int dev_change_flags(struct net_device *dev, unsigned flags)
4259{ 4332{
4260 int ret, changes;
4261 int old_flags = dev->flags; 4333 int old_flags = dev->flags;
4334 int ret;
4262 4335
4263 ASSERT_RTNL(); 4336 ASSERT_RTNL();
4264 4337
@@ -4289,17 +4362,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4289 4362
4290 ret = 0; 4363 ret = 0;
4291 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 4364 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4292 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); 4365 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4293 4366
4294 if (!ret) 4367 if (!ret)
4295 dev_set_rx_mode(dev); 4368 dev_set_rx_mode(dev);
4296 } 4369 }
4297 4370
4298 if (dev->flags & IFF_UP &&
4299 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4300 IFF_VOLATILE)))
4301 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4302
4303 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4371 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4304 int inc = (flags & IFF_PROMISC) ? 1 : -1; 4372 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4305 4373
@@ -4318,11 +4386,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4318 dev_set_allmulti(dev, inc); 4386 dev_set_allmulti(dev, inc);
4319 } 4387 }
4320 4388
4321 /* Exclude state transition flags, already notified */ 4389 return ret;
4322 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING); 4390}
4391
4392void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4393{
4394 unsigned int changes = dev->flags ^ old_flags;
4395
4396 if (changes & IFF_UP) {
4397 if (dev->flags & IFF_UP)
4398 call_netdevice_notifiers(NETDEV_UP, dev);
4399 else
4400 call_netdevice_notifiers(NETDEV_DOWN, dev);
4401 }
4402
4403 if (dev->flags & IFF_UP &&
4404 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4405 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4406}
4407
4408/**
4409 * dev_change_flags - change device settings
4410 * @dev: device
4411 * @flags: device state flags
4412 *
4413 * Change settings on device based state flags. The flags are
4414 * in the userspace exported format.
4415 */
4416int dev_change_flags(struct net_device *dev, unsigned flags)
4417{
4418 int ret, changes;
4419 int old_flags = dev->flags;
4420
4421 ret = __dev_change_flags(dev, flags);
4422 if (ret < 0)
4423 return ret;
4424
4425 changes = old_flags ^ dev->flags;
4323 if (changes) 4426 if (changes)
4324 rtmsg_ifinfo(RTM_NEWLINK, dev, changes); 4427 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4325 4428
4429 __dev_notify_flags(dev, old_flags);
4326 return ret; 4430 return ret;
4327} 4431}
4328EXPORT_SYMBOL(dev_change_flags); 4432EXPORT_SYMBOL(dev_change_flags);
@@ -4813,6 +4917,10 @@ static void rollback_registered_many(struct list_head *head)
4813 */ 4917 */
4814 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 4918 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4815 4919
4920 if (!dev->rtnl_link_ops ||
4921 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4922 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4923
4816 /* 4924 /*
4817 * Flush the unicast and multicast chains 4925 * Flush the unicast and multicast chains
4818 */ 4926 */
@@ -4830,7 +4938,7 @@ static void rollback_registered_many(struct list_head *head)
4830 } 4938 }
4831 4939
4832 /* Process any work delayed until the end of the batch */ 4940 /* Process any work delayed until the end of the batch */
4833 dev = list_entry(head->next, struct net_device, unreg_list); 4941 dev = list_first_entry(head, struct net_device, unreg_list);
4834 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 4942 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4835 4943
4836 synchronize_net(); 4944 synchronize_net();
@@ -5039,7 +5147,9 @@ int register_netdevice(struct net_device *dev)
5039 * Prevent userspace races by waiting until the network 5147 * Prevent userspace races by waiting until the network
5040 * device is fully setup before sending notifications. 5148 * device is fully setup before sending notifications.
5041 */ 5149 */
5042 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); 5150 if (!dev->rtnl_link_ops ||
5151 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5152 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5043 5153
5044out: 5154out:
5045 return ret; 5155 return ret;
@@ -5216,7 +5326,7 @@ void netdev_run_todo(void)
5216 5326
5217 while (!list_empty(&list)) { 5327 while (!list_empty(&list)) {
5218 struct net_device *dev 5328 struct net_device *dev
5219 = list_entry(list.next, struct net_device, todo_list); 5329 = list_first_entry(&list, struct net_device, todo_list);
5220 list_del(&dev->todo_list); 5330 list_del(&dev->todo_list);
5221 5331
5222 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5332 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5367,6 +5477,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5367 5477
5368 netdev_init_queues(dev); 5478 netdev_init_queues(dev);
5369 5479
5480 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5481 dev->ethtool_ntuple_list.count = 0;
5370 INIT_LIST_HEAD(&dev->napi_list); 5482 INIT_LIST_HEAD(&dev->napi_list);
5371 INIT_LIST_HEAD(&dev->unreg_list); 5483 INIT_LIST_HEAD(&dev->unreg_list);
5372 INIT_LIST_HEAD(&dev->link_watch_list); 5484 INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5515,9 @@ void free_netdev(struct net_device *dev)
5403 /* Flush device addresses */ 5515 /* Flush device addresses */
5404 dev_addr_flush(dev); 5516 dev_addr_flush(dev);
5405 5517
5518 /* Clear ethtool n-tuple list */
5519 ethtool_ntuple_flush(dev);
5520
5406 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5521 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5407 netif_napi_del(p); 5522 netif_napi_del(p);
5408 5523
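The dev.c changes above split dev_open()/dev_close()/dev_change_flags() into __dev_open()/__dev_close()/__dev_change_flags() plus __dev_notify_flags(), moving the rtnetlink and notifier announcements into the outer wrappers. A hedged userland sketch of the classic path that still ends up there, toggling IFF_UP via SIOCSIFFLAGS, follows; it is not part of the commit, and the interface name "eth0" is an assumption.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
                ifr.ifr_flags |= IFF_UP;
                /* kernel side: dev_change_flags() -> __dev_change_flags()
                 * -> __dev_open(), then rtmsg_ifinfo()/__dev_notify_flags() */
                ioctl(fd, SIOCSIFFLAGS, &ifr);
        }

        close(fd);
        return 0;
}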
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 9e2fa39f22a3..3dc295beb483 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -96,7 +96,10 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
96 int err; 96 int err;
97 97
98 netif_addr_lock_bh(dev); 98 netif_addr_lock_bh(dev);
99 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); 99 if (alen != dev->addr_len)
100 err = -EINVAL;
101 else
102 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
100 if (!err) 103 if (!err)
101 __dev_set_rx_mode(dev); 104 __dev_set_rx_mode(dev);
102 netif_addr_unlock_bh(dev); 105 netif_addr_unlock_bh(dev);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a86887..cf208d8042b1 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -21,6 +21,7 @@
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <linux/timer.h> 22#include <linux/timer.h>
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/slab.h>
24#include <net/genetlink.h> 25#include <net/genetlink.h>
25#include <net/netevent.h> 26#include <net/netevent.h>
26 27
@@ -296,7 +297,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
296 297
297 new_stat->dev = dev; 298 new_stat->dev = dev;
298 new_stat->last_rx = jiffies; 299 new_stat->last_rx = jiffies;
299 INIT_RCU_HEAD(&new_stat->rcu);
300 spin_lock(&trace_state_lock); 300 spin_lock(&trace_state_lock);
301 list_add_rcu(&new_stat->list, &hw_stats_list); 301 list_add_rcu(&new_stat->list, &hw_stats_list);
302 spin_unlock(&trace_state_lock); 302 spin_unlock(&trace_state_lock);
diff --git a/net/core/dst.c b/net/core/dst.c
index cb1b3488b739..f307bc18f6a0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -12,6 +12,7 @@
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/string.h> 18#include <linux/string.h>
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 236a9988ea91..9d55c57f318a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/bitops.h>
21#include <linux/slab.h>
20#include <asm/uaccess.h> 22#include <asm/uaccess.h>
21 23
22/* 24/*
@@ -120,7 +122,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
120 * NETIF_F_xxx values in include/linux/netdevice.h 122 * NETIF_F_xxx values in include/linux/netdevice.h
121 */ 123 */
122static const u32 flags_dup_features = 124static const u32 flags_dup_features =
123 ETH_FLAG_LRO; 125 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
124 126
125u32 ethtool_op_get_flags(struct net_device *dev) 127u32 ethtool_op_get_flags(struct net_device *dev)
126{ 128{
@@ -134,19 +136,44 @@ u32 ethtool_op_get_flags(struct net_device *dev)
134 136
135int ethtool_op_set_flags(struct net_device *dev, u32 data) 137int ethtool_op_set_flags(struct net_device *dev, u32 data)
136{ 138{
139 const struct ethtool_ops *ops = dev->ethtool_ops;
140 unsigned long features = dev->features;
141
137 if (data & ETH_FLAG_LRO) 142 if (data & ETH_FLAG_LRO)
138 dev->features |= NETIF_F_LRO; 143 features |= NETIF_F_LRO;
139 else 144 else
140 dev->features &= ~NETIF_F_LRO; 145 features &= ~NETIF_F_LRO;
146
147 if (data & ETH_FLAG_NTUPLE) {
148 if (!ops->set_rx_ntuple)
149 return -EOPNOTSUPP;
150 features |= NETIF_F_NTUPLE;
151 } else {
152 /* safe to clear regardless */
153 features &= ~NETIF_F_NTUPLE;
154 }
141 155
156 dev->features = features;
142 return 0; 157 return 0;
143} 158}
144 159
160void ethtool_ntuple_flush(struct net_device *dev)
161{
162 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
163
164 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
165 list_del(&fsc->list);
166 kfree(fsc);
167 }
168 dev->ethtool_ntuple_list.count = 0;
169}
170EXPORT_SYMBOL(ethtool_ntuple_flush);
171
145/* Handlers for each ethtool command */ 172/* Handlers for each ethtool command */
146 173
147static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 174static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
148{ 175{
149 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 176 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
150 int err; 177 int err;
151 178
152 if (!dev->ethtool_ops->get_settings) 179 if (!dev->ethtool_ops->get_settings)
@@ -174,7 +201,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
174 return dev->ethtool_ops->set_settings(dev, &cmd); 201 return dev->ethtool_ops->set_settings(dev, &cmd);
175} 202}
176 203
177static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) 204static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
178{ 205{
179 struct ethtool_drvinfo info; 206 struct ethtool_drvinfo info;
180 const struct ethtool_ops *ops = dev->ethtool_ops; 207 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -186,6 +213,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
186 info.cmd = ETHTOOL_GDRVINFO; 213 info.cmd = ETHTOOL_GDRVINFO;
187 ops->get_drvinfo(dev, &info); 214 ops->get_drvinfo(dev, &info);
188 215
216 /*
217 * this method of obtaining string set info is deprecated;
218 * Use ETHTOOL_GSSET_INFO instead.
219 */
189 if (ops->get_sset_count) { 220 if (ops->get_sset_count) {
190 int rc; 221 int rc;
191 222
@@ -209,7 +240,67 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
209 return 0; 240 return 0;
210} 241}
211 242
212static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) 243static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
244 void __user *useraddr)
245{
246 struct ethtool_sset_info info;
247 const struct ethtool_ops *ops = dev->ethtool_ops;
248 u64 sset_mask;
249 int i, idx = 0, n_bits = 0, ret, rc;
250 u32 *info_buf = NULL;
251
252 if (!ops->get_sset_count)
253 return -EOPNOTSUPP;
254
255 if (copy_from_user(&info, useraddr, sizeof(info)))
256 return -EFAULT;
257
258 /* store copy of mask, because we zero struct later on */
259 sset_mask = info.sset_mask;
260 if (!sset_mask)
261 return 0;
262
263 /* calculate size of return buffer */
264 n_bits = hweight64(sset_mask);
265
266 memset(&info, 0, sizeof(info));
267 info.cmd = ETHTOOL_GSSET_INFO;
268
269 info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
270 if (!info_buf)
271 return -ENOMEM;
272
273 /*
274 * fill return buffer based on input bitmask and successful
275 * get_sset_count return
276 */
277 for (i = 0; i < 64; i++) {
278 if (!(sset_mask & (1ULL << i)))
279 continue;
280
281 rc = ops->get_sset_count(dev, i);
282 if (rc >= 0) {
283 info.sset_mask |= (1ULL << i);
284 info_buf[idx++] = rc;
285 }
286 }
287
288 ret = -EFAULT;
289 if (copy_to_user(useraddr, &info, sizeof(info)))
290 goto out;
291
292 useraddr += offsetof(struct ethtool_sset_info, data);
293 if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
294 goto out;
295
296 ret = 0;
297
298out:
299 kfree(info_buf);
300 return ret;
301}
302
303static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
213{ 304{
214 struct ethtool_rxnfc cmd; 305 struct ethtool_rxnfc cmd;
215 306
@@ -222,7 +313,7 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
222 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 313 return dev->ethtool_ops->set_rxnfc(dev, &cmd);
223} 314}
224 315
225static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) 316static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
226{ 317{
227 struct ethtool_rxnfc info; 318 struct ethtool_rxnfc info;
228 const struct ethtool_ops *ops = dev->ethtool_ops; 319 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +357,312 @@ err_out:
266 return ret; 357 return ret;
267} 358}
268 359
360static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
361 struct ethtool_rx_ntuple_flow_spec *spec,
362 struct ethtool_rx_ntuple_flow_spec_container *fsc)
363{
364
365 /* don't add filters forever */
366 if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
367 /* free the container */
368 kfree(fsc);
369 return;
370 }
371
372 /* Copy the whole filter over */
373 fsc->fs.flow_type = spec->flow_type;
374 memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
375 memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
376
377 fsc->fs.vlan_tag = spec->vlan_tag;
378 fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
379 fsc->fs.data = spec->data;
380 fsc->fs.data_mask = spec->data_mask;
381 fsc->fs.action = spec->action;
382
383 /* add to the list */
384 list_add_tail_rcu(&fsc->list, &list->list);
385 list->count++;
386}
387
388static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
389{
390 struct ethtool_rx_ntuple cmd;
391 const struct ethtool_ops *ops = dev->ethtool_ops;
392 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
393 int ret;
394
395 if (!(dev->features & NETIF_F_NTUPLE))
396 return -EINVAL;
397
398 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
399 return -EFAULT;
400
401 /*
402 * Cache filter in dev struct for GET operation only if
403 * the underlying driver doesn't have its own GET operation, and
404 * only if the filter was added successfully. First make sure we
405 * can allocate the filter, then continue if successful.
406 */
407 if (!ops->get_rx_ntuple) {
408 fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
409 if (!fsc)
410 return -ENOMEM;
411 }
412
413 ret = ops->set_rx_ntuple(dev, &cmd);
414 if (ret) {
415 kfree(fsc);
416 return ret;
417 }
418
419 if (!ops->get_rx_ntuple)
420 __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
421
422 return ret;
423}
424
425static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
426{
427 struct ethtool_gstrings gstrings;
428 const struct ethtool_ops *ops = dev->ethtool_ops;
429 struct ethtool_rx_ntuple_flow_spec_container *fsc;
430 u8 *data;
431 char *p;
432 int ret, i, num_strings = 0;
433
434 if (!ops->get_sset_count)
435 return -EOPNOTSUPP;
436
437 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
438 return -EFAULT;
439
440 ret = ops->get_sset_count(dev, gstrings.string_set);
441 if (ret < 0)
442 return ret;
443
444 gstrings.len = ret;
445
446 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
447 if (!data)
448 return -ENOMEM;
449
450 if (ops->get_rx_ntuple) {
451 /* driver-specific filter grab */
452 ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
453 goto copy;
454 }
455
456 /* default ethtool filter grab */
457 i = 0;
458 p = (char *)data;
459 list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
460 sprintf(p, "Filter %d:\n", i);
461 p += ETH_GSTRING_LEN;
462 num_strings++;
463
464 switch (fsc->fs.flow_type) {
465 case TCP_V4_FLOW:
466 sprintf(p, "\tFlow Type: TCP\n");
467 p += ETH_GSTRING_LEN;
468 num_strings++;
469 break;
470 case UDP_V4_FLOW:
471 sprintf(p, "\tFlow Type: UDP\n");
472 p += ETH_GSTRING_LEN;
473 num_strings++;
474 break;
475 case SCTP_V4_FLOW:
476 sprintf(p, "\tFlow Type: SCTP\n");
477 p += ETH_GSTRING_LEN;
478 num_strings++;
479 break;
480 case AH_ESP_V4_FLOW:
481 sprintf(p, "\tFlow Type: AH ESP\n");
482 p += ETH_GSTRING_LEN;
483 num_strings++;
484 break;
485 case ESP_V4_FLOW:
486 sprintf(p, "\tFlow Type: ESP\n");
487 p += ETH_GSTRING_LEN;
488 num_strings++;
489 break;
490 case IP_USER_FLOW:
491 sprintf(p, "\tFlow Type: Raw IP\n");
492 p += ETH_GSTRING_LEN;
493 num_strings++;
494 break;
495 case IPV4_FLOW:
496 sprintf(p, "\tFlow Type: IPv4\n");
497 p += ETH_GSTRING_LEN;
498 num_strings++;
499 break;
500 default:
501 sprintf(p, "\tFlow Type: Unknown\n");
502 p += ETH_GSTRING_LEN;
503 num_strings++;
504 goto unknown_filter;
505 };
506
507 /* now the rest of the filters */
508 switch (fsc->fs.flow_type) {
509 case TCP_V4_FLOW:
510 case UDP_V4_FLOW:
511 case SCTP_V4_FLOW:
512 sprintf(p, "\tSrc IP addr: 0x%x\n",
513 fsc->fs.h_u.tcp_ip4_spec.ip4src);
514 p += ETH_GSTRING_LEN;
515 num_strings++;
516 sprintf(p, "\tSrc IP mask: 0x%x\n",
517 fsc->fs.m_u.tcp_ip4_spec.ip4src);
518 p += ETH_GSTRING_LEN;
519 num_strings++;
520 sprintf(p, "\tDest IP addr: 0x%x\n",
521 fsc->fs.h_u.tcp_ip4_spec.ip4dst);
522 p += ETH_GSTRING_LEN;
523 num_strings++;
524 sprintf(p, "\tDest IP mask: 0x%x\n",
525 fsc->fs.m_u.tcp_ip4_spec.ip4dst);
526 p += ETH_GSTRING_LEN;
527 num_strings++;
528 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
529 fsc->fs.h_u.tcp_ip4_spec.psrc,
530 fsc->fs.m_u.tcp_ip4_spec.psrc);
531 p += ETH_GSTRING_LEN;
532 num_strings++;
533 sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
534 fsc->fs.h_u.tcp_ip4_spec.pdst,
535 fsc->fs.m_u.tcp_ip4_spec.pdst);
536 p += ETH_GSTRING_LEN;
537 num_strings++;
538 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
539 fsc->fs.h_u.tcp_ip4_spec.tos,
540 fsc->fs.m_u.tcp_ip4_spec.tos);
541 p += ETH_GSTRING_LEN;
542 num_strings++;
543 break;
544 case AH_ESP_V4_FLOW:
545 case ESP_V4_FLOW:
546 sprintf(p, "\tSrc IP addr: 0x%x\n",
547 fsc->fs.h_u.ah_ip4_spec.ip4src);
548 p += ETH_GSTRING_LEN;
549 num_strings++;
550 sprintf(p, "\tSrc IP mask: 0x%x\n",
551 fsc->fs.m_u.ah_ip4_spec.ip4src);
552 p += ETH_GSTRING_LEN;
553 num_strings++;
554 sprintf(p, "\tDest IP addr: 0x%x\n",
555 fsc->fs.h_u.ah_ip4_spec.ip4dst);
556 p += ETH_GSTRING_LEN;
557 num_strings++;
558 sprintf(p, "\tDest IP mask: 0x%x\n",
559 fsc->fs.m_u.ah_ip4_spec.ip4dst);
560 p += ETH_GSTRING_LEN;
561 num_strings++;
562 sprintf(p, "\tSPI: %d, mask: 0x%x\n",
563 fsc->fs.h_u.ah_ip4_spec.spi,
564 fsc->fs.m_u.ah_ip4_spec.spi);
565 p += ETH_GSTRING_LEN;
566 num_strings++;
567 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
568 fsc->fs.h_u.ah_ip4_spec.tos,
569 fsc->fs.m_u.ah_ip4_spec.tos);
570 p += ETH_GSTRING_LEN;
571 num_strings++;
572 break;
573 case IP_USER_FLOW:
574 sprintf(p, "\tSrc IP addr: 0x%x\n",
575 fsc->fs.h_u.raw_ip4_spec.ip4src);
576 p += ETH_GSTRING_LEN;
577 num_strings++;
578 sprintf(p, "\tSrc IP mask: 0x%x\n",
579 fsc->fs.m_u.raw_ip4_spec.ip4src);
580 p += ETH_GSTRING_LEN;
581 num_strings++;
582 sprintf(p, "\tDest IP addr: 0x%x\n",
583 fsc->fs.h_u.raw_ip4_spec.ip4dst);
584 p += ETH_GSTRING_LEN;
585 num_strings++;
586 sprintf(p, "\tDest IP mask: 0x%x\n",
587 fsc->fs.m_u.raw_ip4_spec.ip4dst);
588 p += ETH_GSTRING_LEN;
589 num_strings++;
590 break;
591 case IPV4_FLOW:
592 sprintf(p, "\tSrc IP addr: 0x%x\n",
593 fsc->fs.h_u.usr_ip4_spec.ip4src);
594 p += ETH_GSTRING_LEN;
595 num_strings++;
596 sprintf(p, "\tSrc IP mask: 0x%x\n",
597 fsc->fs.m_u.usr_ip4_spec.ip4src);
598 p += ETH_GSTRING_LEN;
599 num_strings++;
600 sprintf(p, "\tDest IP addr: 0x%x\n",
601 fsc->fs.h_u.usr_ip4_spec.ip4dst);
602 p += ETH_GSTRING_LEN;
603 num_strings++;
604 sprintf(p, "\tDest IP mask: 0x%x\n",
605 fsc->fs.m_u.usr_ip4_spec.ip4dst);
606 p += ETH_GSTRING_LEN;
607 num_strings++;
608 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
609 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
610 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
611 p += ETH_GSTRING_LEN;
612 num_strings++;
613 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
614 fsc->fs.h_u.usr_ip4_spec.tos,
615 fsc->fs.m_u.usr_ip4_spec.tos);
616 p += ETH_GSTRING_LEN;
617 num_strings++;
618 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
619 fsc->fs.h_u.usr_ip4_spec.ip_ver,
620 fsc->fs.m_u.usr_ip4_spec.ip_ver);
621 p += ETH_GSTRING_LEN;
622 num_strings++;
623 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
624 fsc->fs.h_u.usr_ip4_spec.proto,
625 fsc->fs.m_u.usr_ip4_spec.proto);
626 p += ETH_GSTRING_LEN;
627 num_strings++;
628 break;
629 };
630 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
631 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
632 p += ETH_GSTRING_LEN;
633 num_strings++;
634 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
635 p += ETH_GSTRING_LEN;
636 num_strings++;
637 sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
638 p += ETH_GSTRING_LEN;
639 num_strings++;
640 if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
641 sprintf(p, "\tAction: Drop\n");
642 else
643 sprintf(p, "\tAction: Direct to queue %d\n",
644 fsc->fs.action);
645 p += ETH_GSTRING_LEN;
646 num_strings++;
647unknown_filter:
648 i++;
649 }
650copy:
651 /* indicate to userspace how many strings we actually have */
652 gstrings.len = num_strings;
653 ret = -EFAULT;
654 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
655 goto out;
656 useraddr += sizeof(gstrings);
657 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
658 goto out;
659 ret = 0;
660
661out:
662 kfree(data);
663 return ret;
664}
665
269static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 666static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
270{ 667{
271 struct ethtool_regs regs; 668 struct ethtool_regs regs;
@@ -324,7 +721,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
324 721
325static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 722static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
326{ 723{
327 struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; 724 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
328 725
329 if (!dev->ethtool_ops->get_wol) 726 if (!dev->ethtool_ops->get_wol)
330 return -EOPNOTSUPP; 727 return -EOPNOTSUPP;
@@ -456,9 +853,9 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
456 return ret; 853 return ret;
457} 854}
458 855
459static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) 856static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
460{ 857{
461 struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; 858 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
462 859
463 if (!dev->ethtool_ops->get_coalesce) 860 if (!dev->ethtool_ops->get_coalesce)
464 return -EOPNOTSUPP; 861 return -EOPNOTSUPP;
@@ -470,7 +867,7 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
470 return 0; 867 return 0;
471} 868}
472 869
473static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) 870static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
474{ 871{
475 struct ethtool_coalesce coalesce; 872 struct ethtool_coalesce coalesce;
476 873
@@ -485,7 +882,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
485 882
486static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 883static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
487{ 884{
488 struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; 885 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
489 886
490 if (!dev->ethtool_ops->get_ringparam) 887 if (!dev->ethtool_ops->get_ringparam)
491 return -EOPNOTSUPP; 888 return -EOPNOTSUPP;
@@ -839,7 +1236,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
839static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 1236static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
840 u32 cmd, u32 (*actor)(struct net_device *)) 1237 u32 cmd, u32 (*actor)(struct net_device *))
841{ 1238{
842 struct ethtool_value edata = { cmd }; 1239 struct ethtool_value edata = { .cmd = cmd };
843 1240
844 if (!actor) 1241 if (!actor)
845 return -EOPNOTSUPP; 1242 return -EOPNOTSUPP;
@@ -880,7 +1277,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
880 return actor(dev, edata.data); 1277 return actor(dev, edata.data);
881} 1278}
882 1279
883static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) 1280static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
884{ 1281{
885 struct ethtool_flash efl; 1282 struct ethtool_flash efl;
886 1283
@@ -1113,6 +1510,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1113 case ETHTOOL_RESET: 1510 case ETHTOOL_RESET:
1114 rc = ethtool_reset(dev, useraddr); 1511 rc = ethtool_reset(dev, useraddr);
1115 break; 1512 break;
1513 case ETHTOOL_SRXNTUPLE:
1514 rc = ethtool_set_rx_ntuple(dev, useraddr);
1515 break;
1516 case ETHTOOL_GRXNTUPLE:
1517 rc = ethtool_get_rx_ntuple(dev, useraddr);
1518 break;
1519 case ETHTOOL_GSSET_INFO:
1520 rc = ethtool_get_sset_info(dev, useraddr);
1521 break;
1116 default: 1522 default:
1117 rc = -EOPNOTSUPP; 1523 rc = -EOPNOTSUPP;
1118 } 1524 }
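
The ethtool hunks above convert the command structs to designated initializers (e.g. { .cmd = ETHTOOL_GWOL }), mark a few handlers noinline_for_stack, and wire up the new ETHTOOL_SRXNTUPLE/GRXNTUPLE/GSSET_INFO dispatch cases. As a rough userspace illustration of the SIOCETHTOOL path these handlers serve, here is a minimal sketch, not part of the patch, using the same ".cmd" initializer style; "eth0" is an arbitrary interface name, and the ioctl fails with EOPNOTSUPP when the driver lacks get_ringparam, matching the checks visible in the hunks.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* same designated-initializer style the patch switches the kernel to */
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* arbitrary example */
	ifr.ifr_data = (char *)&ring;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)		/* handled by dev_ethtool() */
		printf("rx ring: %u/%u, tx ring: %u/%u\n",
		       ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);
	close(fd);
	return 0;
}
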
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1e..d2c3e7dc2e5f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/slab.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <net/net_namespace.h> 15#include <net/net_namespace.h>
15#include <net/sock.h> 16#include <net/sock.h>
@@ -708,7 +709,7 @@ static struct notifier_block fib_rules_notifier = {
708 .notifier_call = fib_rules_event, 709 .notifier_call = fib_rules_event,
709}; 710};
710 711
711static int fib_rules_net_init(struct net *net) 712static int __net_init fib_rules_net_init(struct net *net)
712{ 713{
713 INIT_LIST_HEAD(&net->rules_ops); 714 INIT_LIST_HEAD(&net->rules_ops);
714 spin_lock_init(&net->rules_mod_lock); 715 spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a3..ff943bed21af 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -25,6 +25,7 @@
25#include <linux/inet.h> 25#include <linux/inet.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/if_packet.h> 27#include <linux/if_packet.h>
28#include <linux/gfp.h>
28#include <net/ip.h> 29#include <net/ip.h>
29#include <net/protocol.h> 30#include <net/protocol.h>
30#include <net/netlink.h> 31#include <net/netlink.h>
@@ -86,7 +87,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
86 return err; 87 return err;
87 88
88 rcu_read_lock_bh(); 89 rcu_read_lock_bh();
89 filter = rcu_dereference(sk->sk_filter); 90 filter = rcu_dereference_bh(sk->sk_filter);
90 if (filter) { 91 if (filter) {
91 unsigned int pkt_len = sk_run_filter(skb, filter->insns, 92 unsigned int pkt_len = sk_run_filter(skb, filter->insns,
92 filter->len); 93 filter->len);
@@ -521,7 +522,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
521 } 522 }
522 523
523 rcu_read_lock_bh(); 524 rcu_read_lock_bh();
524 old_fp = rcu_dereference(sk->sk_filter); 525 old_fp = rcu_dereference_bh(sk->sk_filter);
525 rcu_assign_pointer(sk->sk_filter, fp); 526 rcu_assign_pointer(sk->sk_filter, fp);
526 rcu_read_unlock_bh(); 527 rcu_read_unlock_bh();
527 528
@@ -529,6 +530,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
529 sk_filter_delayed_uncharge(sk, old_fp); 530 sk_filter_delayed_uncharge(sk, old_fp);
530 return 0; 531 return 0;
531} 532}
533EXPORT_SYMBOL_GPL(sk_attach_filter);
532 534
533int sk_detach_filter(struct sock *sk) 535int sk_detach_filter(struct sock *sk)
534{ 536{
@@ -536,7 +538,7 @@ int sk_detach_filter(struct sock *sk)
536 struct sk_filter *filter; 538 struct sk_filter *filter;
537 539
538 rcu_read_lock_bh(); 540 rcu_read_lock_bh();
539 filter = rcu_dereference(sk->sk_filter); 541 filter = rcu_dereference_bh(sk->sk_filter);
540 if (filter) { 542 if (filter) {
541 rcu_assign_pointer(sk->sk_filter, NULL); 543 rcu_assign_pointer(sk->sk_filter, NULL);
542 sk_filter_delayed_uncharge(sk, filter); 544 sk_filter_delayed_uncharge(sk, filter);
@@ -545,3 +547,4 @@ int sk_detach_filter(struct sock *sk)
545 rcu_read_unlock_bh(); 547 rcu_read_unlock_bh();
546 return ret; 548 return ret;
547} 549}
550EXPORT_SYMBOL_GPL(sk_detach_filter);
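
The filter.c hunks above switch the three reads of sk->sk_filter taken under rcu_read_lock_bh() from rcu_dereference() to rcu_dereference_bh(), and export the attach/detach helpers. A hedged restatement of the resulting pattern follows; the function name is illustrative only, not a symbol added by the patch.

#include <linux/rcupdate.h>
#include <linux/filter.h>
#include <net/sock.h>

/* Illustrative only: a BH-flavoured RCU read of the attached socket filter,
 * as the converted sites now do.  rcu_dereference_bh() keeps the RCU-lockdep
 * annotations consistent with rcu_read_lock_bh().
 */
static unsigned int run_socket_filter(struct sock *sk, struct sk_buff *skb)
{
	struct sk_filter *filter;
	unsigned int pkt_len = 0;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter)
		pkt_len = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return pkt_len;
}
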
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 493775f4f2f1..cf8e70392fe0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -32,6 +32,7 @@
32#include <linux/rtnetlink.h> 32#include <linux/rtnetlink.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/rbtree.h> 34#include <linux/rbtree.h>
35#include <linux/slab.h>
35#include <net/sock.h> 36#include <net/sock.h>
36#include <net/gen_stats.h> 37#include <net/gen_stats.h>
37 38
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 16ad45d4882b..1e7f4e91a935 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/net.h> 23#include <linux/net.h>
25#include <linux/in6.h> 24#include <linux/in6.h>
26#include <asm/uaccess.h> 25#include <asm/uaccess.h>
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 5910b555a54a..bdbce2f5875b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/slab.h>
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24#include <linux/bitops.h> 23#include <linux/bitops.h>
25#include <asm/types.h> 24#include <asm/types.h>
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f35377b643e4..bff37908bd55 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,7 @@
15 * Harald Welte Add neighbour cache statistics like rtstat 15 * Harald Welte Add neighbour cache statistics like rtstat
16 */ 16 */
17 17
18#include <linux/slab.h>
18#include <linux/types.h> 19#include <linux/types.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
@@ -771,6 +772,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
771} 772}
772 773
773static void neigh_invalidate(struct neighbour *neigh) 774static void neigh_invalidate(struct neighbour *neigh)
775 __releases(neigh->lock)
776 __acquires(neigh->lock)
774{ 777{
775 struct sk_buff *skb; 778 struct sk_buff *skb;
776 779
@@ -2417,8 +2420,7 @@ EXPORT_SYMBOL(neigh_seq_stop);
2417 2420
2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 2421static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2419{ 2422{
2420 struct proc_dir_entry *pde = seq->private; 2423 struct neigh_table *tbl = seq->private;
2421 struct neigh_table *tbl = pde->data;
2422 int cpu; 2424 int cpu;
2423 2425
2424 if (*pos == 0) 2426 if (*pos == 0)
@@ -2435,8 +2437,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2435 2437
2436static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2438static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2437{ 2439{
2438 struct proc_dir_entry *pde = seq->private; 2440 struct neigh_table *tbl = seq->private;
2439 struct neigh_table *tbl = pde->data;
2440 int cpu; 2441 int cpu;
2441 2442
2442 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 2443 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
@@ -2455,8 +2456,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2455 2456
2456static int neigh_stat_seq_show(struct seq_file *seq, void *v) 2457static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2457{ 2458{
2458 struct proc_dir_entry *pde = seq->private; 2459 struct neigh_table *tbl = seq->private;
2459 struct neigh_table *tbl = pde->data;
2460 struct neigh_statistics *st = v; 2460 struct neigh_statistics *st = v;
2461 2461
2462 if (v == SEQ_START_TOKEN) { 2462 if (v == SEQ_START_TOKEN) {
@@ -2501,7 +2501,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2501 2501
2502 if (!ret) { 2502 if (!ret) {
2503 struct seq_file *sf = file->private_data; 2503 struct seq_file *sf = file->private_data;
2504 sf->private = PDE(inode); 2504 sf->private = PDE(inode)->data;
2505 } 2505 }
2506 return ret; 2506 return ret;
2507}; 2507};
@@ -2559,9 +2559,11 @@ EXPORT_SYMBOL(neigh_app_ns);
2559 2559
2560#ifdef CONFIG_SYSCTL 2560#ifdef CONFIG_SYSCTL
2561 2561
2562#define NEIGH_VARS_MAX 19
2563
2562static struct neigh_sysctl_table { 2564static struct neigh_sysctl_table {
2563 struct ctl_table_header *sysctl_header; 2565 struct ctl_table_header *sysctl_header;
2564 struct ctl_table neigh_vars[__NET_NEIGH_MAX]; 2566 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2565 char *dev_name; 2567 char *dev_name;
2566} neigh_sysctl_template __read_mostly = { 2568} neigh_sysctl_template __read_mostly = {
2567 .neigh_vars = { 2569 .neigh_vars = {
@@ -2678,8 +2680,7 @@ static struct neigh_sysctl_table {
2678}; 2680};
2679 2681
2680int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 2682int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2681 int p_id, int pdev_id, char *p_name, 2683 char *p_name, proc_handler *handler)
2682 proc_handler *handler)
2683{ 2684{
2684 struct neigh_sysctl_table *t; 2685 struct neigh_sysctl_table *t;
2685 const char *dev_name_source = NULL; 2686 const char *dev_name_source = NULL;
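
The neighbour.c hunks above stop stashing the proc_dir_entry in seq->private and store the neigh_table itself (PDE(inode)->data) there instead, and they size the sysctl template with an explicit NEIGH_VARS_MAX. A minimal sketch of the simplified seq_file side follows; the show function name is hypothetical, while the registration with proc_create_data(tbl->id, ..., tbl) already exists in this file.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>

/* Illustrative only: with the table pointer itself copied into seq->private
 * at open time (from PDE(inode)->data), the handlers reach it in one step.
 */
static int neigh_entries_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;

	seq_printf(seq, "%s: %d entries\n", tbl->id,
		   atomic_read(&tbl->entries));
	return 0;
}
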
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 099c753c4213..59cfc7d8fc45 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/slab.h>
16#include <net/sock.h> 17#include <net/sock.h>
17#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
18#include <linux/wireless.h> 19#include <linux/wireless.h>
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index f1e982c508bb..afa6380ed88a 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -19,6 +19,7 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/netlink.h> 20#include <linux/netlink.h>
21#include <linux/net_dropmon.h> 21#include <linux/net_dropmon.h>
22#include <linux/slab.h>
22 23
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24#include <asm/bitops.h> 25#include <asm/bitops.h>
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef40..a58f59b97597 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/slab.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
26#include <net/udp.h> 27#include <net/udp.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
@@ -407,11 +408,24 @@ static void arp_reply(struct sk_buff *skb)
407 __be32 sip, tip; 408 __be32 sip, tip;
408 unsigned char *sha; 409 unsigned char *sha;
409 struct sk_buff *send_skb; 410 struct sk_buff *send_skb;
410 struct netpoll *np = NULL; 411 struct netpoll *np, *tmp;
412 unsigned long flags;
413 int hits = 0;
414
415 if (list_empty(&npinfo->rx_np))
416 return;
417
418 /* Before checking the packet, we do some early
419 inspection whether this is interesting at all */
420 spin_lock_irqsave(&npinfo->rx_lock, flags);
421 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
422 if (np->dev == skb->dev)
423 hits++;
424 }
425 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
411 426
412 if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) 427 /* No netpoll struct is using this dev */
413 np = npinfo->rx_np; 428 if (!hits)
414 if (!np)
415 return; 429 return;
416 430
417 /* No arp on this interface */ 431 /* No arp on this interface */
@@ -437,77 +451,91 @@ static void arp_reply(struct sk_buff *skb)
437 arp_ptr += skb->dev->addr_len; 451 arp_ptr += skb->dev->addr_len;
438 memcpy(&sip, arp_ptr, 4); 452 memcpy(&sip, arp_ptr, 4);
439 arp_ptr += 4; 453 arp_ptr += 4;
440 /* if we actually cared about dst hw addr, it would get copied here */ 454 /* If we actually cared about dst hw addr,
455 it would get copied here */
441 arp_ptr += skb->dev->addr_len; 456 arp_ptr += skb->dev->addr_len;
442 memcpy(&tip, arp_ptr, 4); 457 memcpy(&tip, arp_ptr, 4);
443 458
444 /* Should we ignore arp? */ 459 /* Should we ignore arp? */
445 if (tip != np->local_ip || 460 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
446 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
447 return; 461 return;
448 462
449 size = arp_hdr_len(skb->dev); 463 size = arp_hdr_len(skb->dev);
450 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
451 LL_RESERVED_SPACE(np->dev));
452 464
453 if (!send_skb) 465 spin_lock_irqsave(&npinfo->rx_lock, flags);
454 return; 466 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
455 467 if (tip != np->local_ip)
456 skb_reset_network_header(send_skb); 468 continue;
457 arp = (struct arphdr *) skb_put(send_skb, size);
458 send_skb->dev = skb->dev;
459 send_skb->protocol = htons(ETH_P_ARP);
460 469
461 /* Fill the device header for the ARP frame */ 470 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
462 if (dev_hard_header(send_skb, skb->dev, ptype, 471 LL_RESERVED_SPACE(np->dev));
463 sha, np->dev->dev_addr, 472 if (!send_skb)
464 send_skb->len) < 0) { 473 continue;
465 kfree_skb(send_skb);
466 return;
467 }
468 474
469 /* 475 skb_reset_network_header(send_skb);
470 * Fill out the arp protocol part. 476 arp = (struct arphdr *) skb_put(send_skb, size);
471 * 477 send_skb->dev = skb->dev;
472 * we only support ethernet device type, 478 send_skb->protocol = htons(ETH_P_ARP);
473 * which (according to RFC 1390) should always equal 1 (Ethernet).
474 */
475 479
476 arp->ar_hrd = htons(np->dev->type); 480 /* Fill the device header for the ARP frame */
477 arp->ar_pro = htons(ETH_P_IP); 481 if (dev_hard_header(send_skb, skb->dev, ptype,
478 arp->ar_hln = np->dev->addr_len; 482 sha, np->dev->dev_addr,
479 arp->ar_pln = 4; 483 send_skb->len) < 0) {
480 arp->ar_op = htons(type); 484 kfree_skb(send_skb);
485 continue;
486 }
481 487
482 arp_ptr=(unsigned char *)(arp + 1); 488 /*
483 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); 489 * Fill out the arp protocol part.
484 arp_ptr += np->dev->addr_len; 490 *
485 memcpy(arp_ptr, &tip, 4); 491 * we only support ethernet device type,
486 arp_ptr += 4; 492 * which (according to RFC 1390) should
487 memcpy(arp_ptr, sha, np->dev->addr_len); 493 * always equal 1 (Ethernet).
488 arp_ptr += np->dev->addr_len; 494 */
489 memcpy(arp_ptr, &sip, 4);
490 495
491 netpoll_send_skb(np, send_skb); 496 arp->ar_hrd = htons(np->dev->type);
497 arp->ar_pro = htons(ETH_P_IP);
498 arp->ar_hln = np->dev->addr_len;
499 arp->ar_pln = 4;
500 arp->ar_op = htons(type);
501
502 arp_ptr = (unsigned char *)(arp + 1);
503 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
504 arp_ptr += np->dev->addr_len;
505 memcpy(arp_ptr, &tip, 4);
506 arp_ptr += 4;
507 memcpy(arp_ptr, sha, np->dev->addr_len);
508 arp_ptr += np->dev->addr_len;
509 memcpy(arp_ptr, &sip, 4);
510
511 netpoll_send_skb(np, send_skb);
512
513 /* If there are several rx_hooks for the same address,
514 we're fine by sending a single reply */
515 break;
516 }
517 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
492} 518}
493 519
494int __netpoll_rx(struct sk_buff *skb) 520int __netpoll_rx(struct sk_buff *skb)
495{ 521{
496 int proto, len, ulen; 522 int proto, len, ulen;
523 int hits = 0;
497 struct iphdr *iph; 524 struct iphdr *iph;
498 struct udphdr *uh; 525 struct udphdr *uh;
499 struct netpoll_info *npi = skb->dev->npinfo; 526 struct netpoll_info *npinfo = skb->dev->npinfo;
500 struct netpoll *np = npi->rx_np; 527 struct netpoll *np, *tmp;
501 528
502 if (!np) 529 if (list_empty(&npinfo->rx_np))
503 goto out; 530 goto out;
531
504 if (skb->dev->type != ARPHRD_ETHER) 532 if (skb->dev->type != ARPHRD_ETHER)
505 goto out; 533 goto out;
506 534
507 /* check if netpoll clients need ARP */ 535 /* check if netpoll clients need ARP */
508 if (skb->protocol == htons(ETH_P_ARP) && 536 if (skb->protocol == htons(ETH_P_ARP) &&
509 atomic_read(&trapped)) { 537 atomic_read(&trapped)) {
510 skb_queue_tail(&npi->arp_tx, skb); 538 skb_queue_tail(&npinfo->arp_tx, skb);
511 return 1; 539 return 1;
512 } 540 }
513 541
@@ -551,16 +579,23 @@ int __netpoll_rx(struct sk_buff *skb)
551 goto out; 579 goto out;
552 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) 580 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
553 goto out; 581 goto out;
554 if (np->local_ip && np->local_ip != iph->daddr)
555 goto out;
556 if (np->remote_ip && np->remote_ip != iph->saddr)
557 goto out;
558 if (np->local_port && np->local_port != ntohs(uh->dest))
559 goto out;
560 582
561 np->rx_hook(np, ntohs(uh->source), 583 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
562 (char *)(uh+1), 584 if (np->local_ip && np->local_ip != iph->daddr)
563 ulen - sizeof(struct udphdr)); 585 continue;
586 if (np->remote_ip && np->remote_ip != iph->saddr)
587 continue;
588 if (np->local_port && np->local_port != ntohs(uh->dest))
589 continue;
590
591 np->rx_hook(np, ntohs(uh->source),
592 (char *)(uh+1),
593 ulen - sizeof(struct udphdr));
594 hits++;
595 }
596
597 if (!hits)
598 goto out;
564 599
565 kfree_skb(skb); 600 kfree_skb(skb);
566 return 1; 601 return 1;
@@ -580,7 +615,7 @@ void netpoll_print_options(struct netpoll *np)
580 np->name, np->local_port); 615 np->name, np->local_port);
581 printk(KERN_INFO "%s: local IP %pI4\n", 616 printk(KERN_INFO "%s: local IP %pI4\n",
582 np->name, &np->local_ip); 617 np->name, &np->local_ip);
583 printk(KERN_INFO "%s: interface %s\n", 618 printk(KERN_INFO "%s: interface '%s'\n",
584 np->name, np->dev_name); 619 np->name, np->dev_name);
585 printk(KERN_INFO "%s: remote port %d\n", 620 printk(KERN_INFO "%s: remote port %d\n",
586 np->name, np->remote_port); 621 np->name, np->remote_port);
@@ -627,6 +662,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
627 if ((delim = strchr(cur, '@')) == NULL) 662 if ((delim = strchr(cur, '@')) == NULL)
628 goto parse_failed; 663 goto parse_failed;
629 *delim = 0; 664 *delim = 0;
665 if (*cur == ' ' || *cur == '\t')
666 printk(KERN_INFO "%s: warning: whitespace"
667 "is not allowed\n", np->name);
630 np->remote_port = simple_strtol(cur, NULL, 10); 668 np->remote_port = simple_strtol(cur, NULL, 10);
631 cur = delim; 669 cur = delim;
632 } 670 }
@@ -674,7 +712,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
674 return 0; 712 return 0;
675 713
676 parse_failed: 714 parse_failed:
677 printk(KERN_INFO "%s: couldn't parse config at %s!\n", 715 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
678 np->name, cur); 716 np->name, cur);
679 return -1; 717 return -1;
680} 718}
@@ -684,6 +722,7 @@ int netpoll_setup(struct netpoll *np)
684 struct net_device *ndev = NULL; 722 struct net_device *ndev = NULL;
685 struct in_device *in_dev; 723 struct in_device *in_dev;
686 struct netpoll_info *npinfo; 724 struct netpoll_info *npinfo;
725 struct netpoll *npe, *tmp;
687 unsigned long flags; 726 unsigned long flags;
688 int err; 727 int err;
689 728
@@ -700,11 +739,11 @@ int netpoll_setup(struct netpoll *np)
700 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); 739 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
701 if (!npinfo) { 740 if (!npinfo) {
702 err = -ENOMEM; 741 err = -ENOMEM;
703 goto release; 742 goto put;
704 } 743 }
705 744
706 npinfo->rx_flags = 0; 745 npinfo->rx_flags = 0;
707 npinfo->rx_np = NULL; 746 INIT_LIST_HEAD(&npinfo->rx_np);
708 747
709 spin_lock_init(&npinfo->rx_lock); 748 spin_lock_init(&npinfo->rx_lock);
710 skb_queue_head_init(&npinfo->arp_tx); 749 skb_queue_head_init(&npinfo->arp_tx);
@@ -785,7 +824,7 @@ int netpoll_setup(struct netpoll *np)
785 if (np->rx_hook) { 824 if (np->rx_hook) {
786 spin_lock_irqsave(&npinfo->rx_lock, flags); 825 spin_lock_irqsave(&npinfo->rx_lock, flags);
787 npinfo->rx_flags |= NETPOLL_RX_ENABLED; 826 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
788 npinfo->rx_np = np; 827 list_add_tail(&np->rx, &npinfo->rx_np);
789 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 828 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
790 } 829 }
791 830
@@ -801,9 +840,16 @@ int netpoll_setup(struct netpoll *np)
801 return 0; 840 return 0;
802 841
803 release: 842 release:
804 if (!ndev->npinfo) 843 if (!ndev->npinfo) {
844 spin_lock_irqsave(&npinfo->rx_lock, flags);
845 list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
846 npe->dev = NULL;
847 }
848 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
849
805 kfree(npinfo); 850 kfree(npinfo);
806 np->dev = NULL; 851 }
852put:
807 dev_put(ndev); 853 dev_put(ndev);
808 return err; 854 return err;
809} 855}
@@ -823,10 +869,11 @@ void netpoll_cleanup(struct netpoll *np)
823 if (np->dev) { 869 if (np->dev) {
824 npinfo = np->dev->npinfo; 870 npinfo = np->dev->npinfo;
825 if (npinfo) { 871 if (npinfo) {
826 if (npinfo->rx_np == np) { 872 if (!list_empty(&npinfo->rx_np)) {
827 spin_lock_irqsave(&npinfo->rx_lock, flags); 873 spin_lock_irqsave(&npinfo->rx_lock, flags);
828 npinfo->rx_np = NULL; 874 list_del(&np->rx);
829 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; 875 if (list_empty(&npinfo->rx_np))
876 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
830 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 877 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
831 } 878 }
832 879
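
The netpoll.c hunks above replace the single npinfo->rx_np pointer with a list (npinfo->rx_np plus a per-netpoll ->rx node), so arp_reply() and __netpoll_rx() now walk every registered client, and netpoll_cleanup() only clears NETPOLL_RX_ENABLED once the list is empty. A hedged sketch of the attach/detach shape this implies is below; the helper names are hypothetical, and NETPOLL_RX_ENABLED is local to net/core/netpoll.c, so such helpers would have to live in that file.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/netpoll.h>

/* Illustrative only: the setup/cleanup paths after the conversion from a
 * single rx_np pointer to a list of rx clients.
 */
static void netpoll_attach_rx(struct netpoll_info *npinfo, struct netpoll *np)
{
	unsigned long flags;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	npinfo->rx_flags |= NETPOLL_RX_ENABLED;
	list_add_tail(&np->rx, &npinfo->rx_np);	/* was: npinfo->rx_np = np */
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

static void netpoll_detach_rx(struct netpoll_info *npinfo, struct netpoll *np)
{
	unsigned long flags;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_del(&np->rx);
	/* RX stays enabled until the last client is gone */
	if (list_empty(&npinfo->rx_np))
		npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
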
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2e692afdc55d..43923811bd6a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2188,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
2188/* If there was already an IPSEC SA, we keep it as is, else 2188/* If there was already an IPSEC SA, we keep it as is, else
2189 * we go look for it ... 2189 * we go look for it ...
2190*/ 2190*/
2191#define DUMMY_MARK 0
2191static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) 2192static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2192{ 2193{
2193 struct xfrm_state *x = pkt_dev->flows[flow].x; 2194 struct xfrm_state *x = pkt_dev->flows[flow].x;
2194 if (!x) { 2195 if (!x) {
2195 /*slow path: we dont already have xfrm_state*/ 2196 /*slow path: we dont already have xfrm_state*/
2196 x = xfrm_stateonly_find(&init_net, 2197 x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
2197 (xfrm_address_t *)&pkt_dev->cur_daddr, 2198 (xfrm_address_t *)&pkt_dev->cur_daddr,
2198 (xfrm_address_t *)&pkt_dev->cur_saddr, 2199 (xfrm_address_t *)&pkt_dev->cur_saddr,
2199 AF_INET, 2200 AF_INET,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff0..fe776c9ddeca 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,6 +35,7 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/pci.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/system.h> 41#include <asm/system.h>
@@ -89,6 +90,14 @@ int rtnl_is_locked(void)
89} 90}
90EXPORT_SYMBOL(rtnl_is_locked); 91EXPORT_SYMBOL(rtnl_is_locked);
91 92
93#ifdef CONFIG_PROVE_LOCKING
94int lockdep_rtnl_is_held(void)
95{
96 return lockdep_is_held(&rtnl_mutex);
97}
98EXPORT_SYMBOL(lockdep_rtnl_is_held);
99#endif /* #ifdef CONFIG_PROVE_LOCKING */
100
92static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 101static struct rtnl_link *rtnl_msg_handlers[NPROTO];
93 102
94static inline int rtm_msgindex(int msgtype) 103static inline int rtm_msgindex(int msgtype)
@@ -548,6 +557,19 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
548 } 557 }
549} 558}
550 559
560static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
561 const struct ifinfomsg *ifm)
562{
563 unsigned int flags = ifm->ifi_flags;
564
565 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
566 if (ifm->ifi_change)
567 flags = (flags & ifm->ifi_change) |
568 (dev->flags & ~ifm->ifi_change);
569
570 return flags;
571}
572
551static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 573static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
552 const struct net_device_stats *b) 574 const struct net_device_stats *b)
553{ 575{
@@ -580,6 +602,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
580 a->tx_compressed = b->tx_compressed; 602 a->tx_compressed = b->tx_compressed;
581}; 603};
582 604
605static inline int rtnl_vfinfo_size(const struct net_device *dev)
606{
607 if (dev->dev.parent && dev_is_pci(dev->dev.parent))
608 return dev_num_vf(dev->dev.parent) *
609 sizeof(struct ifla_vf_info);
610 else
611 return 0;
612}
613
583static inline size_t if_nlmsg_size(const struct net_device *dev) 614static inline size_t if_nlmsg_size(const struct net_device *dev)
584{ 615{
585 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 616 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -597,6 +628,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
597 + nla_total_size(4) /* IFLA_MASTER */ 628 + nla_total_size(4) /* IFLA_MASTER */
598 + nla_total_size(1) /* IFLA_OPERSTATE */ 629 + nla_total_size(1) /* IFLA_OPERSTATE */
599 + nla_total_size(1) /* IFLA_LINKMODE */ 630 + nla_total_size(1) /* IFLA_LINKMODE */
631 + nla_total_size(4) /* IFLA_NUM_VF */
632 + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
600 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ 633 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
601} 634}
602 635
@@ -665,6 +698,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
665 stats = dev_get_stats(dev); 698 stats = dev_get_stats(dev);
666 copy_rtnl_link_stats(nla_data(attr), stats); 699 copy_rtnl_link_stats(nla_data(attr), stats);
667 700
701 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
702 int i;
703 struct ifla_vf_info ivi;
704
705 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
706 for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
707 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
708 break;
709 NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
710 }
711 }
668 if (dev->rtnl_link_ops) { 712 if (dev->rtnl_link_ops) {
669 if (rtnl_link_fill(skb, dev) < 0) 713 if (rtnl_link_fill(skb, dev) < 0)
670 goto nla_put_failure; 714 goto nla_put_failure;
@@ -725,6 +769,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
725 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 769 [IFLA_LINKINFO] = { .type = NLA_NESTED },
726 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 770 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
727 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 771 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
772 [IFLA_VF_MAC] = { .type = NLA_BINARY,
773 .len = sizeof(struct ifla_vf_mac) },
774 [IFLA_VF_VLAN] = { .type = NLA_BINARY,
775 .len = sizeof(struct ifla_vf_vlan) },
776 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
777 .len = sizeof(struct ifla_vf_tx_rate) },
728}; 778};
729EXPORT_SYMBOL(ifla_policy); 779EXPORT_SYMBOL(ifla_policy);
730 780
@@ -875,13 +925,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
875 } 925 }
876 926
877 if (ifm->ifi_flags || ifm->ifi_change) { 927 if (ifm->ifi_flags || ifm->ifi_change) {
878 unsigned int flags = ifm->ifi_flags; 928 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
879
880 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
881 if (ifm->ifi_change)
882 flags = (flags & ifm->ifi_change) |
883 (dev->flags & ~ifm->ifi_change);
884 err = dev_change_flags(dev, flags);
885 if (err < 0) 929 if (err < 0)
886 goto errout; 930 goto errout;
887 } 931 }
@@ -898,6 +942,41 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
898 write_unlock_bh(&dev_base_lock); 942 write_unlock_bh(&dev_base_lock);
899 } 943 }
900 944
945 if (tb[IFLA_VF_MAC]) {
946 struct ifla_vf_mac *ivm;
947 ivm = nla_data(tb[IFLA_VF_MAC]);
948 err = -EOPNOTSUPP;
949 if (ops->ndo_set_vf_mac)
950 err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
951 if (err < 0)
952 goto errout;
953 modified = 1;
954 }
955
956 if (tb[IFLA_VF_VLAN]) {
957 struct ifla_vf_vlan *ivv;
958 ivv = nla_data(tb[IFLA_VF_VLAN]);
959 err = -EOPNOTSUPP;
960 if (ops->ndo_set_vf_vlan)
961 err = ops->ndo_set_vf_vlan(dev, ivv->vf,
962 ivv->vlan,
963 ivv->qos);
964 if (err < 0)
965 goto errout;
966 modified = 1;
967 }
968 err = 0;
969
970 if (tb[IFLA_VF_TX_RATE]) {
971 struct ifla_vf_tx_rate *ivt;
972 ivt = nla_data(tb[IFLA_VF_TX_RATE]);
973 err = -EOPNOTSUPP;
974 if (ops->ndo_set_vf_tx_rate)
975 err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
976 if (err < 0)
977 goto errout;
978 modified = 1;
979 }
901 err = 0; 980 err = 0;
902 981
903errout: 982errout:
@@ -989,6 +1068,26 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
989 return 0; 1068 return 0;
990} 1069}
991 1070
1071int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
1072{
1073 unsigned int old_flags;
1074 int err;
1075
1076 old_flags = dev->flags;
1077 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
1078 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
1079 if (err < 0)
1080 return err;
1081 }
1082
1083 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
1084 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1085
1086 __dev_notify_flags(dev, old_flags);
1087 return 0;
1088}
1089EXPORT_SYMBOL(rtnl_configure_link);
1090
992struct net_device *rtnl_create_link(struct net *src_net, struct net *net, 1091struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
993 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) 1092 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
994{ 1093{
@@ -1010,6 +1109,7 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1010 1109
1011 dev_net_set(dev, net); 1110 dev_net_set(dev, net);
1012 dev->rtnl_link_ops = ops; 1111 dev->rtnl_link_ops = ops;
1112 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1013 dev->real_num_tx_queues = real_num_queues; 1113 dev->real_num_tx_queues = real_num_queues;
1014 1114
1015 if (strchr(dev->name, '%')) { 1115 if (strchr(dev->name, '%')) {
@@ -1139,7 +1239,7 @@ replay:
1139 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) 1239 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
1140 return -ENODEV; 1240 return -ENODEV;
1141 1241
1142 if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change) 1242 if (ifm->ifi_index)
1143 return -EOPNOTSUPP; 1243 return -EOPNOTSUPP;
1144 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) 1244 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1145 return -EOPNOTSUPP; 1245 return -EOPNOTSUPP;
@@ -1170,9 +1270,16 @@ replay:
1170 err = ops->newlink(net, dev, tb, data); 1270 err = ops->newlink(net, dev, tb, data);
1171 else 1271 else
1172 err = register_netdevice(dev); 1272 err = register_netdevice(dev);
1273
1173 if (err < 0 && !IS_ERR(dev)) 1274 if (err < 0 && !IS_ERR(dev))
1174 free_netdev(dev); 1275 free_netdev(dev);
1276 if (err < 0)
1277 goto out;
1175 1278
1279 err = rtnl_configure_link(dev, ifm);
1280 if (err < 0)
1281 unregister_netdevice(dev);
1282out:
1176 put_net(dest_net); 1283 put_net(dest_net);
1177 return err; 1284 return err;
1178 } 1285 }
@@ -1361,17 +1468,14 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1361 struct net_device *dev = ptr; 1468 struct net_device *dev = ptr;
1362 1469
1363 switch (event) { 1470 switch (event) {
1364 case NETDEV_UNREGISTER:
1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
1366 break;
1367 case NETDEV_UP: 1471 case NETDEV_UP:
1368 case NETDEV_DOWN: 1472 case NETDEV_DOWN:
1369 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1473 case NETDEV_PRE_UP:
1370 break;
1371 case NETDEV_POST_INIT: 1474 case NETDEV_POST_INIT:
1372 case NETDEV_REGISTER: 1475 case NETDEV_REGISTER:
1373 case NETDEV_CHANGE: 1476 case NETDEV_CHANGE:
1374 case NETDEV_GOING_DOWN: 1477 case NETDEV_GOING_DOWN:
1478 case NETDEV_UNREGISTER:
1375 case NETDEV_UNREGISTER_BATCH: 1479 case NETDEV_UNREGISTER_BATCH:
1376 break; 1480 break;
1377 default: 1481 default:
@@ -1386,7 +1490,7 @@ static struct notifier_block rtnetlink_dev_notifier = {
1386}; 1490};
1387 1491
1388 1492
1389static int rtnetlink_net_init(struct net *net) 1493static int __net_init rtnetlink_net_init(struct net *net)
1390{ 1494{
1391 struct sock *sk; 1495 struct sock *sk;
1392 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1496 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
@@ -1397,7 +1501,7 @@ static int rtnetlink_net_init(struct net *net)
1397 return 0; 1501 return 0;
1398} 1502}
1399 1503
1400static void rtnetlink_net_exit(struct net *net) 1504static void __net_exit rtnetlink_net_exit(struct net *net)
1401{ 1505{
1402 netlink_kernel_release(net->rtnl); 1506 netlink_kernel_release(net->rtnl);
1403 net->rtnl = NULL; 1507 net->rtnl = NULL;
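
The rtnetlink.c hunks above add the SR-IOV VF attributes (IFLA_VF_MAC/VLAN/TX_RATE, IFLA_NUM_VF, IFLA_VFINFO), introduce rtnl_configure_link(), and factor the flag-merging rule out into rtnl_dev_combine_flags(). That rule is easy to state in plain C; the following standalone illustration (userspace, not kernel code) shows the arithmetic, including the legacy ifi_change == 0 case.

#include <stdio.h>

/* Standalone illustration (not from the patch) of the merge rule that
 * rtnl_dev_combine_flags() centralises: bits named in ifi_change come from
 * the request, the rest keep the device's current value; ifi_change == 0
 * keeps the historical "change everything" behaviour.
 */
static unsigned int combine_flags(unsigned int dev_flags,
				  unsigned int ifi_flags,
				  unsigned int ifi_change)
{
	if (!ifi_change)
		return ifi_flags;
	return (ifi_flags & ifi_change) | (dev_flags & ~ifi_change);
}

int main(void)
{
	/* clear only bit 0 (IFF_UP) on a device whose flags are 0x1043 */
	printf("0x%x\n", combine_flags(0x1043, 0x0000, 0x0001)); /* 0x1042 */
	/* ifi_change == 0: the request replaces the flags wholesale */
	printf("0x%x\n", combine_flags(0x1043, 0x1002, 0x0000)); /* 0x1002 */
	return 0;
}
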
diff --git a/net/core/scm.c b/net/core/scm.c
index b7ba91b074b3..b88f6f9d0b97 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -26,6 +26,7 @@
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/pid.h> 27#include <linux/pid.h>
28#include <linux/nsproxy.h> 28#include <linux/nsproxy.h>
29#include <linux/slab.h>
29 30
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
@@ -156,6 +157,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
156 switch (cmsg->cmsg_type) 157 switch (cmsg->cmsg_type)
157 { 158 {
158 case SCM_RIGHTS: 159 case SCM_RIGHTS:
160 if (!sock->ops || sock->ops->family != PF_UNIX)
161 goto error;
159 err=scm_fp_copy(cmsg, &p->fp); 162 err=scm_fp_copy(cmsg, &p->fp);
160 if (err<0) 163 if (err<0)
161 goto error; 164 goto error;
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f012..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
340 rc = sk_backlog_rcv(sk, skb); 340 rc = sk_backlog_rcv(sk, skb);
341 341
342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
343 } else 343 } else if (sk_add_backlog(sk, skb)) {
344 sk_add_backlog(sk, skb); 344 bh_unlock_sock(sk);
345 atomic_inc(&sk->sk_drops);
346 goto discard_and_relse;
347 }
348
345 bh_unlock_sock(sk); 349 bh_unlock_sock(sk);
346out: 350out:
347 sock_put(sk); 351 sock_put(sk);
@@ -741,7 +745,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
741 struct timeval tm; 745 struct timeval tm;
742 } v; 746 } v;
743 747
744 unsigned int lv = sizeof(int); 748 int lv = sizeof(int);
745 int len; 749 int len;
746 750
747 if (get_user(len, optlen)) 751 if (get_user(len, optlen))
@@ -1073,7 +1077,8 @@ static void __sk_free(struct sock *sk)
1073 if (sk->sk_destruct) 1077 if (sk->sk_destruct)
1074 sk->sk_destruct(sk); 1078 sk->sk_destruct(sk);
1075 1079
1076 filter = rcu_dereference(sk->sk_filter); 1080 filter = rcu_dereference_check(sk->sk_filter,
1081 atomic_read(&sk->sk_wmem_alloc) == 0);
1077 if (filter) { 1082 if (filter) {
1078 sk_filter_uncharge(sk, filter); 1083 sk_filter_uncharge(sk, filter);
1079 rcu_assign_pointer(sk->sk_filter, NULL); 1084 rcu_assign_pointer(sk->sk_filter, NULL);
@@ -1138,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1138 sock_lock_init(newsk); 1143 sock_lock_init(newsk);
1139 bh_lock_sock(newsk); 1144 bh_lock_sock(newsk);
1140 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 1145 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1146 newsk->sk_backlog.len = 0;
1141 1147
1142 atomic_set(&newsk->sk_rmem_alloc, 0); 1148 atomic_set(&newsk->sk_rmem_alloc, 0);
1143 /* 1149 /*
@@ -1541,6 +1547,12 @@ static void __release_sock(struct sock *sk)
1541 1547
1542 bh_lock_sock(sk); 1548 bh_lock_sock(sk);
1543 } while ((skb = sk->sk_backlog.head) != NULL); 1549 } while ((skb = sk->sk_backlog.head) != NULL);
1550
1551 /*
1552 * Doing the zeroing here guarantee we can not loop forever
1553 * while a wild producer attempts to flood us.
1554 */
1555 sk->sk_backlog.len = 0;
1544} 1556}
1545 1557
1546/** 1558/**
@@ -1873,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1873 sk->sk_allocation = GFP_KERNEL; 1885 sk->sk_allocation = GFP_KERNEL;
1874 sk->sk_rcvbuf = sysctl_rmem_default; 1886 sk->sk_rcvbuf = sysctl_rmem_default;
1875 sk->sk_sndbuf = sysctl_wmem_default; 1887 sk->sk_sndbuf = sysctl_wmem_default;
1888 sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
1876 sk->sk_state = TCP_CLOSE; 1889 sk->sk_state = TCP_CLOSE;
1877 sk_set_socket(sk, sock); 1890 sk_set_socket(sk, sock);
1878 1891
@@ -2140,13 +2153,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
2140} 2153}
2141EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2154EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2142 2155
2143static int sock_inuse_init_net(struct net *net) 2156static int __net_init sock_inuse_init_net(struct net *net)
2144{ 2157{
2145 net->core.inuse = alloc_percpu(struct prot_inuse); 2158 net->core.inuse = alloc_percpu(struct prot_inuse);
2146 return net->core.inuse ? 0 : -ENOMEM; 2159 return net->core.inuse ? 0 : -ENOMEM;
2147} 2160}
2148 2161
2149static void sock_inuse_exit_net(struct net *net) 2162static void __net_exit sock_inuse_exit_net(struct net *net)
2150{ 2163{
2151 free_percpu(net->core.inuse); 2164 free_percpu(net->core.inuse);
2152} 2165}
@@ -2228,13 +2241,10 @@ int proto_register(struct proto *prot, int alloc_slab)
2228 } 2241 }
2229 2242
2230 if (prot->rsk_prot != NULL) { 2243 if (prot->rsk_prot != NULL) {
2231 static const char mask[] = "request_sock_%s"; 2244 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2232
2233 prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2234 if (prot->rsk_prot->slab_name == NULL) 2245 if (prot->rsk_prot->slab_name == NULL)
2235 goto out_free_sock_slab; 2246 goto out_free_sock_slab;
2236 2247
2237 sprintf(prot->rsk_prot->slab_name, mask, prot->name);
2238 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, 2248 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2239 prot->rsk_prot->obj_size, 0, 2249 prot->rsk_prot->obj_size, 0,
2240 SLAB_HWCACHE_ALIGN, NULL); 2250 SLAB_HWCACHE_ALIGN, NULL);
@@ -2247,14 +2257,11 @@ int proto_register(struct proto *prot, int alloc_slab)
2247 } 2257 }
2248 2258
2249 if (prot->twsk_prot != NULL) { 2259 if (prot->twsk_prot != NULL) {
2250 static const char mask[] = "tw_sock_%s"; 2260 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2251
2252 prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2253 2261
2254 if (prot->twsk_prot->twsk_slab_name == NULL) 2262 if (prot->twsk_prot->twsk_slab_name == NULL)
2255 goto out_free_request_sock_slab; 2263 goto out_free_request_sock_slab;
2256 2264
2257 sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
2258 prot->twsk_prot->twsk_slab = 2265 prot->twsk_prot->twsk_slab =
2259 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 2266 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2260 prot->twsk_prot->twsk_obj_size, 2267 prot->twsk_prot->twsk_obj_size,
@@ -2281,7 +2288,8 @@ out_free_request_sock_slab:
2281 prot->rsk_prot->slab = NULL; 2288 prot->rsk_prot->slab = NULL;
2282 } 2289 }
2283out_free_request_sock_slab_name: 2290out_free_request_sock_slab_name:
2284 kfree(prot->rsk_prot->slab_name); 2291 if (prot->rsk_prot)
2292 kfree(prot->rsk_prot->slab_name);
2285out_free_sock_slab: 2293out_free_sock_slab:
2286 kmem_cache_destroy(prot->slab); 2294 kmem_cache_destroy(prot->slab);
2287 prot->slab = NULL; 2295 prot->slab = NULL;
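
The sock.c hunks above bound the socket backlog (sk_add_backlog() can now fail once sk_backlog.len exceeds sk_backlog.limit, which sock_init_data() seeds with twice sk_rcvbuf, with drops counted in sk_drops) and replace the hand-sized kmalloc()+sprintf() slab-name construction in proto_register() with kasprintf(). The latter idiom in isolation, as a minimal sketch with a made-up helper name:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative helper (not in the tree): kasprintf() sizes, allocates and
 * formats in one call; the caller checks for NULL and later kfree()s it.
 */
static char *make_request_sock_slab_name(const char *proto_name)
{
	return kasprintf(GFP_KERNEL, "request_sock_%s", proto_name);
}
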
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 06124872af5b..b7b6b8208f75 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -12,6 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/ratelimit.h> 13#include <linux/ratelimit.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
15 16
16#include <net/ip.h> 17#include <net/ip.h>
17#include <net/sock.h> 18#include <net/sock.h>
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388f..19ac2b985485 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/netlink.h> 21#include <linux/netlink.h>
22#include <linux/slab.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23#include <net/rtnetlink.h> 24#include <net/rtnetlink.h>
24#include <linux/dcbnl.h> 25#include <linux/dcbnl.h>
@@ -54,7 +55,7 @@ MODULE_LICENSE("GPL");
54/**************** DCB attribute policies *************************************/ 55/**************** DCB attribute policies *************************************/
55 56
56/* DCB netlink attributes policy */ 57/* DCB netlink attributes policy */
57static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { 58static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, 59 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
59 [DCB_ATTR_STATE] = {.type = NLA_U8}, 60 [DCB_ATTR_STATE] = {.type = NLA_U8},
60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, 61 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +69,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
68}; 69};
69 70
70/* DCB priority flow control to User Priority nested attributes */ 71/* DCB priority flow control to User Priority nested attributes */
71static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { 72static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, 73 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, 74 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, 75 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +82,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
81}; 82};
82 83
83/* DCB priority grouping nested attributes */ 84/* DCB priority grouping nested attributes */
84static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { 85static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, 86 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, 87 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, 88 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +104,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
103}; 104};
104 105
105/* DCB traffic class nested attributes. */ 106/* DCB traffic class nested attributes. */
106static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { 107static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, 108 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, 109 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, 110 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +113,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
112}; 113};
113 114
114/* DCB capabilities nested attributes. */ 115/* DCB capabilities nested attributes. */
115static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { 116static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, 117 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, 118 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, 119 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +125,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
124}; 125};
125 126
126/* DCB capabilities nested attributes. */ 127/* DCB capabilities nested attributes. */
127static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { 128static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, 129 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, 130 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, 131 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
131}; 132};
132 133
133/* DCB BCN nested attributes. */ 134/* DCB BCN nested attributes. */
134static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { 135static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, 136 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, 137 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, 138 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +161,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
160}; 161};
161 162
162/* DCB APP nested attributes. */ 163/* DCB APP nested attributes. */
163static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { 164static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, 165 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
165 [DCB_APP_ATTR_ID] = {.type = NLA_U16}, 166 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, 167 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
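
The dcbnl.c hunks above only add const to the netlink attribute policies (plus a slab.h include). For context, a policy table is read-only metadata consumed by nla_parse_nested(), so it can live in rodata; a hedged, self-contained shape of that pairing is sketched below with placeholder attribute names, not DCB's real ones.

#include <net/netlink.h>

/* Illustrative only: a const policy table and the parse call that uses it. */
enum { EX_ATTR_UNSPEC, EX_ATTR_STATE, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_STATE] = { .type = NLA_U8 },
};

static int ex_parse(struct nlattr *nest)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];

	return nla_parse_nested(tb, EX_ATTR_MAX, nest, ex_policy);
}
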
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff16e9df1969..36479ca61e03 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -11,6 +11,8 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/slab.h>
15
14#include "ccid.h" 16#include "ccid.h"
15#include "ccids/lib/tfrc.h" 17#include "ccids/lib/tfrc.h"
16 18
@@ -63,14 +65,13 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
63 u8 *ccid_array, array_len; 65 u8 *ccid_array, array_len;
64 int err = 0; 66 int err = 0;
65 67
66 if (len < ARRAY_SIZE(ccids))
67 return -EINVAL;
68
69 if (ccid_get_builtin_ccids(&ccid_array, &array_len)) 68 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
70 return -ENOBUFS; 69 return -ENOBUFS;
71 70
72 if (put_user(array_len, optlen) || 71 if (put_user(array_len, optlen))
73 copy_to_user(optval, ccid_array, array_len)) 72 err = -EFAULT;
73 else if (len > 0 && copy_to_user(optval, ccid_array,
74 len > array_len ? array_len : len))
74 err = -EFAULT; 75 err = -EFAULT;
75 76
76 kfree(ccid_array); 77 kfree(ccid_array);
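
The ccid.c hunk above changes ccid_getsockopt_builtin_ccids() to report the full CCID array length but copy only as much as the caller's buffer holds, instead of rejecting short buffers outright. The clamped-copy rule in standalone form (plain userspace C, illustrative names and values):

#include <stdio.h>
#include <string.h>

/* Illustrative only: report the source length, copy at most the buffer size. */
static void copy_clamped(void *dst, size_t dst_len,
			 const void *src, size_t src_len, size_t *reported)
{
	*reported = src_len;
	memcpy(dst, src, dst_len < src_len ? dst_len : src_len);
}

int main(void)
{
	unsigned char ccids[] = { 2, 3 };	/* e.g. built-in CCID 2 and 3 */
	unsigned char buf[1];
	size_t full;

	copy_clamped(buf, sizeof(buf), ccids, sizeof(ccids), &full);
	printf("first ccid %u, %zu available in total\n", buf[0], full);
	return 0;
}
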
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index a47a8c918ee8..9b3ae9922be1 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -23,6 +23,7 @@
23/* 23/*
24 * This implementation should follow RFC 4341 24 * This implementation should follow RFC 4341
25 */ 25 */
26#include <linux/slab.h>
26#include "../feat.h" 27#include "../feat.h"
27#include "../ccid.h" 28#include "../ccid.h"
28#include "../dccp.h" 29#include "../dccp.h"
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 972b8dc918d6..df7dd26cf07e 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -22,6 +22,7 @@
22 * 2 of the License, or (at your option) any later version. 22 * 2 of the License, or (at your option) any later version.
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h>
25#include "ccid.h" 26#include "ccid.h"
26#include "feat.h" 27#include "feat.h"
27 28
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 7648f316310f..9ec717426024 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15 16
16#include <net/sock.h> 17#include <net/sock.h>
17 18
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dad7bc4878e0..52ffa1cde15a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/icmp.h> 14#include <linux/icmp.h>
15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/random.h> 18#include <linux/random.h>
@@ -996,16 +997,16 @@ static struct inet_protosw dccp_v4_protosw = {
996 .flags = INET_PROTOSW_ICSK, 997 .flags = INET_PROTOSW_ICSK,
997}; 998};
998 999
999static int dccp_v4_init_net(struct net *net) 1000static int __net_init dccp_v4_init_net(struct net *net)
1000{ 1001{
1001 int err; 1002 if (dccp_hashinfo.bhash == NULL)
1003 return -ESOCKTNOSUPPORT;
1002 1004
1003 err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, 1005 return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
1004 SOCK_DCCP, IPPROTO_DCCP, net); 1006 SOCK_DCCP, IPPROTO_DCCP, net);
1005 return err;
1006} 1007}
1007 1008
1008static void dccp_v4_exit_net(struct net *net) 1009static void __net_exit dccp_v4_exit_net(struct net *net)
1009{ 1010{
1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); 1011 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1011} 1012}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index baf05cf43c28..3b11e41a2929 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/random.h> 16#include <linux/random.h>
17#include <linux/slab.h>
17#include <linux/xfrm.h> 18#include <linux/xfrm.h>
18 19
19#include <net/addrconf.h> 20#include <net/addrconf.h>
@@ -1189,16 +1190,16 @@ static struct inet_protosw dccp_v6_protosw = {
1189 .flags = INET_PROTOSW_ICSK, 1190 .flags = INET_PROTOSW_ICSK,
1190}; 1191};
1191 1192
1192static int dccp_v6_init_net(struct net *net) 1193static int __net_init dccp_v6_init_net(struct net *net)
1193{ 1194{
1194 int err; 1195 if (dccp_hashinfo.bhash == NULL)
1196 return -ESOCKTNOSUPPORT;
1195 1197
1196 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, 1198 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1197 SOCK_DCCP, IPPROTO_DCCP, net); 1199 SOCK_DCCP, IPPROTO_DCCP, net);
1198 return err;
1199} 1200}
1200 1201
1201static void dccp_v6_exit_net(struct net *net) 1202static void __net_exit dccp_v6_exit_net(struct net *net)
1202{ 1203{
1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); 1204 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1204} 1205}
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..128b089d3aef 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/gfp.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/skbuff.h> 16#include <linux/skbuff.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
@@ -254,7 +255,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
254 * in main socket hash table and lock on listening 255 * in main socket hash table and lock on listening
255 * socket does not protect us more. 256 * socket does not protect us more.
256 */ 257 */
257 sk_add_backlog(child, skb); 258 __sk_add_backlog(child, skb);
258 } 259 }
259 260
260 bh_unlock_sock(child); 261 bh_unlock_sock(child);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index d6bb753bf6ad..fc3f436440b4 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -13,6 +13,7 @@
13#include <linux/dccp.h> 13#include <linux/dccp.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/slab.h>
16 17
17#include <net/inet_sock.h> 18#include <net/inet_sock.h>
18#include <net/sock.h> 19#include <net/sock.h>
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index f5b3464f1242..078e48d442fd 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/kfifo.h> 31#include <linux/kfifo.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/gfp.h>
33#include <net/net_namespace.h> 34#include <net/net_namespace.h>
34 35
35#include "dccp.h" 36#include "dccp.h"
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 671cd1413d59..a0e38d8018f5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -20,6 +20,7 @@
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <net/checksum.h> 24#include <net/checksum.h>
24 25
25#include <net/inet_sock.h> 26#include <net/inet_sock.h>
@@ -835,6 +836,8 @@ verify_sock_status:
835 len = -EFAULT; 836 len = -EFAULT;
836 break; 837 break;
837 } 838 }
839 if (flags & MSG_TRUNC)
840 len = skb->len;
838 found_fin_ok: 841 found_fin_ok:
839 if (!(flags & MSG_PEEK)) 842 if (!(flags & MSG_PEEK))
840 sk_eat_skb(sk, skb, 0); 843 sk_eat_skb(sk, skb, 0);
@@ -1003,12 +1006,13 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1003 1006
1004static inline int dccp_mib_init(void) 1007static inline int dccp_mib_init(void)
1005{ 1008{
1006 return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib)); 1009 return snmp_mib_init((void __percpu **)dccp_statistics,
1010 sizeof(struct dccp_mib));
1007} 1011}
1008 1012
1009static inline void dccp_mib_exit(void) 1013static inline void dccp_mib_exit(void)
1010{ 1014{
1011 snmp_mib_free((void**)dccp_statistics); 1015 snmp_mib_free((void __percpu **)dccp_statistics);
1012} 1016}
1013 1017
1014static int thash_entries; 1018static int thash_entries;
@@ -1033,7 +1037,7 @@ static int __init dccp_init(void)
1033 FIELD_SIZEOF(struct sk_buff, cb)); 1037 FIELD_SIZEOF(struct sk_buff, cb));
1034 rc = percpu_counter_init(&dccp_orphan_count, 0); 1038 rc = percpu_counter_init(&dccp_orphan_count, 0);
1035 if (rc) 1039 if (rc)
1036 goto out; 1040 goto out_fail;
1037 rc = -ENOBUFS; 1041 rc = -ENOBUFS;
1038 inet_hashinfo_init(&dccp_hashinfo); 1042 inet_hashinfo_init(&dccp_hashinfo);
1039 dccp_hashinfo.bind_bucket_cachep = 1043 dccp_hashinfo.bind_bucket_cachep =
@@ -1122,8 +1126,9 @@ static int __init dccp_init(void)
1122 goto out_sysctl_exit; 1126 goto out_sysctl_exit;
1123 1127
1124 dccp_timestamping_init(); 1128 dccp_timestamping_init();
1125out: 1129
1126 return rc; 1130 return 0;
1131
1127out_sysctl_exit: 1132out_sysctl_exit:
1128 dccp_sysctl_exit(); 1133 dccp_sysctl_exit();
1129out_ackvec_exit: 1134out_ackvec_exit:
@@ -1132,18 +1137,19 @@ out_free_dccp_mib:
1132 dccp_mib_exit(); 1137 dccp_mib_exit();
1133out_free_dccp_bhash: 1138out_free_dccp_bhash:
1134 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1139 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
1135 dccp_hashinfo.bhash = NULL;
1136out_free_dccp_locks: 1140out_free_dccp_locks:
1137 inet_ehash_locks_free(&dccp_hashinfo); 1141 inet_ehash_locks_free(&dccp_hashinfo);
1138out_free_dccp_ehash: 1142out_free_dccp_ehash:
1139 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); 1143 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
1140 dccp_hashinfo.ehash = NULL;
1141out_free_bind_bucket_cachep: 1144out_free_bind_bucket_cachep:
1142 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1145 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1143 dccp_hashinfo.bind_bucket_cachep = NULL;
1144out_free_percpu: 1146out_free_percpu:
1145 percpu_counter_destroy(&dccp_orphan_count); 1147 percpu_counter_destroy(&dccp_orphan_count);
1146 goto out; 1148out_fail:
1149 dccp_hashinfo.bhash = NULL;
1150 dccp_hashinfo.ehash = NULL;
1151 dccp_hashinfo.bind_bucket_cachep = NULL;
1152 return rc;
1147} 1153}
1148 1154
1149static void __exit dccp_fini(void) 1155static void __exit dccp_fini(void)
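
The recvmsg hunk above makes DCCP report the full payload size when the caller passes MSG_TRUNC, mirroring the long-standing UDP behaviour: the copied data is still capped at the buffer size, but the return value becomes the datagram's real length. A minimal user-space demonstration of that semantic, using a UDP socket on loopback purely as a stand-in for a DCCP socket:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int rx = socket(AF_INET, SOCK_DGRAM, 0);
        int tx = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in addr = { .sin_family = AF_INET };
        socklen_t alen = sizeof(addr);
        char payload[64], buf[8];
        ssize_t n;

        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr.sin_port = 0;                       /* let the kernel pick a port */
        bind(rx, (struct sockaddr *)&addr, sizeof(addr));
        getsockname(rx, (struct sockaddr *)&addr, &alen);

        memset(payload, 'x', sizeof(payload));
        sendto(tx, payload, sizeof(payload), 0,
               (struct sockaddr *)&addr, sizeof(addr));

        /* Undersized buffer + MSG_TRUNC: the return value is the datagram's
         * real length (64), not the 8 bytes that were actually copied.      */
        n = recv(rx, buf, sizeof(buf), MSG_TRUNC);
        printf("recv returned %zd (buffer was %zu bytes)\n", n, sizeof(buf));

        close(rx);
        close(tx);
        return 0;
    }
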
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 238af093495b..cead68eb254c 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -40,6 +40,7 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/sysctl.h> 41#include <linux/sysctl.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/slab.h>
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/system.h> 45#include <asm/system.h>
45#include <net/net_namespace.h> 46#include <net/net_namespace.h>
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index e9d48700e83a..4ab96c15166d 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/net.h> 21#include <linux/net.h>
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/slab.h>
23#include <linux/sockios.h> 24#include <linux/sockios.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/skbuff.h> 26#include <linux/skbuff.h>
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 794b5bf95af1..deb723dba44b 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/socket.h> 29#include <linux/socket.h>
30#include <linux/if_arp.h> 30#include <linux/if_arp.h>
31#include <linux/slab.h>
31#include <linux/if_ether.h> 32#include <linux/if_ether.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 932408dca86d..25a37299bc65 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -57,6 +57,7 @@
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/route.h> 59#include <linux/route.h>
60#include <linux/slab.h>
60#include <net/sock.h> 61#include <net/sock.h>
61#include <net/tcp_states.h> 62#include <net/tcp_states.h>
62#include <asm/system.h> 63#include <asm/system.h>
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index a65e929ce76c..baeb1eaf011b 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -50,6 +50,7 @@
50#include <linux/netdevice.h> 50#include <linux/netdevice.h>
51#include <linux/inet.h> 51#include <linux/inet.h>
52#include <linux/route.h> 52#include <linux/route.h>
53#include <linux/slab.h>
53#include <net/sock.h> 54#include <net/sock.h>
54#include <asm/system.h> 55#include <asm/system.h>
55#include <linux/fcntl.h> 56#include <linux/fcntl.h>
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a03284061a31..70ebe74027d5 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -66,6 +66,7 @@
66#include <linux/inet.h> 66#include <linux/inet.h>
67#include <linux/route.h> 67#include <linux/route.h>
68#include <linux/in_route.h> 68#include <linux/in_route.h>
69#include <linux/slab.h>
69#include <net/sock.h> 70#include <net/sock.h>
70#include <linux/mm.h> 71#include <linux/mm.h>
71#include <linux/proc_fs.h> 72#include <linux/proc_fs.h>
@@ -1155,8 +1156,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1155 1156
1156 if (!(flags & MSG_TRYHARD)) { 1157 if (!(flags & MSG_TRYHARD)) {
1157 rcu_read_lock_bh(); 1158 rcu_read_lock_bh();
1158 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; 1159 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1159 rt = rcu_dereference(rt->u.dst.dn_next)) { 1160 rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
1160 if ((flp->fld_dst == rt->fl.fld_dst) && 1161 if ((flp->fld_dst == rt->fl.fld_dst) &&
1161 (flp->fld_src == rt->fl.fld_src) && 1162 (flp->fld_src == rt->fl.fld_src) &&
1162 (flp->mark == rt->fl.mark) && 1163 (flp->mark == rt->fl.mark) &&
@@ -1618,9 +1619,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1618 if (h > s_h) 1619 if (h > s_h)
1619 s_idx = 0; 1620 s_idx = 0;
1620 rcu_read_lock_bh(); 1621 rcu_read_lock_bh();
1621 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0; 1622 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1622 rt; 1623 rt;
1623 rt = rcu_dereference(rt->u.dst.dn_next), idx++) { 1624 rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
1624 if (idx < s_idx) 1625 if (idx < s_idx)
1625 continue; 1626 continue;
1626 skb_dst_set(skb, dst_clone(&rt->u.dst)); 1627 skb_dst_set(skb, dst_clone(&rt->u.dst));
@@ -1654,12 +1655,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1654 1655
1655 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { 1656 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1656 rcu_read_lock_bh(); 1657 rcu_read_lock_bh();
1657 rt = dn_rt_hash_table[s->bucket].chain; 1658 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1658 if (rt) 1659 if (rt)
1659 break; 1660 break;
1660 rcu_read_unlock_bh(); 1661 rcu_read_unlock_bh();
1661 } 1662 }
1662 return rcu_dereference(rt); 1663 return rt;
1663} 1664}
1664 1665
1665static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) 1666static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
@@ -1674,7 +1675,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1674 rcu_read_lock_bh(); 1675 rcu_read_lock_bh();
1675 rt = dn_rt_hash_table[s->bucket].chain; 1676 rt = dn_rt_hash_table[s->bucket].chain;
1676 } 1677 }
1677 return rcu_dereference(rt); 1678 return rcu_dereference_bh(rt);
1678} 1679}
1679 1680
1680static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1681static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index b9a33bb5e9cc..f2abd3755690 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/net.h> 16#include <linux/net.h>
17#include <linux/socket.h> 17#include <linux/socket.h>
18#include <linux/slab.h>
18#include <linux/sockios.h> 19#include <linux/sockios.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/skbuff.h> 21#include <linux/skbuff.h>
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 6d2bd3202048..64a7f39e069f 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/netdevice.h> 19#include <linux/netdevice.h>
19#include <linux/netfilter.h> 20#include <linux/netfilter.h>
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 71489f69a42c..6112a12578b2 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/slab.h>
14#include <net/dsa.h> 15#include <net/dsa.h>
15#include "dsa_priv.h" 16#include "dsa_priv.h"
16 17
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index cdf2d28a0297..98dfe80b4538 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16#define DSA_HLEN 4 17#define DSA_HLEN 4
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 8f53948cff4f..6f383322ad25 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16#define DSA_HLEN 4 17#define DSA_HLEN 4
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index a85c829853c0..d6d7d0add3cb 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -11,6 +11,7 @@
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/slab.h>
14#include "dsa_priv.h" 15#include "dsa_priv.h"
15 16
16netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) 17netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 29b4931aae52..2a5a8053e000 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -30,6 +30,7 @@
30#include <linux/wireless.h> 30#include <linux/wireless.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/udp.h> 32#include <linux/udp.h>
33#include <linux/slab.h>
33#include <net/sock.h> 34#include <net/sock.h>
34#include <net/inet_common.h> 35#include <net/inet_common.h>
35#include <linux/stat.h> 36#include <linux/stat.h>
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0a..205a1c12f3c0 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
73 * @len: packet length (<= skb->len) 73 * @len: packet length (<= skb->len)
74 * 74 *
75 * 75 *
76 * Set the protocol type. For a packet of type ETH_P_802_3 we put the length 76 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
77 * in here instead. It is up to the 802.2 layer to carry protocol information. 77 * in here instead.
78 */ 78 */
79int eth_header(struct sk_buff *skb, struct net_device *dev, 79int eth_header(struct sk_buff *skb, struct net_device *dev,
80 unsigned short type, 80 unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
82{ 82{
83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
84 84
85 if (type != ETH_P_802_3) 85 if (type != ETH_P_802_3 && type != ETH_P_802_2)
86 eth->h_proto = htons(type); 86 eth->h_proto = htons(type);
87 else 87 else
88 eth->h_proto = htons(len); 88 eth->h_proto = htons(len);
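
The eth_header() change treats ETH_P_802_2 like ETH_P_802_3: for both, the 16-bit field after the MAC addresses carries the frame length rather than an EtherType, leaving protocol identification to the 802.2 LLC header. A condensed stand-alone restatement of that branch (the struct and helper are redefined here, with the constant values taken from the kernel's if_ether.h, so the snippet compiles on its own):

    #include <arpa/inet.h>   /* htons(), ntohs() */
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_802_3  0x0001
    #define ETH_P_802_2  0x0004
    #define ETH_P_IP     0x0800

    struct eth_hdr {
        uint8_t  dst[6];
        uint8_t  src[6];
        uint16_t proto;           /* EtherType, or length for 802.3/802.2 */
    };

    /* Mirror of the decision eth_header() now makes. */
    static void set_proto_field(struct eth_hdr *h, uint16_t type, uint16_t len)
    {
        if (type != ETH_P_802_3 && type != ETH_P_802_2)
            h->proto = htons(type);   /* ordinary Ethernet II frame          */
        else
            h->proto = htons(len);    /* raw 802.3/802.2: field is a length  */
    }

    int main(void)
    {
        struct eth_hdr h;

        set_proto_field(&h, ETH_P_IP, 1500);
        printf("IPv4 frame: field = 0x%04x\n", ntohs(h.proto));
        set_proto_field(&h, ETH_P_802_2, 46);
        printf("802.2 frame: field = %u (length)\n", ntohs(h.proto));
        return 0;
    }
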
diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c
index d60e15d9365e..eb00796758c3 100644
--- a/net/ethernet/pe2.c
+++ b/net/ethernet/pe2.c
@@ -3,6 +3,7 @@
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/skbuff.h> 5#include <linux/skbuff.h>
6#include <linux/slab.h>
6 7
7#include <net/datalink.h> 8#include <net/datalink.h>
8 9
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index bad1c49fd960..93c91b633a56 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -28,6 +28,7 @@
28#include <linux/if.h> 28#include <linux/if.h>
29#include <linux/termios.h> /* For TIOCOUTQ/INQ */ 29#include <linux/termios.h> /* For TIOCOUTQ/INQ */
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/slab.h>
31#include <net/datalink.h> 32#include <net/datalink.h>
32#include <net/psnap.h> 33#include <net/psnap.h>
33#include <net/sock.h> 34#include <net/sock.h>
@@ -126,6 +127,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
126{ 127{
127 struct sock *sk = sock->sk; 128 struct sock *sk = sock->sk;
128 129
130 if (addr_len < sizeof(uaddr->sa_family))
131 return -EINVAL;
132
129 if (uaddr->sa_family == AF_UNSPEC) 133 if (uaddr->sa_family == AF_UNSPEC)
130 return sk->sk_prot->disconnect(sk, flags); 134 return sk->sk_prot->disconnect(sk, flags);
131 135
@@ -147,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
147 dev_load(sock_net(sk), ifr.ifr_name); 151 dev_load(sock_net(sk), ifr.ifr_name);
148 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); 152 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
149 153
154 if (!dev)
155 return -ENODEV;
156
150 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) 157 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
151 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); 158 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
152 159
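
Both fixes in this file are defensive: connect() now rejects an address too short to even contain sa_family, and the ioctl path no longer dereferences a device pointer that dev_get_by_name() may have returned as NULL. The same length check, written as a small free-standing helper (the function name below is made up for illustration):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Reject a sockaddr that cannot even hold the address-family field,
     * before any code reads uaddr->sa_family.                            */
    static int check_sockaddr_len(const struct sockaddr *uaddr, socklen_t addr_len)
    {
        if (addr_len < (socklen_t)sizeof(uaddr->sa_family))
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct sockaddr_storage ss;
        memset(&ss, 0, sizeof(ss));

        printf("len=1  -> %d\n", check_sockaddr_len((struct sockaddr *)&ss, 1));
        printf("len=16 -> %d\n", check_sockaddr_len((struct sockaddr *)&ss, 16));
        return 0;
    }
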
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 9aac5aee1575..1a3334c2609a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/af_ieee802154.h> 30#include <net/af_ieee802154.h>
30#include <net/ieee802154.h> 31#include <net/ieee802154.h>
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 33137b99e471..c8097ae2482f 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/gfp.h>
26#include <net/genetlink.h> 27#include <net/genetlink.h>
27#include <linux/nl802154.h> 28#include <linux/nl802154.h>
28 29
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 135c1678fb11..71ee1108d4f8 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -22,6 +22,7 @@
22 * Maxim Osipov <maxim.osipov@siemens.com> 22 * Maxim Osipov <maxim.osipov@siemens.com>
23 */ 23 */
24 24
25#include <linux/gfp.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/if_arp.h> 27#include <linux/if_arp.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 199a2d9d12f9..ed0eab39f531 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
27#include <net/genetlink.h> 28#include <net/genetlink.h>
28#include <net/wpan-phy.h> 29#include <net/wpan-phy.h>
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 9c9b85c00033..10970ca85748 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/if_arp.h> 26#include <linux/if_arp.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/af_ieee802154.h> 30#include <net/af_ieee802154.h>
30 31
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 268691256a6d..3d803a1b9fb6 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -16,6 +16,7 @@
16 * 16 *
17 */ 17 */
18 18
19#include <linux/slab.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/device.h> 22#include <linux/device.h>
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d12c6a9b19b..f71357422380 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -86,6 +86,7 @@
86#include <linux/poll.h> 86#include <linux/poll.h>
87#include <linux/netfilter_ipv4.h> 87#include <linux/netfilter_ipv4.h>
88#include <linux/random.h> 88#include <linux/random.h>
89#include <linux/slab.h>
89 90
90#include <asm/uaccess.h> 91#include <asm/uaccess.h>
91#include <asm/system.h> 92#include <asm/system.h>
@@ -530,6 +531,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
530{ 531{
531 struct sock *sk = sock->sk; 532 struct sock *sk = sock->sk;
532 533
534 if (addr_len < sizeof(uaddr->sa_family))
535 return -EINVAL;
533 if (uaddr->sa_family == AF_UNSPEC) 536 if (uaddr->sa_family == AF_UNSPEC)
534 return sk->sk_prot->disconnect(sk, flags); 537 return sk->sk_prot->disconnect(sk, flags);
535 538
@@ -573,6 +576,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
573 int err; 576 int err;
574 long timeo; 577 long timeo;
575 578
579 if (addr_len < sizeof(uaddr->sa_family))
580 return -EINVAL;
581
576 lock_sock(sk); 582 lock_sock(sk);
577 583
578 if (uaddr->sa_family == AF_UNSPEC) { 584 if (uaddr->sa_family == AF_UNSPEC) {
@@ -1385,7 +1391,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1385} 1391}
1386EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1392EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1387 1393
1388unsigned long snmp_fold_field(void *mib[], int offt) 1394unsigned long snmp_fold_field(void __percpu *mib[], int offt)
1389{ 1395{
1390 unsigned long res = 0; 1396 unsigned long res = 0;
1391 int i; 1397 int i;
@@ -1398,7 +1404,7 @@ unsigned long snmp_fold_field(void *mib[], int offt)
1398} 1404}
1399EXPORT_SYMBOL_GPL(snmp_fold_field); 1405EXPORT_SYMBOL_GPL(snmp_fold_field);
1400 1406
1401int snmp_mib_init(void *ptr[2], size_t mibsize) 1407int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1402{ 1408{
1403 BUG_ON(ptr == NULL); 1409 BUG_ON(ptr == NULL);
1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1410 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
@@ -1416,7 +1422,7 @@ err0:
1416} 1422}
1417EXPORT_SYMBOL_GPL(snmp_mib_init); 1423EXPORT_SYMBOL_GPL(snmp_mib_init);
1418 1424
1419void snmp_mib_free(void *ptr[2]) 1425void snmp_mib_free(void __percpu *ptr[2])
1420{ 1426{
1421 BUG_ON(ptr == NULL); 1427 BUG_ON(ptr == NULL);
1422 free_percpu(ptr[0]); 1428 free_percpu(ptr[0]);
@@ -1460,25 +1466,25 @@ static const struct net_protocol icmp_protocol = {
1460 1466
1461static __net_init int ipv4_mib_init_net(struct net *net) 1467static __net_init int ipv4_mib_init_net(struct net *net)
1462{ 1468{
1463 if (snmp_mib_init((void **)net->mib.tcp_statistics, 1469 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
1464 sizeof(struct tcp_mib)) < 0) 1470 sizeof(struct tcp_mib)) < 0)
1465 goto err_tcp_mib; 1471 goto err_tcp_mib;
1466 if (snmp_mib_init((void **)net->mib.ip_statistics, 1472 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
1467 sizeof(struct ipstats_mib)) < 0) 1473 sizeof(struct ipstats_mib)) < 0)
1468 goto err_ip_mib; 1474 goto err_ip_mib;
1469 if (snmp_mib_init((void **)net->mib.net_statistics, 1475 if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
1470 sizeof(struct linux_mib)) < 0) 1476 sizeof(struct linux_mib)) < 0)
1471 goto err_net_mib; 1477 goto err_net_mib;
1472 if (snmp_mib_init((void **)net->mib.udp_statistics, 1478 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
1473 sizeof(struct udp_mib)) < 0) 1479 sizeof(struct udp_mib)) < 0)
1474 goto err_udp_mib; 1480 goto err_udp_mib;
1475 if (snmp_mib_init((void **)net->mib.udplite_statistics, 1481 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
1476 sizeof(struct udp_mib)) < 0) 1482 sizeof(struct udp_mib)) < 0)
1477 goto err_udplite_mib; 1483 goto err_udplite_mib;
1478 if (snmp_mib_init((void **)net->mib.icmp_statistics, 1484 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
1479 sizeof(struct icmp_mib)) < 0) 1485 sizeof(struct icmp_mib)) < 0)
1480 goto err_icmp_mib; 1486 goto err_icmp_mib;
1481 if (snmp_mib_init((void **)net->mib.icmpmsg_statistics, 1487 if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
1482 sizeof(struct icmpmsg_mib)) < 0) 1488 sizeof(struct icmpmsg_mib)) < 0)
1483 goto err_icmpmsg_mib; 1489 goto err_icmpmsg_mib;
1484 1490
@@ -1486,30 +1492,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
1486 return 0; 1492 return 0;
1487 1493
1488err_icmpmsg_mib: 1494err_icmpmsg_mib:
1489 snmp_mib_free((void **)net->mib.icmp_statistics); 1495 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1490err_icmp_mib: 1496err_icmp_mib:
1491 snmp_mib_free((void **)net->mib.udplite_statistics); 1497 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1492err_udplite_mib: 1498err_udplite_mib:
1493 snmp_mib_free((void **)net->mib.udp_statistics); 1499 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1494err_udp_mib: 1500err_udp_mib:
1495 snmp_mib_free((void **)net->mib.net_statistics); 1501 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1496err_net_mib: 1502err_net_mib:
1497 snmp_mib_free((void **)net->mib.ip_statistics); 1503 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1498err_ip_mib: 1504err_ip_mib:
1499 snmp_mib_free((void **)net->mib.tcp_statistics); 1505 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1500err_tcp_mib: 1506err_tcp_mib:
1501 return -ENOMEM; 1507 return -ENOMEM;
1502} 1508}
1503 1509
1504static __net_exit void ipv4_mib_exit_net(struct net *net) 1510static __net_exit void ipv4_mib_exit_net(struct net *net)
1505{ 1511{
1506 snmp_mib_free((void **)net->mib.icmpmsg_statistics); 1512 snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
1507 snmp_mib_free((void **)net->mib.icmp_statistics); 1513 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1508 snmp_mib_free((void **)net->mib.udplite_statistics); 1514 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1509 snmp_mib_free((void **)net->mib.udp_statistics); 1515 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1510 snmp_mib_free((void **)net->mib.net_statistics); 1516 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1511 snmp_mib_free((void **)net->mib.ip_statistics); 1517 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1512 snmp_mib_free((void **)net->mib.tcp_statistics); 1518 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1513} 1519}
1514 1520
1515static __net_initdata struct pernet_operations ipv4_mib_ops = { 1521static __net_initdata struct pernet_operations ipv4_mib_ops = {
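
Most of this hunk only adds the __percpu sparse annotation to the SNMP MIB helpers; the underlying idea is unchanged: each CPU increments its own copy of a counter without locking, and snmp_fold_field() sums the copies when a reader wants the total. A rough user-space analogy with one counter slot per thread (thread count, padding size and names are arbitrary; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NWORKERS 4
    #define PER_THREAD_INCREMENTS 100000

    /* One counter slot per worker, padded so slots do not share a cache line. */
    static struct { unsigned long val; char pad[56]; } counters[NWORKERS];

    static void *worker(void *arg)
    {
        long id = (long)arg;
        for (int i = 0; i < PER_THREAD_INCREMENTS; i++)
            counters[id].val++;        /* only this thread touches slot [id] */
        return NULL;
    }

    /* Analogue of snmp_fold_field(): sum the per-thread copies on demand. */
    static unsigned long fold(void)
    {
        unsigned long sum = 0;
        for (int i = 0; i < NWORKERS; i++)
            sum += counters[i].val;
        return sum;
    }

    int main(void)
    {
        pthread_t t[NWORKERS];

        for (long i = 0; i < NWORKERS; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NWORKERS; i++)
            pthread_join(t[i], NULL);

        printf("total = %lu\n", fold());
        return 0;
    }
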
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7ed3e4ae93ae..880a5ec6dce0 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -1,6 +1,7 @@
1#include <crypto/hash.h> 1#include <crypto/hash.h>
2#include <linux/err.h> 2#include <linux/err.h>
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h>
4#include <net/ip.h> 5#include <net/ip.h>
5#include <net/xfrm.h> 6#include <net/xfrm.h>
6#include <net/ah.h> 7#include <net/ah.h>
@@ -393,7 +394,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
393 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 394 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
394 return; 395 return;
395 396
396 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 397 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
397 if (!x) 398 if (!x)
398 return; 399 return;
399 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 400 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf29..6e747065c202 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
70 * bonding can change the skb before 70 * bonding can change the skb before
71 * sending (e.g. insert 8021q tag). 71 * sending (e.g. insert 8021q tag).
72 * Harald Welte : convert to make use of jenkins hash 72 * Harald Welte : convert to make use of jenkins hash
73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
73 */ 74 */
74 75
75#include <linux/module.h> 76#include <linux/module.h>
@@ -97,6 +98,7 @@
97#include <linux/net.h> 98#include <linux/net.h>
98#include <linux/rcupdate.h> 99#include <linux/rcupdate.h>
99#include <linux/jhash.h> 100#include <linux/jhash.h>
101#include <linux/slab.h>
100#ifdef CONFIG_SYSCTL 102#ifdef CONFIG_SYSCTL
101#include <linux/sysctl.h> 103#include <linux/sysctl.h>
102#endif 104#endif
@@ -524,12 +526,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
524/* 526/*
525 * Check if we can use proxy ARP for this path 527 * Check if we can use proxy ARP for this path
526 */ 528 */
527 529static inline int arp_fwd_proxy(struct in_device *in_dev,
528static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt) 530 struct net_device *dev, struct rtable *rt)
529{ 531{
530 struct in_device *out_dev; 532 struct in_device *out_dev;
531 int imi, omi = -1; 533 int imi, omi = -1;
532 534
535 if (rt->u.dst.dev == dev)
536 return 0;
537
533 if (!IN_DEV_PROXY_ARP(in_dev)) 538 if (!IN_DEV_PROXY_ARP(in_dev))
534 return 0; 539 return 0;
535 540
@@ -548,6 +553,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
548} 553}
549 554
550/* 555/*
556 * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
557 *
558 * RFC3069 supports proxy arp replies back to the same interface. This
559 * is done to support (ethernet) switch features, like RFC 3069, where
560 * the individual ports are not allowed to communicate with each
561 * other, BUT they are allowed to talk to the upstream router. As
562 * described in RFC 3069, it is possible to allow these hosts to
563 * communicate through the upstream router, by proxy_arp'ing.
564 *
565 * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
566 *
567 * This technology is known by different names:
568 * In RFC 3069 it is called VLAN Aggregation.
569 * Cisco and Allied Telesyn call it Private VLAN.
570 * Hewlett-Packard call it Source-Port filtering or port-isolation.
571 * Ericsson call it MAC-Forced Forwarding (RFC Draft).
572 *
573 */
574static inline int arp_fwd_pvlan(struct in_device *in_dev,
575 struct net_device *dev, struct rtable *rt,
576 __be32 sip, __be32 tip)
577{
578 /* Private VLAN is only concerned about the same ethernet segment */
579 if (rt->u.dst.dev != dev)
580 return 0;
581
582 /* Don't reply on self probes (often done by windowz boxes)*/
583 if (sip == tip)
584 return 0;
585
586 if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
587 return 1;
588 else
589 return 0;
590}
591
592/*
551 * Interface to link layer: send routine and receive handler. 593 * Interface to link layer: send routine and receive handler.
552 */ 594 */
553 595
@@ -833,8 +875,11 @@ static int arp_process(struct sk_buff *skb)
833 } 875 }
834 goto out; 876 goto out;
835 } else if (IN_DEV_FORWARD(in_dev)) { 877 } else if (IN_DEV_FORWARD(in_dev)) {
836 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev && 878 if (addr_type == RTN_UNICAST &&
837 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 879 (arp_fwd_proxy(in_dev, dev, rt) ||
880 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
881 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
882 {
838 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 883 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
839 if (n) 884 if (n)
840 neigh_release(n); 885 neigh_release(n);
@@ -863,7 +908,8 @@ static int arp_process(struct sk_buff *skb)
863 devices (strip is candidate) 908 devices (strip is candidate)
864 */ 909 */
865 if (n == NULL && 910 if (n == NULL &&
866 arp->ar_op == htons(ARPOP_REPLY) && 911 (arp->ar_op == htons(ARPOP_REPLY) ||
912 (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
867 inet_addr_type(net, sip) == RTN_UNICAST) 913 inet_addr_type(net, sip) == RTN_UNICAST)
868 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 914 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
869 } 915 }
@@ -1239,8 +1285,7 @@ void __init arp_init(void)
1239 dev_add_pack(&arp_packet_type); 1285 dev_add_pack(&arp_packet_type);
1240 arp_proc_init(); 1286 arp_proc_init();
1241#ifdef CONFIG_SYSCTL 1287#ifdef CONFIG_SYSCTL
1242 neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4, 1288 neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
1243 NET_IPV4_NEIGH, "ipv4", NULL);
1244#endif 1289#endif
1245 register_netdevice_notifier(&arp_netdev_notifier); 1290 register_netdevice_notifier(&arp_netdev_notifier);
1246} 1291}
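
The comment block added above spells out the idea: under RFC 3069 "VLAN aggregation" (private VLAN), switch ports cannot talk to each other directly, so the router may answer ARP requests on behalf of hosts that sit on the same interface the request arrived on, something classic proxy ARP refuses to do (note the new same-device bailout added to arp_fwd_proxy()). Stripped of kernel types, the combined decision the patch implements looks roughly like this (all names in the sketch are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct arp_ctx {
        int in_ifindex;        /* interface the ARP request arrived on        */
        int route_ifindex;     /* interface the route to the target points at */
        bool proxy_arp;        /* classic proxy_arp knob                      */
        bool proxy_arp_pvlan;  /* RFC 3069 knob added by this patch           */
        uint32_t sip, tip;     /* sender / target IPv4 addresses              */
    };

    /* Classic proxy ARP: only answer if the target lives behind a *different*
     * interface, and the proxy_arp knob is on.                                */
    static bool fwd_proxy(const struct arp_ctx *c)
    {
        if (c->route_ifindex == c->in_ifindex)
            return false;
        return c->proxy_arp;
    }

    /* RFC 3069 private-VLAN proxy ARP: answer even when the target is on the
     * same segment, but never for a host probing its own address (sip == tip). */
    static bool fwd_pvlan(const struct arp_ctx *c)
    {
        if (c->route_ifindex != c->in_ifindex)
            return false;
        if (c->sip == c->tip)
            return false;
        return c->proxy_arp_pvlan;
    }

    int main(void)
    {
        struct arp_ctx c = { .in_ifindex = 2, .route_ifindex = 2,
                             .proxy_arp = true, .proxy_arp_pvlan = true,
                             .sip = 0x0a000001, .tip = 0x0a000002 };

        printf("reply? %s\n", (fwd_proxy(&c) || fwd_pvlan(&c)) ? "yes" : "no");
        return 0;
    }
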
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 1e029dc75455..c97cd9ff697e 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -44,6 +44,7 @@
44#include <linux/string.h> 44#include <linux/string.h>
45#include <linux/jhash.h> 45#include <linux/jhash.h>
46#include <linux/audit.h> 46#include <linux/audit.h>
47#include <linux/slab.h>
47#include <net/ip.h> 48#include <net/ip.h>
48#include <net/icmp.h> 49#include <net/icmp.h>
49#include <net/tcp.h> 50#include <net/tcp.h>
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 26dec2be9615..90e3d6379a42 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -50,6 +50,7 @@
50#include <linux/notifier.h> 50#include <linux/notifier.h>
51#include <linux/inetdevice.h> 51#include <linux/inetdevice.h>
52#include <linux/igmp.h> 52#include <linux/igmp.h>
53#include <linux/slab.h>
53#ifdef CONFIG_SYSCTL 54#ifdef CONFIG_SYSCTL
54#include <linux/sysctl.h> 55#include <linux/sysctl.h>
55#endif 56#endif
@@ -64,20 +65,20 @@
64 65
65static struct ipv4_devconf ipv4_devconf = { 66static struct ipv4_devconf ipv4_devconf = {
66 .data = { 67 .data = {
67 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 68 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
68 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 69 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
69 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 70 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
70 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 71 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
71 }, 72 },
72}; 73};
73 74
74static struct ipv4_devconf ipv4_devconf_dflt = { 75static struct ipv4_devconf ipv4_devconf_dflt = {
75 .data = { 76 .data = {
76 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 77 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
77 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 78 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
78 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 79 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
79 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 80 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
80 [NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE - 1] = 1, 81 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
81 }, 82 },
82}; 83};
83 84
@@ -1194,7 +1195,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1195 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1195 if (idx < s_idx) 1196 if (idx < s_idx)
1196 goto cont; 1197 goto cont;
1197 if (idx > s_idx) 1198 if (h > s_h || idx > s_idx)
1198 s_ip_idx = 0; 1199 s_ip_idx = 0;
1199 in_dev = __in_dev_get_rcu(dev); 1200 in_dev = __in_dev_get_rcu(dev);
1200 if (!in_dev) 1201 if (!in_dev)
@@ -1365,7 +1366,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1365 { \ 1366 { \
1366 .procname = name, \ 1367 .procname = name, \
1367 .data = ipv4_devconf.data + \ 1368 .data = ipv4_devconf.data + \
1368 NET_IPV4_CONF_ ## attr - 1, \ 1369 IPV4_DEVCONF_ ## attr - 1, \
1369 .maxlen = sizeof(int), \ 1370 .maxlen = sizeof(int), \
1370 .mode = mval, \ 1371 .mode = mval, \
1371 .proc_handler = proc, \ 1372 .proc_handler = proc, \
@@ -1386,7 +1387,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1386 1387
1387static struct devinet_sysctl_table { 1388static struct devinet_sysctl_table {
1388 struct ctl_table_header *sysctl_header; 1389 struct ctl_table_header *sysctl_header;
1389 struct ctl_table devinet_vars[__NET_IPV4_CONF_MAX]; 1390 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
1390 char *dev_name; 1391 char *dev_name;
1391} devinet_sysctl = { 1392} devinet_sysctl = {
1392 .devinet_vars = { 1393 .devinet_vars = {
@@ -1413,6 +1414,7 @@ static struct devinet_sysctl_table {
1413 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), 1414 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
1414 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), 1415 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
1415 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), 1416 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
1417 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
1416 1418
1417 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), 1419 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
1418 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), 1420 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
@@ -1491,8 +1493,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
1491 1493
1492static void devinet_sysctl_register(struct in_device *idev) 1494static void devinet_sysctl_register(struct in_device *idev)
1493{ 1495{
1494 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4, 1496 neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
1495 NET_IPV4_NEIGH, "ipv4", NULL);
1496 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 1497 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
1497 &idev->cnf); 1498 &idev->cnf);
1498} 1499}
@@ -1507,7 +1508,7 @@ static struct ctl_table ctl_forward_entry[] = {
1507 { 1508 {
1508 .procname = "ip_forward", 1509 .procname = "ip_forward",
1509 .data = &ipv4_devconf.data[ 1510 .data = &ipv4_devconf.data[
1510 NET_IPV4_CONF_FORWARDING - 1], 1511 IPV4_DEVCONF_FORWARDING - 1],
1511 .maxlen = sizeof(int), 1512 .maxlen = sizeof(int),
1512 .mode = 0644, 1513 .mode = 0644,
1513 .proc_handler = devinet_sysctl_forward, 1514 .proc_handler = devinet_sysctl_forward,
@@ -1551,7 +1552,7 @@ static __net_init int devinet_init_net(struct net *net)
1551 if (tbl == NULL) 1552 if (tbl == NULL)
1552 goto err_alloc_ctl; 1553 goto err_alloc_ctl;
1553 1554
1554 tbl[0].data = &all->data[NET_IPV4_CONF_FORWARDING - 1]; 1555 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
1555 tbl[0].extra1 = all; 1556 tbl[0].extra1 = all;
1556 tbl[0].extra2 = net; 1557 tbl[0].extra2 = net;
1557#endif 1558#endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1948895beb6d..14ca1f1c3fb0 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -422,7 +422,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
423 return; 423 return;
424 424
425 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 425 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
426 if (!x) 426 if (!x)
427 return; 427 return;
428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf711d6d0..4f0ed458c883 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -34,6 +34,7 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/list.h> 36#include <linux/list.h>
37#include <linux/slab.h>
37 38
38#include <net/ip.h> 39#include <net/ip.h>
39#include <net/protocol.h> 40#include <net/protocol.h>
@@ -883,7 +884,7 @@ static void nl_fib_input(struct sk_buff *skb)
883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT); 884 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
884} 885}
885 886
886static int nl_fib_lookup_init(struct net *net) 887static int __net_init nl_fib_lookup_init(struct net *net)
887{ 888{
888 struct sock *sk; 889 struct sock *sk;
889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 890 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1005,7 @@ fail:
1004 return err; 1005 return err;
1005} 1006}
1006 1007
1007static void __net_exit ip_fib_net_exit(struct net *net) 1008static void ip_fib_net_exit(struct net *net)
1008{ 1009{
1009 unsigned int i; 1010 unsigned int i;
1010 1011
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 14972017b9c2..4ed7e0dea1bc 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -32,6 +32,7 @@
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33#include <linux/netlink.h> 33#include <linux/netlink.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h>
35 36
36#include <net/net_namespace.h> 37#include <net/net_namespace.h>
37#include <net/ip.h> 38#include <net/ip.h>
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c2..20f09c5b31e8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h>
35 36
36#include <net/arp.h> 37#include <net/arp.h>
37#include <net/ip.h> 38#include <net/ip.h>
@@ -62,8 +63,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \ 63#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 64for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
64 65
65#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ 66#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
66for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) 67for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
67 68
68#else /* CONFIG_IP_ROUTE_MULTIPATH */ 69#else /* CONFIG_IP_ROUTE_MULTIPATH */
69 70
@@ -72,7 +73,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ 73#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
73for (nhsel=0; nhsel < 1; nhsel++) 74for (nhsel=0; nhsel < 1; nhsel++)
74 75
75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ 76#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
76for (nhsel=0; nhsel < 1; nhsel++) 77for (nhsel=0; nhsel < 1; nhsel++)
77 78
78#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 79#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +146,9 @@ void free_fib_info(struct fib_info *fi)
145 return; 146 return;
146 } 147 }
147 change_nexthops(fi) { 148 change_nexthops(fi) {
148 if (nh->nh_dev) 149 if (nexthop_nh->nh_dev)
149 dev_put(nh->nh_dev); 150 dev_put(nexthop_nh->nh_dev);
150 nh->nh_dev = NULL; 151 nexthop_nh->nh_dev = NULL;
151 } endfor_nexthops(fi); 152 } endfor_nexthops(fi);
152 fib_info_cnt--; 153 fib_info_cnt--;
153 release_net(fi->fib_net); 154 release_net(fi->fib_net);
@@ -162,9 +163,9 @@ void fib_release_info(struct fib_info *fi)
162 if (fi->fib_prefsrc) 163 if (fi->fib_prefsrc)
163 hlist_del(&fi->fib_lhash); 164 hlist_del(&fi->fib_lhash);
164 change_nexthops(fi) { 165 change_nexthops(fi) {
165 if (!nh->nh_dev) 166 if (!nexthop_nh->nh_dev)
166 continue; 167 continue;
167 hlist_del(&nh->nh_hash); 168 hlist_del(&nexthop_nh->nh_hash);
168 } endfor_nexthops(fi) 169 } endfor_nexthops(fi)
169 fi->fib_dead = 1; 170 fi->fib_dead = 1;
170 fib_info_put(fi); 171 fib_info_put(fi);
@@ -395,19 +396,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
395 if (!rtnh_ok(rtnh, remaining)) 396 if (!rtnh_ok(rtnh, remaining))
396 return -EINVAL; 397 return -EINVAL;
397 398
398 nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 399 nexthop_nh->nh_flags =
399 nh->nh_oif = rtnh->rtnh_ifindex; 400 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
400 nh->nh_weight = rtnh->rtnh_hops + 1; 401 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
402 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
401 403
402 attrlen = rtnh_attrlen(rtnh); 404 attrlen = rtnh_attrlen(rtnh);
403 if (attrlen > 0) { 405 if (attrlen > 0) {
404 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 406 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
405 407
406 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 408 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
407 nh->nh_gw = nla ? nla_get_be32(nla) : 0; 409 nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
408#ifdef CONFIG_NET_CLS_ROUTE 410#ifdef CONFIG_NET_CLS_ROUTE
409 nla = nla_find(attrs, attrlen, RTA_FLOW); 411 nla = nla_find(attrs, attrlen, RTA_FLOW);
410 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 412 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
411#endif 413#endif
412 } 414 }
413 415
@@ -527,10 +529,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
527 if (nh->nh_gw) { 529 if (nh->nh_gw) {
528 struct fib_result res; 530 struct fib_result res;
529 531
530#ifdef CONFIG_IP_ROUTE_PERVASIVE
531 if (nh->nh_flags&RTNH_F_PERVASIVE)
532 return 0;
533#endif
534 if (nh->nh_flags&RTNH_F_ONLINK) { 532 if (nh->nh_flags&RTNH_F_ONLINK) {
535 struct net_device *dev; 533 struct net_device *dev;
536 534
@@ -738,7 +736,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
738 736
739 fi->fib_nhs = nhs; 737 fi->fib_nhs = nhs;
740 change_nexthops(fi) { 738 change_nexthops(fi) {
741 nh->nh_parent = fi; 739 nexthop_nh->nh_parent = fi;
742 } endfor_nexthops(fi) 740 } endfor_nexthops(fi)
743 741
744 if (cfg->fc_mx) { 742 if (cfg->fc_mx) {
@@ -808,7 +806,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
808 goto failure; 806 goto failure;
809 } else { 807 } else {
810 change_nexthops(fi) { 808 change_nexthops(fi) {
811 if ((err = fib_check_nh(cfg, fi, nh)) != 0) 809 if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
812 goto failure; 810 goto failure;
813 } endfor_nexthops(fi) 811 } endfor_nexthops(fi)
814 } 812 }
@@ -843,11 +841,11 @@ link_it:
843 struct hlist_head *head; 841 struct hlist_head *head;
844 unsigned int hash; 842 unsigned int hash;
845 843
846 if (!nh->nh_dev) 844 if (!nexthop_nh->nh_dev)
847 continue; 845 continue;
848 hash = fib_devindex_hashfn(nh->nh_dev->ifindex); 846 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
849 head = &fib_info_devhash[hash]; 847 head = &fib_info_devhash[hash];
850 hlist_add_head(&nh->nh_hash, head); 848 hlist_add_head(&nexthop_nh->nh_hash, head);
851 } endfor_nexthops(fi) 849 } endfor_nexthops(fi)
852 spin_unlock_bh(&fib_info_lock); 850 spin_unlock_bh(&fib_info_lock);
853 return fi; 851 return fi;
@@ -1080,21 +1078,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1080 prev_fi = fi; 1078 prev_fi = fi;
1081 dead = 0; 1079 dead = 0;
1082 change_nexthops(fi) { 1080 change_nexthops(fi) {
1083 if (nh->nh_flags&RTNH_F_DEAD) 1081 if (nexthop_nh->nh_flags&RTNH_F_DEAD)
1084 dead++; 1082 dead++;
1085 else if (nh->nh_dev == dev && 1083 else if (nexthop_nh->nh_dev == dev &&
1086 nh->nh_scope != scope) { 1084 nexthop_nh->nh_scope != scope) {
1087 nh->nh_flags |= RTNH_F_DEAD; 1085 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1088#ifdef CONFIG_IP_ROUTE_MULTIPATH 1086#ifdef CONFIG_IP_ROUTE_MULTIPATH
1089 spin_lock_bh(&fib_multipath_lock); 1087 spin_lock_bh(&fib_multipath_lock);
1090 fi->fib_power -= nh->nh_power; 1088 fi->fib_power -= nexthop_nh->nh_power;
1091 nh->nh_power = 0; 1089 nexthop_nh->nh_power = 0;
1092 spin_unlock_bh(&fib_multipath_lock); 1090 spin_unlock_bh(&fib_multipath_lock);
1093#endif 1091#endif
1094 dead++; 1092 dead++;
1095 } 1093 }
1096#ifdef CONFIG_IP_ROUTE_MULTIPATH 1094#ifdef CONFIG_IP_ROUTE_MULTIPATH
1097 if (force > 1 && nh->nh_dev == dev) { 1095 if (force > 1 && nexthop_nh->nh_dev == dev) {
1098 dead = fi->fib_nhs; 1096 dead = fi->fib_nhs;
1099 break; 1097 break;
1100 } 1098 }
@@ -1144,18 +1142,20 @@ int fib_sync_up(struct net_device *dev)
1144 prev_fi = fi; 1142 prev_fi = fi;
1145 alive = 0; 1143 alive = 0;
1146 change_nexthops(fi) { 1144 change_nexthops(fi) {
1147 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1145 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1148 alive++; 1146 alive++;
1149 continue; 1147 continue;
1150 } 1148 }
1151 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1149 if (nexthop_nh->nh_dev == NULL ||
1150 !(nexthop_nh->nh_dev->flags&IFF_UP))
1152 continue; 1151 continue;
1153 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) 1152 if (nexthop_nh->nh_dev != dev ||
1153 !__in_dev_get_rtnl(dev))
1154 continue; 1154 continue;
1155 alive++; 1155 alive++;
1156 spin_lock_bh(&fib_multipath_lock); 1156 spin_lock_bh(&fib_multipath_lock);
1157 nh->nh_power = 0; 1157 nexthop_nh->nh_power = 0;
1158 nh->nh_flags &= ~RTNH_F_DEAD; 1158 nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
1159 spin_unlock_bh(&fib_multipath_lock); 1159 spin_unlock_bh(&fib_multipath_lock);
1160 } endfor_nexthops(fi) 1160 } endfor_nexthops(fi)
1161 1161
@@ -1182,9 +1182,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1182 if (fi->fib_power <= 0) { 1182 if (fi->fib_power <= 0) {
1183 int power = 0; 1183 int power = 0;
1184 change_nexthops(fi) { 1184 change_nexthops(fi) {
1185 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1185 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1186 power += nh->nh_weight; 1186 power += nexthop_nh->nh_weight;
1187 nh->nh_power = nh->nh_weight; 1187 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1188 } 1188 }
1189 } endfor_nexthops(fi); 1189 } endfor_nexthops(fi);
1190 fi->fib_power = power; 1190 fi->fib_power = power;
@@ -1204,9 +1204,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1204 w = jiffies % fi->fib_power; 1204 w = jiffies % fi->fib_power;
1205 1205
1206 change_nexthops(fi) { 1206 change_nexthops(fi) {
1207 if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { 1207 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
1208 if ((w -= nh->nh_power) <= 0) { 1208 nexthop_nh->nh_power) {
1209 nh->nh_power--; 1209 if ((w -= nexthop_nh->nh_power) <= 0) {
1210 nexthop_nh->nh_power--;
1210 fi->fib_power--; 1211 fi->fib_power--;
1211 res->nh_sel = nhsel; 1212 res->nh_sel = nhsel;
1212 spin_unlock_bh(&fib_multipath_lock); 1213 spin_unlock_bh(&fib_multipath_lock);
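
The bulk of the fib_semantics.c churn comes from renaming the iterator that the change_nexthops() macro declares from nh to nexthop_nh, so it can no longer silently shadow an nh variable already in scope at the call site. The hazard and the fix can be shown with a toy for-each macro (everything below is invented for illustration):

    #include <stdio.h>

    struct item { int value; };

    /* A for-each macro that hides its iterator behind a deliberately unusual
     * name; a plain 'it' here could shadow a caller's own 'it' variable.     */
    #define for_each_item(arr, n)                                        \
        for (struct item *foreach_item_it = (arr);                       \
             foreach_item_it < (arr) + (n); foreach_item_it++)

    int main(void)
    {
        struct item items[3] = { {1}, {2}, {3} };
        struct item *it = &items[0];   /* caller's own variable, untouched below */
        int sum = 0;

        for_each_item(items, 3)
            sum += foreach_item_it->value;

        printf("sum=%d, caller's it still points at value %d\n", sum, it->value);
        return 0;
    }
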
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index af5d89792860..c98f115fb0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,6 +71,7 @@
71#include <linux/netlink.h> 71#include <linux/netlink.h>
72#include <linux/init.h> 72#include <linux/init.h>
73#include <linux/list.h> 73#include <linux/list.h>
74#include <linux/slab.h>
74#include <net/net_namespace.h> 75#include <net/net_namespace.h>
75#include <net/ip.h> 76#include <net/ip.h>
76#include <net/protocol.h> 77#include <net/protocol.h>
@@ -208,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
208{ 209{
209 struct node *ret = tnode_get_child(tn, i); 210 struct node *ret = tnode_get_child(tn, i);
210 211
211 return rcu_dereference(ret); 212 return rcu_dereference_check(ret,
213 rcu_read_lock_held() ||
214 lockdep_rtnl_is_held());
212} 215}
213 216
214static inline int tnode_child_length(const struct tnode *tn) 217static inline int tnode_child_length(const struct tnode *tn)
@@ -961,7 +964,9 @@ fib_find_node(struct trie *t, u32 key)
961 struct node *n; 964 struct node *n;
962 965
963 pos = 0; 966 pos = 0;
964 n = rcu_dereference(t->trie); 967 n = rcu_dereference_check(t->trie,
968 rcu_read_lock_held() ||
969 lockdep_rtnl_is_held());
965 970
966 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 971 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
967 tn = (struct tnode *) n; 972 tn = (struct tnode *) n;
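
rcu_dereference_check() takes, alongside the pointer, a lockdep expression describing every context in which the access is legal; the trie here may be walked either inside rcu_read_lock() or with the RTNL mutex held, so both conditions are listed. Conceptually it is an assert-then-load, which a user-space sketch can mimic (the real primitive also issues the required memory ordering, which this toy omits; all names are invented):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool in_read_section;   /* stand-in for rcu_read_lock_held()    */
    static bool holding_big_lock;  /* stand-in for lockdep_rtnl_is_held()  */

    /* Assert that at least one of the documented protections is in place,
     * then hand back the pointer.  (No barriers here; this is only about
     * making the locking rules checkable.)                                 */
    #define deref_check(p, cond)  (assert(cond), (p))

    int main(void)
    {
        int value = 42;
        int *shared = &value;

        in_read_section = true;
        int *p = deref_check(shared, in_read_section || holding_big_lock);
        printf("*p = %d\n", *p);
        return 0;
    }
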
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41b..ac4dec132735 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -74,6 +74,7 @@
74#include <linux/netdevice.h> 74#include <linux/netdevice.h>
75#include <linux/string.h> 75#include <linux/string.h>
76#include <linux/netfilter_ipv4.h> 76#include <linux/netfilter_ipv4.h>
77#include <linux/slab.h>
77#include <net/snmp.h> 78#include <net/snmp.h>
78#include <net/ip.h> 79#include <net/ip.h>
79#include <net/route.h> 80#include <net/route.h>
@@ -114,7 +115,7 @@ struct icmp_bxm {
114/* An array of errno for error messages from dest unreach. */ 115/* An array of errno for error messages from dest unreach. */
115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 116/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
116 117
117struct icmp_err icmp_err_convert[] = { 118const struct icmp_err icmp_err_convert[] = {
118 { 119 {
119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ 120 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */
120 .fatal = 0, 121 .fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a42f658e756a..15d3eeda92f5 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -71,6 +71,7 @@
71 */ 71 */
72 72
73#include <linux/module.h> 73#include <linux/module.h>
74#include <linux/slab.h>
74#include <asm/uaccess.h> 75#include <asm/uaccess.h>
75#include <asm/system.h> 76#include <asm/system.h>
76#include <linux/types.h> 77#include <linux/types.h>
@@ -1799,7 +1800,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1799 iml->next = inet->mc_list; 1800 iml->next = inet->mc_list;
1800 iml->sflist = NULL; 1801 iml->sflist = NULL;
1801 iml->sfmode = MCAST_EXCLUDE; 1802 iml->sfmode = MCAST_EXCLUDE;
1802 inet->mc_list = iml; 1803 rcu_assign_pointer(inet->mc_list, iml);
1803 ip_mc_inc_group(in_dev, addr); 1804 ip_mc_inc_group(in_dev, addr);
1804 err = 0; 1805 err = 0;
1805done: 1806done:
@@ -1807,24 +1808,46 @@ done:
1807 return err; 1808 return err;
1808} 1809}
1809 1810
1811static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1812{
1813 struct ip_sf_socklist *psf;
1814
1815 psf = container_of(rp, struct ip_sf_socklist, rcu);
1816 /* sk_omem_alloc should have been decreased by the caller*/
1817 kfree(psf);
1818}
1819
1810static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1820static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1811 struct in_device *in_dev) 1821 struct in_device *in_dev)
1812{ 1822{
1823 struct ip_sf_socklist *psf = iml->sflist;
1813 int err; 1824 int err;
1814 1825
1815 if (iml->sflist == NULL) { 1826 if (psf == NULL) {
1816 /* any-source empty exclude case */ 1827 /* any-source empty exclude case */
1817 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1828 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1818 iml->sfmode, 0, NULL, 0); 1829 iml->sfmode, 0, NULL, 0);
1819 } 1830 }
1820 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1831 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1821 iml->sfmode, iml->sflist->sl_count, 1832 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1822 iml->sflist->sl_addr, 0); 1833 rcu_assign_pointer(iml->sflist, NULL);
1823 sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max)); 1834 /* decrease mem now to avoid the memleak warning */
1824 iml->sflist = NULL; 1835 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1836 call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
1825 return err; 1837 return err;
1826} 1838}
1827 1839
1840
1841static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1842{
1843 struct ip_mc_socklist *iml;
1844
1845 iml = container_of(rp, struct ip_mc_socklist, rcu);
1846 /* sk_omem_alloc should have been decreased by the caller*/
1847 kfree(iml);
1848}
1849
1850
1828/* 1851/*
1829 * Ask a socket to leave a group. 1852 * Ask a socket to leave a group.
1830 */ 1853 */
@@ -1854,12 +1877,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1854 1877
1855 (void) ip_mc_leave_src(sk, iml, in_dev); 1878 (void) ip_mc_leave_src(sk, iml, in_dev);
1856 1879
1857 *imlp = iml->next; 1880 rcu_assign_pointer(*imlp, iml->next);
1858 1881
1859 if (in_dev) 1882 if (in_dev)
1860 ip_mc_dec_group(in_dev, group); 1883 ip_mc_dec_group(in_dev, group);
1861 rtnl_unlock(); 1884 rtnl_unlock();
1862 sock_kfree_s(sk, iml, sizeof(*iml)); 1885 /* decrease mem now to avoid the memleak warning */
1886 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1887 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
1863 return 0; 1888 return 0;
1864 } 1889 }
1865 if (!in_dev) 1890 if (!in_dev)
@@ -1974,9 +1999,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1974 if (psl) { 1999 if (psl) {
1975 for (i=0; i<psl->sl_count; i++) 2000 for (i=0; i<psl->sl_count; i++)
1976 newpsl->sl_addr[i] = psl->sl_addr[i]; 2001 newpsl->sl_addr[i] = psl->sl_addr[i];
1977 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2002 /* decrease mem now to avoid the memleak warning */
2003 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2004 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
1978 } 2005 }
1979 pmc->sflist = psl = newpsl; 2006 rcu_assign_pointer(pmc->sflist, newpsl);
2007 psl = newpsl;
1980 } 2008 }
1981 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2009 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1982 for (i=0; i<psl->sl_count; i++) { 2010 for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2100,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2072 if (psl) { 2100 if (psl) {
2073 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2101 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2074 psl->sl_count, psl->sl_addr, 0); 2102 psl->sl_count, psl->sl_addr, 0);
2075 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2103 /* decrease mem now to avoid the memleak warning */
2104 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2105 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
2076 } else 2106 } else
2077 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2107 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2078 0, NULL, 0); 2108 0, NULL, 0);
2079 pmc->sflist = newpsl; 2109 rcu_assign_pointer(pmc->sflist, newpsl);
2080 pmc->sfmode = msf->imsf_fmode; 2110 pmc->sfmode = msf->imsf_fmode;
2081 err = 0; 2111 err = 0;
2082done: 2112done:
@@ -2209,30 +2239,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2209 struct ip_mc_socklist *pmc; 2239 struct ip_mc_socklist *pmc;
2210 struct ip_sf_socklist *psl; 2240 struct ip_sf_socklist *psl;
2211 int i; 2241 int i;
2242 int ret;
2212 2243
2244 ret = 1;
2213 if (!ipv4_is_multicast(loc_addr)) 2245 if (!ipv4_is_multicast(loc_addr))
2214 return 1; 2246 goto out;
2215 2247
2216 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2248 rcu_read_lock();
2249 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
2217 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2250 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2218 pmc->multi.imr_ifindex == dif) 2251 pmc->multi.imr_ifindex == dif)
2219 break; 2252 break;
2220 } 2253 }
2254 ret = inet->mc_all;
2221 if (!pmc) 2255 if (!pmc)
2222 return inet->mc_all; 2256 goto unlock;
2223 psl = pmc->sflist; 2257 psl = pmc->sflist;
2258 ret = (pmc->sfmode == MCAST_EXCLUDE);
2224 if (!psl) 2259 if (!psl)
2225 return pmc->sfmode == MCAST_EXCLUDE; 2260 goto unlock;
2226 2261
2227 for (i=0; i<psl->sl_count; i++) { 2262 for (i=0; i<psl->sl_count; i++) {
2228 if (psl->sl_addr[i] == rmt_addr) 2263 if (psl->sl_addr[i] == rmt_addr)
2229 break; 2264 break;
2230 } 2265 }
2266 ret = 0;
2231 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 2267 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
2232 return 0; 2268 goto unlock;
2233 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 2269 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
2234 return 0; 2270 goto unlock;
2235 return 1; 2271 ret = 1;
2272unlock:
2273 rcu_read_unlock();
2274out:
2275 return ret;
2236} 2276}
2237 2277
2238/* 2278/*
@@ -2251,7 +2291,7 @@ void ip_mc_drop_socket(struct sock *sk)
2251 rtnl_lock(); 2291 rtnl_lock();
2252 while ((iml = inet->mc_list) != NULL) { 2292 while ((iml = inet->mc_list) != NULL) {
2253 struct in_device *in_dev; 2293 struct in_device *in_dev;
2254 inet->mc_list = iml->next; 2294 rcu_assign_pointer(inet->mc_list, iml->next);
2255 2295
2256 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2296 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2257 (void) ip_mc_leave_src(sk, iml, in_dev); 2297 (void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2299,9 @@ void ip_mc_drop_socket(struct sock *sk)
2259 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2299 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2260 in_dev_put(in_dev); 2300 in_dev_put(in_dev);
2261 } 2301 }
2262 sock_kfree_s(sk, iml, sizeof(*iml)); 2302 /* decrease mem now to avoid the memleak warning */
2303 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2304 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
2263 } 2305 }
2264 rtnl_unlock(); 2306 rtnl_unlock();
2265} 2307}
@@ -2603,7 +2645,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
2603 .release = seq_release_net, 2645 .release = seq_release_net,
2604}; 2646};
2605 2647
2606static int igmp_net_init(struct net *net) 2648static int __net_init igmp_net_init(struct net *net)
2607{ 2649{
2608 struct proc_dir_entry *pde; 2650 struct proc_dir_entry *pde;
2609 2651
@@ -2621,7 +2663,7 @@ out_igmp:
2621 return -ENOMEM; 2663 return -ENOMEM;
2622} 2664}
2623 2665
2624static void igmp_net_exit(struct net *net) 2666static void __net_exit igmp_net_exit(struct net *net)
2625{ 2667{
2626 proc_net_remove(net, "mcfilter"); 2668 proc_net_remove(net, "mcfilter");
2627 proc_net_remove(net, "igmp"); 2669 proc_net_remove(net, "igmp");
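The igmp.c hunks above convert the per-socket multicast state (inet->mc_list and the attached source-filter lists) to RCU: ip_mc_sf_allow() now walks the list under rcu_read_lock()/rcu_dereference(), writers publish replacements with rcu_assign_pointer(), and the old copies are freed from call_rcu() callbacks. The sk_omem_alloc charge is dropped immediately, instead of through sock_kfree_s(), so the deferred kfree() does not trip the socket's memory-leak warning. Below is a minimal sketch of that publish/reclaim pattern; the my_filter names are hypothetical and the snippet only builds inside a kernel tree, not as a standalone program.

	struct my_filter {
		unsigned int	count;
		struct rcu_head	rcu;		/* freed after a grace period */
	};

	static void my_filter_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_filter, rcu));
	}

	/* reader: no lock taken, just an RCU read-side critical section */
	static unsigned int my_filter_count(struct my_filter **slot)
	{
		struct my_filter *f;
		unsigned int n = 0;

		rcu_read_lock();
		f = rcu_dereference(*slot);
		if (f)
			n = f->count;
		rcu_read_unlock();
		return n;
	}

	/* writer: publish the new copy, defer freeing the old one */
	static void my_filter_replace(struct sock *sk, struct my_filter **slot,
				      struct my_filter *newf)
	{
		struct my_filter *old = *slot;	/* writers serialized by the caller */

		rcu_assign_pointer(*slot, newf);
		if (old) {
			/* uncharge now; the RCU callback only does the kfree() */
			atomic_sub(sizeof(*old), &sk->sk_omem_alloc);
			call_rcu(&old->rcu, my_filter_reclaim);
		}
	}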
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc3..8da6429269dd 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
529 syn_ack_recalc(req, thresh, max_retries, 529 syn_ack_recalc(req, thresh, max_retries,
530 queue->rskq_defer_accept, 530 queue->rskq_defer_accept,
531 &expire, &resend); 531 &expire, &resend);
532 if (req->rsk_ops->syn_ack_timeout)
533 req->rsk_ops->syn_ack_timeout(parent, req);
532 if (!expire && 534 if (!expire &&
533 (!resend || 535 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 536 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
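The hunk above adds an optional syn_ack_timeout callback to struct request_sock_ops, invoked from inet_csk_reqsk_queue_prune() each time the retransmit timer expires for a pending request, before deciding whether to resend the SYN-ACK or drop it. A hypothetical sketch of how a protocol would wire it up follows; the my_* names are illustrative, the type of rtx_syn_ack's third parameter is assumed from the NULL passed above, and this only compiles in-tree.

	static int my_rtx_syn_ack(struct sock *parent, struct request_sock *req,
				  struct request_values *rvp)
	{
		/* retransmit the SYN-ACK for this embryonic connection */
		return 0;
	}

	static void my_syn_ack_timeout(struct sock *parent, struct request_sock *req)
	{
		/* e.g. bump a per-protocol counter for every timed-out SYN-ACK */
	}

	static struct request_sock_ops my_request_sock_ops = {
		.family		 = PF_INET,
		.obj_size	 = sizeof(struct inet_request_sock),
		.rtx_syn_ack	 = my_rtx_syn_ack,
		.syn_ack_timeout = my_syn_ack_timeout,
		/* .send_ack, .send_reset, .destructor as usual */
	};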
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1aaa8110d84b..e5fa2ddce320 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -14,6 +14,7 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/fcntl.h> 15#include <linux/fcntl.h>
16#include <linux/random.h> 16#include <linux/random.h>
17#include <linux/slab.h>
17#include <linux/cache.h> 18#include <linux/cache.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/time.h> 20#include <linux/time.h>
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index eaf3e2c8646a..a2ca6aed763b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -19,6 +19,7 @@
19#include <linux/random.h> 19#include <linux/random.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
22#include <linux/slab.h>
22 23
23#include <net/inet_frag.h> 24#include <net/inet_frag.h>
24 25
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cc94cc2d8b2d..c5af909cf701 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kmemcheck.h> 12#include <linux/kmemcheck.h>
13#include <linux/slab.h>
13#include <net/inet_hashtables.h> 14#include <net/inet_hashtables.h>
14#include <net/inet_timewait_sock.h> 15#include <net/inet_timewait_sock.h>
15#include <net/ip.h> 16#include <net/ip.h>
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index a2991bc8e32e..af10942b326c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -25,6 +25,7 @@
25#include <linux/ip.h> 25#include <linux/ip.h>
26#include <linux/icmp.h> 26#include <linux/icmp.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/ip.h> 30#include <net/ip.h>
30#include <net/tcp.h> 31#include <net/tcp.h>
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c31..75347ea70ea0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,9 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/jhash.h> 33#include <linux/jhash.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <linux/slab.h>
36#include <net/route.h>
37#include <net/dst.h>
35#include <net/sock.h> 38#include <net/sock.h>
36#include <net/ip.h> 39#include <net/ip.h>
37#include <net/icmp.h> 40#include <net/icmp.h>
@@ -205,11 +208,34 @@ static void ip_expire(unsigned long arg)
205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 208 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 209 struct sk_buff *head = qp->q.fragments;
207 210
208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
209 rcu_read_lock(); 211 rcu_read_lock();
210 head->dev = dev_get_by_index_rcu(net, qp->iif); 212 head->dev = dev_get_by_index_rcu(net, qp->iif);
211 if (head->dev) 213 if (!head->dev)
212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 214 goto out_rcu_unlock;
215
216 /*
217 * Only search router table for the head fragment,
218 * when defraging timeout at PRE_ROUTING HOOK.
219 */
220 if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
221 const struct iphdr *iph = ip_hdr(head);
222 int err = ip_route_input(head, iph->daddr, iph->saddr,
223 iph->tos, head->dev);
224 if (unlikely(err))
225 goto out_rcu_unlock;
226
227 /*
228 * Only an end host needs to send an ICMP
229 * "Fragment Reassembly Timeout" message, per RFC792.
230 */
231 if (skb_rtable(head)->rt_type != RTN_LOCAL)
232 goto out_rcu_unlock;
233
234 }
235
236 /* Send an ICMP "Fragment Reassembly Timeout" message. */
237 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
238out_rcu_unlock:
213 rcu_read_unlock(); 239 rcu_read_unlock();
214 } 240 }
215out: 241out:
@@ -646,7 +672,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 672 { }
647}; 673};
648 674
649static int ip4_frags_ns_ctl_register(struct net *net) 675static int __net_init ip4_frags_ns_ctl_register(struct net *net)
650{ 676{
651 struct ctl_table *table; 677 struct ctl_table *table;
652 struct ctl_table_header *hdr; 678 struct ctl_table_header *hdr;
@@ -676,7 +702,7 @@ err_alloc:
676 return -ENOMEM; 702 return -ENOMEM;
677} 703}
678 704
679static void ip4_frags_ns_ctl_unregister(struct net *net) 705static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
680{ 706{
681 struct ctl_table *table; 707 struct ctl_table *table;
682 708
@@ -704,7 +730,7 @@ static inline void ip4_frags_ctl_register(void)
704} 730}
705#endif 731#endif
706 732
707static int ipv4_frags_init_net(struct net *net) 733static int __net_init ipv4_frags_init_net(struct net *net)
708{ 734{
709 /* 735 /*
710 * Fragment cache limits. We will commit 256K at one time. Should we 736 * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +752,7 @@ static int ipv4_frags_init_net(struct net *net)
726 return ip4_frags_ns_ctl_register(net); 752 return ip4_frags_ns_ctl_register(net);
727} 753}
728 754
729static void ipv4_frags_exit_net(struct net *net) 755static void __net_exit ipv4_frags_exit_net(struct net *net)
730{ 756{
731 ip4_frags_ns_ctl_unregister(net); 757 ip4_frags_ns_ctl_unregister(net);
732 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 758 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac6..fe381d12ecdd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/slab.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
@@ -793,7 +794,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
793 } 794 }
794 795
795 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) { 796 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
796 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 797 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
797 ip_rt_put(rt); 798 ip_rt_put(rt);
798 goto tx_error; 799 goto tx_error;
799 } 800 }
@@ -810,11 +811,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
810 tunnel->err_count = 0; 811 tunnel->err_count = 0;
811 } 812 }
812 813
813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; 814 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
814 815
815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 816 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 817 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 818 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
819 if (max_headroom > dev->needed_headroom)
820 dev->needed_headroom = max_headroom;
818 if (!new_skb) { 821 if (!new_skb) {
819 ip_rt_put(rt); 822 ip_rt_put(rt);
820 txq->tx_dropped++; 823 txq->tx_dropped++;
@@ -1144,12 +1147,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1144 1147
1145 if (saddr) 1148 if (saddr)
1146 memcpy(&iph->saddr, saddr, 4); 1149 memcpy(&iph->saddr, saddr, 4);
1147 1150 if (daddr)
1148 if (daddr) {
1149 memcpy(&iph->daddr, daddr, 4); 1151 memcpy(&iph->daddr, daddr, 4);
1150 return t->hlen; 1152 if (iph->daddr)
1151 }
1152 if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1153 return t->hlen; 1153 return t->hlen;
1154 1154
1155 return -t->hlen; 1155 return -t->hlen;
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1307 } 1307 }
1308} 1308}
1309 1309
1310static int ipgre_init_net(struct net *net) 1310static int __net_init ipgre_init_net(struct net *net)
1311{ 1311{
1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1313 int err; 1313 int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
1334 return err; 1334 return err;
1335} 1335}
1336 1336
1337static void ipgre_exit_net(struct net *net) 1337static void __net_exit ipgre_exit_net(struct net *net)
1338{ 1338{
1339 struct ipgre_net *ign; 1339 struct ipgre_net *ign;
1340 LIST_HEAD(list); 1340 LIST_HEAD(list);
@@ -1665,14 +1665,15 @@ static int __init ipgre_init(void)
1665 1665
1666 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1666 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1667 1667
1668 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1669 printk(KERN_INFO "ipgre init: can't add protocol\n");
1670 return -EAGAIN;
1671 }
1672
1673 err = register_pernet_device(&ipgre_net_ops); 1668 err = register_pernet_device(&ipgre_net_ops);
1674 if (err < 0) 1669 if (err < 0)
1675 goto gen_device_failed; 1670 return err;
1671
1672 err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
1673 if (err < 0) {
1674 printk(KERN_INFO "ipgre init: can't add protocol\n");
1675 goto add_proto_failed;
1676 }
1676 1677
1677 err = rtnl_link_register(&ipgre_link_ops); 1678 err = rtnl_link_register(&ipgre_link_ops);
1678 if (err < 0) 1679 if (err < 0)
@@ -1688,9 +1689,9 @@ out:
1688tap_ops_failed: 1689tap_ops_failed:
1689 rtnl_link_unregister(&ipgre_link_ops); 1690 rtnl_link_unregister(&ipgre_link_ops);
1690rtnl_link_failed: 1691rtnl_link_failed:
1691 unregister_pernet_device(&ipgre_net_ops);
1692gen_device_failed:
1693 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1692 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1693add_proto_failed:
1694 unregister_pernet_device(&ipgre_net_ops);
1694 goto out; 1695 goto out;
1695} 1696}
1696 1697
@@ -1698,9 +1699,9 @@ static void __exit ipgre_fini(void)
1698{ 1699{
1699 rtnl_link_unregister(&ipgre_tap_ops); 1700 rtnl_link_unregister(&ipgre_tap_ops);
1700 rtnl_link_unregister(&ipgre_link_ops); 1701 rtnl_link_unregister(&ipgre_link_ops);
1701 unregister_pernet_device(&ipgre_net_ops);
1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1703 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1703 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1704 unregister_pernet_device(&ipgre_net_ops);
1704} 1705}
1705 1706
1706module_init(ipgre_init); 1707module_init(ipgre_init);
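Besides the icmpv6_send() signature change, the dst header_len / needed_headroom accounting, and the simplified ipgre_header(), the ipgre_init()/ipgre_fini() hunks reorder setup so the per-net state registered by register_pernet_device() exists before inet_add_protocol() can start delivering GRE packets, with teardown in the reverse order. A sketch of that ordering pattern, using placeholder my_* objects and assuming kernel context:

	static struct pernet_operations my_net_ops;	/* .init/.exit elided */
	static const struct net_protocol my_protocol;	/* .handler elided */

	static int __init my_tunnel_init(void)
	{
		int err;

		err = register_pernet_device(&my_net_ops);	/* state first ... */
		if (err < 0)
			return err;

		err = inet_add_protocol(&my_protocol, IPPROTO_GRE); /* ... then packets */
		if (err < 0)
			goto add_proto_failed;
		return 0;

	add_proto_failed:
		unregister_pernet_device(&my_net_ops);
		return err;
	}

	static void __exit my_tunnel_exit(void)
	{
		/* unwind in reverse: stop packet delivery, then drop the state */
		if (inet_del_protocol(&my_protocol, IPPROTO_GRE) < 0)
			printk(KERN_INFO "can't remove protocol\n");
		unregister_pernet_device(&my_net_ops);
	}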
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c29de9879fda..f8ab7a380d4a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -119,6 +119,7 @@
119#include <linux/kernel.h> 119#include <linux/kernel.h>
120#include <linux/string.h> 120#include <linux/string.h>
121#include <linux/errno.h> 121#include <linux/errno.h>
122#include <linux/slab.h>
122 123
123#include <linux/net.h> 124#include <linux/net.h>
124#include <linux/socket.h> 125#include <linux/socket.h>
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 94bf105ef3c9..4c09a31fd140 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/capability.h> 12#include <linux/capability.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3451799e3dbf..d1bcc9f21d4f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -51,6 +51,7 @@
51#include <linux/string.h> 51#include <linux/string.h>
52#include <linux/errno.h> 52#include <linux/errno.h>
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/slab.h>
54 55
55#include <linux/socket.h> 56#include <linux/socket.h>
56#include <linux/sockios.h> 57#include <linux/sockios.h>
@@ -119,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
119 newskb->pkt_type = PACKET_LOOPBACK; 120 newskb->pkt_type = PACKET_LOOPBACK;
120 newskb->ip_summed = CHECKSUM_UNNECESSARY; 121 newskb->ip_summed = CHECKSUM_UNNECESSARY;
121 WARN_ON(!skb_dst(newskb)); 122 WARN_ON(!skb_dst(newskb));
122 netif_rx(newskb); 123 netif_rx_ni(newskb);
123 return 0; 124 return 0;
124} 125}
125 126
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff03..1e64dabbd232 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -23,6 +23,7 @@
23#include <linux/icmp.h> 23#include <linux/icmp.h>
24#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/slab.h>
26#include <net/sock.h> 27#include <net/sock.h>
27#include <net/ip.h> 28#include <net/ip.h>
28#include <net/icmp.h> 29#include <net/icmp.h>
@@ -451,7 +452,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
451 (1<<IP_TTL) | (1<<IP_HDRINCL) | 452 (1<<IP_TTL) | (1<<IP_HDRINCL) |
452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 453 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 454 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 455 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
456 (1<<IP_MINTTL))) ||
455 optname == IP_MULTICAST_TTL || 457 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL || 458 optname == IP_MULTICAST_ALL ||
457 optname == IP_MULTICAST_LOOP || 459 optname == IP_MULTICAST_LOOP ||
@@ -936,6 +938,14 @@ mc_msf_out:
936 inet->transparent = !!val; 938 inet->transparent = !!val;
937 break; 939 break;
938 940
941 case IP_MINTTL:
942 if (optlen < 1)
943 goto e_inval;
944 if (val < 0 || val > 255)
945 goto e_inval;
946 inet->min_ttl = val;
947 break;
948
939 default: 949 default:
940 err = -ENOPROTOOPT; 950 err = -ENOPROTOOPT;
941 break; 951 break;
@@ -1198,6 +1208,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1198 case IP_TRANSPARENT: 1208 case IP_TRANSPARENT:
1199 val = inet->transparent; 1209 val = inet->transparent;
1200 break; 1210 break;
1211 case IP_MINTTL:
1212 val = inet->min_ttl;
1213 break;
1201 default: 1214 default:
1202 release_sock(sk); 1215 release_sock(sk);
1203 return -ENOPROTOOPT; 1216 return -ENOPROTOOPT;
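The ip_sockglue.c hunks add the IP_MINTTL socket option (a generalized-TTL-security style filter): do_ip_setsockopt() rejects values outside 0..255 with -EINVAL, do_ip_getsockopt() reports the stored minimum, and received packets with a smaller TTL are later discarded. From userspace it is set like any other IPPROTO_IP option; a minimal usage sketch follows, assuming the running kernel supports the option (the fallback define mirrors the value used by include/linux/in.h).

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IP_MINTTL
	#define IP_MINTTL 21			/* value from include/linux/in.h */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int minttl = 255;		/* only accept directly connected peers */

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		if (setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl)) < 0)
			perror("setsockopt(IP_MINTTL)");	/* EINVAL outside 0..255 */
		close(fd);
		return 0;
	}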
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 544ce0876f12..629067571f02 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
25 25
26static void ipcomp4_err(struct sk_buff *skb, u32 info) 26static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev);
28 __be32 spi; 29 __be32 spi;
29 struct iphdr *iph = (struct iphdr *)skb->data; 30 struct iphdr *iph = (struct iphdr *)skb->data;
30 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
35 return; 36 return;
36 37
37 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
38 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
39 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
40 if (!x) 41 if (!x)
41 return; 42 return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47/* We always hold one tunnel user reference to indicate a tunnel */ 48/* We always hold one tunnel user reference to indicate a tunnel */
48static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 49static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
49{ 50{
51 struct net *net = xs_net(x);
50 struct xfrm_state *t; 52 struct xfrm_state *t;
51 53
52 t = xfrm_state_alloc(&init_net); 54 t = xfrm_state_alloc(net);
53 if (t == NULL) 55 if (t == NULL)
54 goto out; 56 goto out;
55 57
@@ -61,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
61 t->props.mode = x->props.mode; 63 t->props.mode = x->props.mode;
62 t->props.saddr.a4 = x->props.saddr.a4; 64 t->props.saddr.a4 = x->props.saddr.a4;
63 t->props.flags = x->props.flags; 65 t->props.flags = x->props.flags;
66 memcpy(&t->mark, &x->mark, sizeof(t->mark));
64 67
65 if (xfrm_init_state(t)) 68 if (xfrm_init_state(t))
66 goto error; 69 goto error;
@@ -82,10 +85,12 @@ error:
82 */ 85 */
83static int ipcomp_tunnel_attach(struct xfrm_state *x) 86static int ipcomp_tunnel_attach(struct xfrm_state *x)
84{ 87{
88 struct net *net = xs_net(x);
85 int err = 0; 89 int err = 0;
86 struct xfrm_state *t; 90 struct xfrm_state *t;
91 u32 mark = x->mark.v & x->mark.m;
87 92
88 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4, 93 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
89 x->props.saddr.a4, IPPROTO_IPIP, AF_INET); 94 x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
90 if (!t) { 95 if (!t) {
91 t = ipcomp_tunnel_create(x); 96 t = ipcomp_tunnel_create(x);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 10a6a604bf32..067ce9e043dc 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -53,6 +53,7 @@
53#include <linux/root_dev.h> 53#include <linux/root_dev.h>
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <linux/nfs_fs.h> 55#include <linux/nfs_fs.h>
56#include <linux/slab.h>
56#include <net/net_namespace.h> 57#include <net/net_namespace.h>
57#include <net/arp.h> 58#include <net/arp.h>
58#include <net/ip.h> 59#include <net/ip.h>
@@ -187,6 +188,16 @@ struct ic_device {
187static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ 188static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
188static struct net_device *ic_dev __initdata = NULL; /* Selected device */ 189static struct net_device *ic_dev __initdata = NULL; /* Selected device */
189 190
191static bool __init ic_device_match(struct net_device *dev)
192{
193 if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
194 (!(dev->flags & IFF_LOOPBACK) &&
195 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
196 strncmp(dev->name, "dummy", 5)))
197 return true;
198 return false;
199}
200
190static int __init ic_open_devs(void) 201static int __init ic_open_devs(void)
191{ 202{
192 struct ic_device *d, **last; 203 struct ic_device *d, **last;
@@ -207,10 +218,7 @@ static int __init ic_open_devs(void)
207 for_each_netdev(&init_net, dev) { 218 for_each_netdev(&init_net, dev) {
208 if (dev->flags & IFF_LOOPBACK) 219 if (dev->flags & IFF_LOOPBACK)
209 continue; 220 continue;
210 if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : 221 if (ic_device_match(dev)) {
211 (!(dev->flags & IFF_LOOPBACK) &&
212 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
213 strncmp(dev->name, "dummy", 5))) {
214 int able = 0; 222 int able = 0;
215 if (dev->mtu >= 364) 223 if (dev->mtu >= 364)
216 able |= IC_BOOTP; 224 able |= IC_BOOTP;
@@ -228,7 +236,7 @@ static int __init ic_open_devs(void)
228 } 236 }
229 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { 237 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
230 rtnl_unlock(); 238 rtnl_unlock();
231 return -1; 239 return -ENOMEM;
232 } 240 }
233 d->dev = dev; 241 d->dev = dev;
234 *last = d; 242 *last = d;
@@ -253,7 +261,7 @@ static int __init ic_open_devs(void)
253 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); 261 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name);
254 else 262 else
255 printk(KERN_ERR "IP-Config: No network devices available.\n"); 263 printk(KERN_ERR "IP-Config: No network devices available.\n");
256 return -1; 264 return -ENODEV;
257 } 265 }
258 return 0; 266 return 0;
259} 267}
@@ -1303,6 +1311,32 @@ __be32 __init root_nfs_parse_addr(char *name)
1303 return addr; 1311 return addr;
1304} 1312}
1305 1313
1314#define DEVICE_WAIT_MAX 12 /* 12 seconds */
1315
1316static int __init wait_for_devices(void)
1317{
1318 int i;
1319
1320 msleep(CONF_PRE_OPEN);
1321 for (i = 0; i < DEVICE_WAIT_MAX; i++) {
1322 struct net_device *dev;
1323 int found = 0;
1324
1325 rtnl_lock();
1326 for_each_netdev(&init_net, dev) {
1327 if (ic_device_match(dev)) {
1328 found = 1;
1329 break;
1330 }
1331 }
1332 rtnl_unlock();
1333 if (found)
1334 return 0;
1335 ssleep(1);
1336 }
1337 return -ENODEV;
1338}
1339
1306/* 1340/*
1307 * IP Autoconfig dispatcher. 1341 * IP Autoconfig dispatcher.
1308 */ 1342 */
@@ -1313,6 +1347,7 @@ static int __init ip_auto_config(void)
1313#ifdef IPCONFIG_DYNAMIC 1347#ifdef IPCONFIG_DYNAMIC
1314 int retries = CONF_OPEN_RETRIES; 1348 int retries = CONF_OPEN_RETRIES;
1315#endif 1349#endif
1350 int err;
1316 1351
1317#ifdef CONFIG_PROC_FS 1352#ifdef CONFIG_PROC_FS
1318 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); 1353 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1325,12 +1360,15 @@ static int __init ip_auto_config(void)
1325#ifdef IPCONFIG_DYNAMIC 1360#ifdef IPCONFIG_DYNAMIC
1326 try_try_again: 1361 try_try_again:
1327#endif 1362#endif
1328 /* Give hardware a chance to settle */ 1363 /* Wait for devices to appear */
1329 msleep(CONF_PRE_OPEN); 1364 err = wait_for_devices();
1365 if (err)
1366 return err;
1330 1367
1331 /* Setup all network devices */ 1368 /* Setup all network devices */
1332 if (ic_open_devs() < 0) 1369 err = ic_open_devs();
1333 return -1; 1370 if (err)
1371 return err;
1334 1372
1335 /* Give drivers a chance to settle */ 1373 /* Give drivers a chance to settle */
1336 ssleep(CONF_POST_OPEN); 1374 ssleep(CONF_POST_OPEN);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed3379..0b27b14dcc9d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -95,6 +95,7 @@
95#include <linux/module.h> 95#include <linux/module.h>
96#include <linux/types.h> 96#include <linux/types.h>
97#include <linux/kernel.h> 97#include <linux/kernel.h>
98#include <linux/slab.h>
98#include <asm/uaccess.h> 99#include <asm/uaccess.h>
99#include <linux/skbuff.h> 100#include <linux/skbuff.h>
100#include <linux/netdevice.h> 101#include <linux/netdevice.h>
@@ -130,7 +131,6 @@ struct ipip_net {
130 struct net_device *fb_tunnel_dev; 131 struct net_device *fb_tunnel_dev;
131}; 132};
132 133
133static void ipip_fb_tunnel_init(struct net_device *dev);
134static void ipip_tunnel_init(struct net_device *dev); 134static void ipip_tunnel_init(struct net_device *dev);
135static void ipip_tunnel_setup(struct net_device *dev); 135static void ipip_tunnel_setup(struct net_device *dev);
136 136
@@ -730,7 +730,7 @@ static void ipip_tunnel_init(struct net_device *dev)
730 ipip_tunnel_bind_dev(dev); 730 ipip_tunnel_bind_dev(dev);
731} 731}
732 732
733static void ipip_fb_tunnel_init(struct net_device *dev) 733static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
734{ 734{
735 struct ip_tunnel *tunnel = netdev_priv(dev); 735 struct ip_tunnel *tunnel = netdev_priv(dev);
736 struct iphdr *iph = &tunnel->parms.iph; 736 struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +773,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
773 } 773 }
774} 774}
775 775
776static int ipip_init_net(struct net *net) 776static int __net_init ipip_init_net(struct net *net)
777{ 777{
778 struct ipip_net *ipn = net_generic(net, ipip_net_id); 778 struct ipip_net *ipn = net_generic(net, ipip_net_id);
779 int err; 779 int err;
@@ -806,7 +806,7 @@ err_alloc_dev:
806 return err; 806 return err;
807} 807}
808 808
809static void ipip_exit_net(struct net *net) 809static void __net_exit ipip_exit_net(struct net *net)
810{ 810{
811 struct ipip_net *ipn = net_generic(net, ipip_net_id); 811 struct ipip_net *ipn = net_generic(net, ipip_net_id);
812 LIST_HEAD(list); 812 LIST_HEAD(list);
@@ -831,15 +831,14 @@ static int __init ipip_init(void)
831 831
832 printk(banner); 832 printk(banner);
833 833
834 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) { 834 err = register_pernet_device(&ipip_net_ops);
835 if (err < 0)
836 return err;
837 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
838 if (err < 0) {
839 unregister_pernet_device(&ipip_net_ops);
835 printk(KERN_INFO "ipip init: can't register tunnel\n"); 840 printk(KERN_INFO "ipip init: can't register tunnel\n");
836 return -EAGAIN;
837 } 841 }
838
839 err = register_pernet_device(&ipip_net_ops);
840 if (err)
841 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
842
843 return err; 842 return err;
844} 843}
845 844
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 54596f73eff5..9d4f6d1340a4 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -47,6 +47,7 @@
47#include <linux/mroute.h> 47#include <linux/mroute.h>
48#include <linux/init.h> 48#include <linux/init.h>
49#include <linux/if_ether.h> 49#include <linux/if_ether.h>
50#include <linux/slab.h>
50#include <net/net_namespace.h> 51#include <net/net_namespace.h>
51#include <net/ip.h> 52#include <net/ip.h>
52#include <net/protocol.h> 53#include <net/protocol.h>
@@ -802,6 +803,9 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
802 int line; 803 int line;
803 struct mfc_cache *uc, *c, **cp; 804 struct mfc_cache *uc, *c, **cp;
804 805
806 if (mfc->mfcc_parent >= MAXVIFS)
807 return -ENFILE;
808
805 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 809 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
806 810
807 for (cp = &net->ipv4.mfc_cache_array[line]; 811 for (cp = &net->ipv4.mfc_cache_array[line];
@@ -1163,9 +1167,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1163 int ct; 1167 int ct;
1164 LIST_HEAD(list); 1168 LIST_HEAD(list);
1165 1169
1166 if (!net_eq(dev_net(dev), net))
1167 return NOTIFY_DONE;
1168
1169 if (event != NETDEV_UNREGISTER) 1170 if (event != NETDEV_UNREGISTER)
1170 return NOTIFY_DONE; 1171 return NOTIFY_DONE;
1171 v = &net->ipv4.vif_table[0]; 1172 v = &net->ipv4.vif_table[0];
@@ -1616,17 +1617,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1616 int ct; 1617 int ct;
1617 struct rtnexthop *nhp; 1618 struct rtnexthop *nhp;
1618 struct net *net = mfc_net(c); 1619 struct net *net = mfc_net(c);
1619 struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
1620 u8 *b = skb_tail_pointer(skb); 1620 u8 *b = skb_tail_pointer(skb);
1621 struct rtattr *mp_head; 1621 struct rtattr *mp_head;
1622 1622
1623 if (dev) 1623 /* If cache is unresolved, don't try to parse IIF and OIF */
1624 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1624 if (c->mfc_parent > MAXVIFS)
1625 return -ENOENT;
1626
1627 if (VIF_EXISTS(net, c->mfc_parent))
1628 RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
1625 1629
1626 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1630 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1627 1631
1628 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1632 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1629 if (c->mfc_un.res.ttls[ct] < 255) { 1633 if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1630 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1634 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1631 goto rtattr_failure; 1635 goto rtattr_failure;
1632 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1636 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c14623fc4d5e..82fb43c5c59e 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -4,6 +4,7 @@
4#include <linux/netfilter_ipv4.h> 4#include <linux/netfilter_ipv4.h>
5#include <linux/ip.h> 5#include <linux/ip.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/gfp.h>
7#include <net/route.h> 8#include <net/route.h>
8#include <net/xfrm.h> 9#include <net/xfrm.h>
9#include <net/ip.h> 10#include <net/ip.h>
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 90203e1b9187..f07d77f65751 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_arp/arp_tables.h> 29#include <linux/netfilter_arp/arp_tables.h>
30#include "../../netfilter/xt_repldata.h"
30 31
31MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
32MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 33MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -58,6 +59,12 @@ do { \
58#define ARP_NF_ASSERT(x) 59#define ARP_NF_ASSERT(x)
59#endif 60#endif
60 61
62void *arpt_alloc_initial_table(const struct xt_table *info)
63{
64 return xt_alloc_initial_table(arpt, ARPT);
65}
66EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
67
61static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, 68static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
62 const char *hdr_addr, int len) 69 const char *hdr_addr, int len)
63{ 70{
@@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
226 return NF_DROP; 233 return NF_DROP;
227} 234}
228 235
229static inline struct arpt_entry *get_entry(void *base, unsigned int offset) 236static inline const struct arpt_entry_target *
237arpt_get_target_c(const struct arpt_entry *e)
238{
239 return arpt_get_target((struct arpt_entry *)e);
240}
241
242static inline struct arpt_entry *
243get_entry(const void *base, unsigned int offset)
230{ 244{
231 return (struct arpt_entry *)(base + offset); 245 return (struct arpt_entry *)(base + offset);
232} 246}
@@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
273 287
274 arp = arp_hdr(skb); 288 arp = arp_hdr(skb);
275 do { 289 do {
276 struct arpt_entry_target *t; 290 const struct arpt_entry_target *t;
277 int hdr_len; 291 int hdr_len;
278 292
279 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { 293 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
285 (2 * skb->dev->addr_len); 299 (2 * skb->dev->addr_len);
286 ADD_COUNTER(e->counters, hdr_len, 1); 300 ADD_COUNTER(e->counters, hdr_len, 1);
287 301
288 t = arpt_get_target(e); 302 t = arpt_get_target_c(e);
289 303
290 /* Standard target? */ 304 /* Standard target? */
291 if (!t->u.kernel.target->target) { 305 if (!t->u.kernel.target->target) {
@@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
351/* Figures out from what hook each rule can be called: returns 0 if 365/* Figures out from what hook each rule can be called: returns 0 if
352 * there are loops. Puts hook bitmask in comefrom. 366 * there are loops. Puts hook bitmask in comefrom.
353 */ 367 */
354static int mark_source_chains(struct xt_table_info *newinfo, 368static int mark_source_chains(const struct xt_table_info *newinfo,
355 unsigned int valid_hooks, void *entry0) 369 unsigned int valid_hooks, void *entry0)
356{ 370{
357 unsigned int hook; 371 unsigned int hook;
@@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
372 386
373 for (;;) { 387 for (;;) {
374 const struct arpt_standard_target *t 388 const struct arpt_standard_target *t
375 = (void *)arpt_get_target(e); 389 = (void *)arpt_get_target_c(e);
376 int visited = e->comefrom & (1 << hook); 390 int visited = e->comefrom & (1 << hook);
377 391
378 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { 392 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
@@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
456 return 1; 470 return 1;
457} 471}
458 472
459static inline int check_entry(struct arpt_entry *e, const char *name) 473static inline int check_entry(const struct arpt_entry *e, const char *name)
460{ 474{
461 const struct arpt_entry_target *t; 475 const struct arpt_entry_target *t;
462 476
@@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
468 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) 482 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
469 return -EINVAL; 483 return -EINVAL;
470 484
471 t = arpt_get_target(e); 485 t = arpt_get_target_c(e);
472 if (e->target_offset + t->u.target_size > e->next_offset) 486 if (e->target_offset + t->u.target_size > e->next_offset)
473 return -EINVAL; 487 return -EINVAL;
474 488
@@ -498,8 +512,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
498} 512}
499 513
500static inline int 514static inline int
501find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 515find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
502 unsigned int *i)
503{ 516{
504 struct arpt_entry_target *t; 517 struct arpt_entry_target *t;
505 struct xt_target *target; 518 struct xt_target *target;
@@ -524,8 +537,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
524 ret = check_target(e, name); 537 ret = check_target(e, name);
525 if (ret) 538 if (ret)
526 goto err; 539 goto err;
527
528 (*i)++;
529 return 0; 540 return 0;
530err: 541err:
531 module_put(t->u.kernel.target->me); 542 module_put(t->u.kernel.target->me);
@@ -533,14 +544,14 @@ out:
533 return ret; 544 return ret;
534} 545}
535 546
536static bool check_underflow(struct arpt_entry *e) 547static bool check_underflow(const struct arpt_entry *e)
537{ 548{
538 const struct arpt_entry_target *t; 549 const struct arpt_entry_target *t;
539 unsigned int verdict; 550 unsigned int verdict;
540 551
541 if (!unconditional(&e->arp)) 552 if (!unconditional(&e->arp))
542 return false; 553 return false;
543 t = arpt_get_target(e); 554 t = arpt_get_target_c(e);
544 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 555 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
545 return false; 556 return false;
546 verdict = ((struct arpt_standard_target *)t)->verdict; 557 verdict = ((struct arpt_standard_target *)t)->verdict;
@@ -550,12 +561,11 @@ static bool check_underflow(struct arpt_entry *e)
550 561
551static inline int check_entry_size_and_hooks(struct arpt_entry *e, 562static inline int check_entry_size_and_hooks(struct arpt_entry *e,
552 struct xt_table_info *newinfo, 563 struct xt_table_info *newinfo,
553 unsigned char *base, 564 const unsigned char *base,
554 unsigned char *limit, 565 const unsigned char *limit,
555 const unsigned int *hook_entries, 566 const unsigned int *hook_entries,
556 const unsigned int *underflows, 567 const unsigned int *underflows,
557 unsigned int valid_hooks, 568 unsigned int valid_hooks)
558 unsigned int *i)
559{ 569{
560 unsigned int h; 570 unsigned int h;
561 571
@@ -592,19 +602,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
592 /* Clear counters and comefrom */ 602 /* Clear counters and comefrom */
593 e->counters = ((struct xt_counters) { 0, 0 }); 603 e->counters = ((struct xt_counters) { 0, 0 });
594 e->comefrom = 0; 604 e->comefrom = 0;
595
596 (*i)++;
597 return 0; 605 return 0;
598} 606}
599 607
600static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i) 608static inline void cleanup_entry(struct arpt_entry *e)
601{ 609{
602 struct xt_tgdtor_param par; 610 struct xt_tgdtor_param par;
603 struct arpt_entry_target *t; 611 struct arpt_entry_target *t;
604 612
605 if (i && (*i)-- == 0)
606 return 1;
607
608 t = arpt_get_target(e); 613 t = arpt_get_target(e);
609 par.target = t->u.kernel.target; 614 par.target = t->u.kernel.target;
610 par.targinfo = t->data; 615 par.targinfo = t->data;
@@ -612,26 +617,20 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
612 if (par.target->destroy != NULL) 617 if (par.target->destroy != NULL)
613 par.target->destroy(&par); 618 par.target->destroy(&par);
614 module_put(par.target->me); 619 module_put(par.target->me);
615 return 0;
616} 620}
617 621
618/* Checks and translates the user-supplied table segment (held in 622/* Checks and translates the user-supplied table segment (held in
619 * newinfo). 623 * newinfo).
620 */ 624 */
621static int translate_table(const char *name, 625static int translate_table(struct xt_table_info *newinfo, void *entry0,
622 unsigned int valid_hooks, 626 const struct arpt_replace *repl)
623 struct xt_table_info *newinfo,
624 void *entry0,
625 unsigned int size,
626 unsigned int number,
627 const unsigned int *hook_entries,
628 const unsigned int *underflows)
629{ 627{
628 struct arpt_entry *iter;
630 unsigned int i; 629 unsigned int i;
631 int ret; 630 int ret = 0;
632 631
633 newinfo->size = size; 632 newinfo->size = repl->size;
634 newinfo->number = number; 633 newinfo->number = repl->num_entries;
635 634
636 /* Init all hooks to impossible value. */ 635 /* Init all hooks to impossible value. */
637 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 636 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
@@ -643,52 +642,63 @@ static int translate_table(const char *name,
643 i = 0; 642 i = 0;
644 643
645 /* Walk through entries, checking offsets. */ 644 /* Walk through entries, checking offsets. */
646 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 645 xt_entry_foreach(iter, entry0, newinfo->size) {
647 check_entry_size_and_hooks, 646 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
648 newinfo, 647 entry0 + repl->size,
649 entry0, 648 repl->hook_entry,
650 entry0 + size, 649 repl->underflow,
651 hook_entries, underflows, valid_hooks, &i); 650 repl->valid_hooks);
651 if (ret != 0)
652 break;
653 ++i;
654 }
652 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); 655 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
653 if (ret != 0) 656 if (ret != 0)
654 return ret; 657 return ret;
655 658
656 if (i != number) { 659 if (i != repl->num_entries) {
657 duprintf("translate_table: %u not %u entries\n", 660 duprintf("translate_table: %u not %u entries\n",
658 i, number); 661 i, repl->num_entries);
659 return -EINVAL; 662 return -EINVAL;
660 } 663 }
661 664
662 /* Check hooks all assigned */ 665 /* Check hooks all assigned */
663 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 666 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
664 /* Only hooks which are valid */ 667 /* Only hooks which are valid */
665 if (!(valid_hooks & (1 << i))) 668 if (!(repl->valid_hooks & (1 << i)))
666 continue; 669 continue;
667 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 670 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
668 duprintf("Invalid hook entry %u %u\n", 671 duprintf("Invalid hook entry %u %u\n",
669 i, hook_entries[i]); 672 i, repl->hook_entry[i]);
670 return -EINVAL; 673 return -EINVAL;
671 } 674 }
672 if (newinfo->underflow[i] == 0xFFFFFFFF) { 675 if (newinfo->underflow[i] == 0xFFFFFFFF) {
673 duprintf("Invalid underflow %u %u\n", 676 duprintf("Invalid underflow %u %u\n",
674 i, underflows[i]); 677 i, repl->underflow[i]);
675 return -EINVAL; 678 return -EINVAL;
676 } 679 }
677 } 680 }
678 681
679 if (!mark_source_chains(newinfo, valid_hooks, entry0)) { 682 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
680 duprintf("Looping hook\n"); 683 duprintf("Looping hook\n");
681 return -ELOOP; 684 return -ELOOP;
682 } 685 }
683 686
684 /* Finally, each sanity check must pass */ 687 /* Finally, each sanity check must pass */
685 i = 0; 688 i = 0;
686 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 689 xt_entry_foreach(iter, entry0, newinfo->size) {
687 find_check_entry, name, size, &i); 690 ret = find_check_entry(iter, repl->name, repl->size);
691 if (ret != 0)
692 break;
693 ++i;
694 }
688 695
689 if (ret != 0) { 696 if (ret != 0) {
690 ARPT_ENTRY_ITERATE(entry0, newinfo->size, 697 xt_entry_foreach(iter, entry0, newinfo->size) {
691 cleanup_entry, &i); 698 if (i-- == 0)
699 break;
700 cleanup_entry(iter);
701 }
692 return ret; 702 return ret;
693 } 703 }
694 704
@@ -701,30 +711,10 @@ static int translate_table(const char *name,
701 return ret; 711 return ret;
702} 712}
703 713
704/* Gets counters. */
705static inline int add_entry_to_counter(const struct arpt_entry *e,
706 struct xt_counters total[],
707 unsigned int *i)
708{
709 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
710
711 (*i)++;
712 return 0;
713}
714
715static inline int set_entry_to_counter(const struct arpt_entry *e,
716 struct xt_counters total[],
717 unsigned int *i)
718{
719 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
720
721 (*i)++;
722 return 0;
723}
724
725static void get_counters(const struct xt_table_info *t, 714static void get_counters(const struct xt_table_info *t,
726 struct xt_counters counters[]) 715 struct xt_counters counters[])
727{ 716{
717 struct arpt_entry *iter;
728 unsigned int cpu; 718 unsigned int cpu;
729 unsigned int i; 719 unsigned int i;
730 unsigned int curcpu; 720 unsigned int curcpu;
@@ -740,32 +730,32 @@ static void get_counters(const struct xt_table_info *t,
740 curcpu = smp_processor_id(); 730 curcpu = smp_processor_id();
741 731
742 i = 0; 732 i = 0;
743 ARPT_ENTRY_ITERATE(t->entries[curcpu], 733 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
744 t->size, 734 SET_COUNTER(counters[i], iter->counters.bcnt,
745 set_entry_to_counter, 735 iter->counters.pcnt);
746 counters, 736 ++i;
747 &i); 737 }
748 738
749 for_each_possible_cpu(cpu) { 739 for_each_possible_cpu(cpu) {
750 if (cpu == curcpu) 740 if (cpu == curcpu)
751 continue; 741 continue;
752 i = 0; 742 i = 0;
753 xt_info_wrlock(cpu); 743 xt_info_wrlock(cpu);
754 ARPT_ENTRY_ITERATE(t->entries[cpu], 744 xt_entry_foreach(iter, t->entries[cpu], t->size) {
755 t->size, 745 ADD_COUNTER(counters[i], iter->counters.bcnt,
756 add_entry_to_counter, 746 iter->counters.pcnt);
757 counters, 747 ++i;
758 &i); 748 }
759 xt_info_wrunlock(cpu); 749 xt_info_wrunlock(cpu);
760 } 750 }
761 local_bh_enable(); 751 local_bh_enable();
762} 752}
763 753
764static struct xt_counters *alloc_counters(struct xt_table *table) 754static struct xt_counters *alloc_counters(const struct xt_table *table)
765{ 755{
766 unsigned int countersize; 756 unsigned int countersize;
767 struct xt_counters *counters; 757 struct xt_counters *counters;
768 struct xt_table_info *private = table->private; 758 const struct xt_table_info *private = table->private;
769 759
770 /* We need atomic snapshot of counters: rest doesn't change 760 /* We need atomic snapshot of counters: rest doesn't change
771 * (other than comefrom, which userspace doesn't care 761 * (other than comefrom, which userspace doesn't care
@@ -783,11 +773,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
783} 773}
784 774
785static int copy_entries_to_user(unsigned int total_size, 775static int copy_entries_to_user(unsigned int total_size,
786 struct xt_table *table, 776 const struct xt_table *table,
787 void __user *userptr) 777 void __user *userptr)
788{ 778{
789 unsigned int off, num; 779 unsigned int off, num;
790 struct arpt_entry *e; 780 const struct arpt_entry *e;
791 struct xt_counters *counters; 781 struct xt_counters *counters;
792 struct xt_table_info *private = table->private; 782 struct xt_table_info *private = table->private;
793 int ret = 0; 783 int ret = 0;
@@ -807,7 +797,7 @@ static int copy_entries_to_user(unsigned int total_size,
807 /* FIXME: use iterator macros --RR */ 797 /* FIXME: use iterator macros --RR */
808 /* ... then go back and fix counters and names */ 798 /* ... then go back and fix counters and names */
809 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 799 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
810 struct arpt_entry_target *t; 800 const struct arpt_entry_target *t;
811 801
812 e = (struct arpt_entry *)(loc_cpu_entry + off); 802 e = (struct arpt_entry *)(loc_cpu_entry + off);
813 if (copy_to_user(userptr + off 803 if (copy_to_user(userptr + off
@@ -818,7 +808,7 @@ static int copy_entries_to_user(unsigned int total_size,
818 goto free_counters; 808 goto free_counters;
819 } 809 }
820 810
821 t = arpt_get_target(e); 811 t = arpt_get_target_c(e);
822 if (copy_to_user(userptr + off + e->target_offset 812 if (copy_to_user(userptr + off + e->target_offset
823 + offsetof(struct arpt_entry_target, 813 + offsetof(struct arpt_entry_target,
824 u.user.name), 814 u.user.name),
@@ -835,7 +825,7 @@ static int copy_entries_to_user(unsigned int total_size,
835} 825}
836 826
837#ifdef CONFIG_COMPAT 827#ifdef CONFIG_COMPAT
838static void compat_standard_from_user(void *dst, void *src) 828static void compat_standard_from_user(void *dst, const void *src)
839{ 829{
840 int v = *(compat_int_t *)src; 830 int v = *(compat_int_t *)src;
841 831
@@ -844,7 +834,7 @@ static void compat_standard_from_user(void *dst, void *src)
844 memcpy(dst, &v, sizeof(v)); 834 memcpy(dst, &v, sizeof(v));
845} 835}
846 836
847static int compat_standard_to_user(void __user *dst, void *src) 837static int compat_standard_to_user(void __user *dst, const void *src)
848{ 838{
849 compat_int_t cv = *(int *)src; 839 compat_int_t cv = *(int *)src;
850 840
@@ -853,18 +843,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
853 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 843 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
854} 844}
855 845
856static int compat_calc_entry(struct arpt_entry *e, 846static int compat_calc_entry(const struct arpt_entry *e,
857 const struct xt_table_info *info, 847 const struct xt_table_info *info,
858 void *base, struct xt_table_info *newinfo) 848 const void *base, struct xt_table_info *newinfo)
859{ 849{
860 struct arpt_entry_target *t; 850 const struct arpt_entry_target *t;
861 unsigned int entry_offset; 851 unsigned int entry_offset;
862 int off, i, ret; 852 int off, i, ret;
863 853
864 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 854 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
865 entry_offset = (void *)e - base; 855 entry_offset = (void *)e - base;
866 856
867 t = arpt_get_target(e); 857 t = arpt_get_target_c(e);
868 off += xt_compat_target_offset(t->u.kernel.target); 858 off += xt_compat_target_offset(t->u.kernel.target);
869 newinfo->size -= off; 859 newinfo->size -= off;
870 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 860 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
@@ -885,7 +875,9 @@ static int compat_calc_entry(struct arpt_entry *e,
885static int compat_table_info(const struct xt_table_info *info, 875static int compat_table_info(const struct xt_table_info *info,
886 struct xt_table_info *newinfo) 876 struct xt_table_info *newinfo)
887{ 877{
878 struct arpt_entry *iter;
888 void *loc_cpu_entry; 879 void *loc_cpu_entry;
880 int ret;
889 881
890 if (!newinfo || !info) 882 if (!newinfo || !info)
891 return -EINVAL; 883 return -EINVAL;
@@ -894,13 +886,17 @@ static int compat_table_info(const struct xt_table_info *info,
894 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 886 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
895 newinfo->initial_entries = 0; 887 newinfo->initial_entries = 0;
896 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 888 loc_cpu_entry = info->entries[raw_smp_processor_id()];
897 return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 889 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
898 compat_calc_entry, info, loc_cpu_entry, 890 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
899 newinfo); 891 if (ret != 0)
892 return ret;
893 }
894 return 0;
900} 895}
901#endif 896#endif
902 897
903static int get_info(struct net *net, void __user *user, int *len, int compat) 898static int get_info(struct net *net, void __user *user,
899 const int *len, int compat)
904{ 900{
905 char name[ARPT_TABLE_MAXNAMELEN]; 901 char name[ARPT_TABLE_MAXNAMELEN];
906 struct xt_table *t; 902 struct xt_table *t;
@@ -959,7 +955,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
959} 955}
960 956
961static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, 957static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
962 int *len) 958 const int *len)
963{ 959{
964 int ret; 960 int ret;
965 struct arpt_get_entries get; 961 struct arpt_get_entries get;
@@ -1010,6 +1006,7 @@ static int __do_replace(struct net *net, const char *name,
1010 struct xt_table_info *oldinfo; 1006 struct xt_table_info *oldinfo;
1011 struct xt_counters *counters; 1007 struct xt_counters *counters;
1012 void *loc_cpu_old_entry; 1008 void *loc_cpu_old_entry;
1009 struct arpt_entry *iter;
1013 1010
1014 ret = 0; 1011 ret = 0;
1015 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1012 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1053,8 +1050,8 @@ static int __do_replace(struct net *net, const char *name,
1053 1050
1054 /* Decrease module usage counts and free resource */ 1051 /* Decrease module usage counts and free resource */
1055 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1052 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1056 ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1053 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1057 NULL); 1054 cleanup_entry(iter);
1058 1055
1059 xt_free_table_info(oldinfo); 1056 xt_free_table_info(oldinfo);
1060 if (copy_to_user(counters_ptr, counters, 1057 if (copy_to_user(counters_ptr, counters,
@@ -1073,12 +1070,14 @@ static int __do_replace(struct net *net, const char *name,
1073 return ret; 1070 return ret;
1074} 1071}
1075 1072
1076static int do_replace(struct net *net, void __user *user, unsigned int len) 1073static int do_replace(struct net *net, const void __user *user,
1074 unsigned int len)
1077{ 1075{
1078 int ret; 1076 int ret;
1079 struct arpt_replace tmp; 1077 struct arpt_replace tmp;
1080 struct xt_table_info *newinfo; 1078 struct xt_table_info *newinfo;
1081 void *loc_cpu_entry; 1079 void *loc_cpu_entry;
1080 struct arpt_entry *iter;
1082 1081
1083 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1082 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1084 return -EFAULT; 1083 return -EFAULT;
@@ -1099,9 +1098,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1099 goto free_newinfo; 1098 goto free_newinfo;
1100 } 1099 }
1101 1100
1102 ret = translate_table(tmp.name, tmp.valid_hooks, 1101 ret = translate_table(newinfo, loc_cpu_entry, &tmp);
1103 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1104 tmp.hook_entry, tmp.underflow);
1105 if (ret != 0) 1102 if (ret != 0)
1106 goto free_newinfo; 1103 goto free_newinfo;
1107 1104
@@ -1114,27 +1111,15 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1114 return 0; 1111 return 0;
1115 1112
1116 free_newinfo_untrans: 1113 free_newinfo_untrans:
1117 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1114 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1115 cleanup_entry(iter);
1118 free_newinfo: 1116 free_newinfo:
1119 xt_free_table_info(newinfo); 1117 xt_free_table_info(newinfo);
1120 return ret; 1118 return ret;
1121} 1119}
1122 1120
1123/* We're lazy, and add to the first CPU; overflow works its fey magic 1121static int do_add_counters(struct net *net, const void __user *user,
1124 * and everything is OK. */ 1122 unsigned int len, int compat)
1125static int
1126add_counter_to_entry(struct arpt_entry *e,
1127 const struct xt_counters addme[],
1128 unsigned int *i)
1129{
1130 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1131
1132 (*i)++;
1133 return 0;
1134}
1135
1136static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1137 int compat)
1138{ 1123{
1139 unsigned int i, curcpu; 1124 unsigned int i, curcpu;
1140 struct xt_counters_info tmp; 1125 struct xt_counters_info tmp;
@@ -1147,6 +1132,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1147 const struct xt_table_info *private; 1132 const struct xt_table_info *private;
1148 int ret = 0; 1133 int ret = 0;
1149 void *loc_cpu_entry; 1134 void *loc_cpu_entry;
1135 struct arpt_entry *iter;
1150#ifdef CONFIG_COMPAT 1136#ifdef CONFIG_COMPAT
1151 struct compat_xt_counters_info compat_tmp; 1137 struct compat_xt_counters_info compat_tmp;
1152 1138
@@ -1204,11 +1190,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1204 curcpu = smp_processor_id(); 1190 curcpu = smp_processor_id();
1205 loc_cpu_entry = private->entries[curcpu]; 1191 loc_cpu_entry = private->entries[curcpu];
1206 xt_info_wrlock(curcpu); 1192 xt_info_wrlock(curcpu);
1207 ARPT_ENTRY_ITERATE(loc_cpu_entry, 1193 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1208 private->size, 1194 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1209 add_counter_to_entry, 1195 ++i;
1210 paddc, 1196 }
1211 &i);
1212 xt_info_wrunlock(curcpu); 1197 xt_info_wrunlock(curcpu);
1213 unlock_up_free: 1198 unlock_up_free:
1214 local_bh_enable(); 1199 local_bh_enable();
@@ -1221,28 +1206,22 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1221} 1206}
1222 1207
1223#ifdef CONFIG_COMPAT 1208#ifdef CONFIG_COMPAT
1224static inline int 1209static inline void compat_release_entry(struct compat_arpt_entry *e)
1225compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
1226{ 1210{
1227 struct arpt_entry_target *t; 1211 struct arpt_entry_target *t;
1228 1212
1229 if (i && (*i)-- == 0)
1230 return 1;
1231
1232 t = compat_arpt_get_target(e); 1213 t = compat_arpt_get_target(e);
1233 module_put(t->u.kernel.target->me); 1214 module_put(t->u.kernel.target->me);
1234 return 0;
1235} 1215}
1236 1216
1237static inline int 1217static inline int
1238check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, 1218check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1239 struct xt_table_info *newinfo, 1219 struct xt_table_info *newinfo,
1240 unsigned int *size, 1220 unsigned int *size,
1241 unsigned char *base, 1221 const unsigned char *base,
1242 unsigned char *limit, 1222 const unsigned char *limit,
1243 unsigned int *hook_entries, 1223 const unsigned int *hook_entries,
1244 unsigned int *underflows, 1224 const unsigned int *underflows,
1245 unsigned int *i,
1246 const char *name) 1225 const char *name)
1247{ 1226{
1248 struct arpt_entry_target *t; 1227 struct arpt_entry_target *t;
@@ -1302,8 +1281,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1302 /* Clear counters and comefrom */ 1281 /* Clear counters and comefrom */
1303 memset(&e->counters, 0, sizeof(e->counters)); 1282 memset(&e->counters, 0, sizeof(e->counters));
1304 e->comefrom = 0; 1283 e->comefrom = 0;
1305
1306 (*i)++;
1307 return 0; 1284 return 0;
1308 1285
1309release_target: 1286release_target:
@@ -1347,19 +1324,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
1347 return ret; 1324 return ret;
1348} 1325}
1349 1326
1350static inline int compat_check_entry(struct arpt_entry *e, const char *name,
1351 unsigned int *i)
1352{
1353 int ret;
1354
1355 ret = check_target(e, name);
1356 if (ret)
1357 return ret;
1358
1359 (*i)++;
1360 return 0;
1361}
1362
1363static int translate_compat_table(const char *name, 1327static int translate_compat_table(const char *name,
1364 unsigned int valid_hooks, 1328 unsigned int valid_hooks,
1365 struct xt_table_info **pinfo, 1329 struct xt_table_info **pinfo,
@@ -1372,8 +1336,10 @@ static int translate_compat_table(const char *name,
1372 unsigned int i, j; 1336 unsigned int i, j;
1373 struct xt_table_info *newinfo, *info; 1337 struct xt_table_info *newinfo, *info;
1374 void *pos, *entry0, *entry1; 1338 void *pos, *entry0, *entry1;
1339 struct compat_arpt_entry *iter0;
1340 struct arpt_entry *iter1;
1375 unsigned int size; 1341 unsigned int size;
1376 int ret; 1342 int ret = 0;
1377 1343
1378 info = *pinfo; 1344 info = *pinfo;
1379 entry0 = *pentry0; 1345 entry0 = *pentry0;
@@ -1390,13 +1356,17 @@ static int translate_compat_table(const char *name,
1390 j = 0; 1356 j = 0;
1391 xt_compat_lock(NFPROTO_ARP); 1357 xt_compat_lock(NFPROTO_ARP);
1392 /* Walk through entries, checking offsets. */ 1358 /* Walk through entries, checking offsets. */
1393 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1359 xt_entry_foreach(iter0, entry0, total_size) {
1394 check_compat_entry_size_and_hooks, 1360 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1395 info, &size, entry0, 1361 entry0,
1396 entry0 + total_size, 1362 entry0 + total_size,
1397 hook_entries, underflows, &j, name); 1363 hook_entries,
1398 if (ret != 0) 1364 underflows,
1399 goto out_unlock; 1365 name);
1366 if (ret != 0)
1367 goto out_unlock;
1368 ++j;
1369 }
1400 1370
1401 ret = -EINVAL; 1371 ret = -EINVAL;
1402 if (j != number) { 1372 if (j != number) {
@@ -1435,9 +1405,12 @@ static int translate_compat_table(const char *name,
1435 entry1 = newinfo->entries[raw_smp_processor_id()]; 1405 entry1 = newinfo->entries[raw_smp_processor_id()];
1436 pos = entry1; 1406 pos = entry1;
1437 size = total_size; 1407 size = total_size;
1438 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1408 xt_entry_foreach(iter0, entry0, total_size) {
1439 compat_copy_entry_from_user, 1409 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1440 &pos, &size, name, newinfo, entry1); 1410 name, newinfo, entry1);
1411 if (ret != 0)
1412 break;
1413 }
1441 xt_compat_flush_offsets(NFPROTO_ARP); 1414 xt_compat_flush_offsets(NFPROTO_ARP);
1442 xt_compat_unlock(NFPROTO_ARP); 1415 xt_compat_unlock(NFPROTO_ARP);
1443 if (ret) 1416 if (ret)
@@ -1448,13 +1421,32 @@ static int translate_compat_table(const char *name,
1448 goto free_newinfo; 1421 goto free_newinfo;
1449 1422
1450 i = 0; 1423 i = 0;
1451 ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1424 xt_entry_foreach(iter1, entry1, newinfo->size) {
1452 name, &i); 1425 ret = check_target(iter1, name);
1426 if (ret != 0)
1427 break;
1428 ++i;
1429 }
1453 if (ret) { 1430 if (ret) {
1431 /*
1432 * The first i matches need cleanup_entry (calls ->destroy)
1433 * because they had called ->check already. The other j-i
1434 * entries need only release.
1435 */
1436 int skip = i;
1454 j -= i; 1437 j -= i;
1455 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1438 xt_entry_foreach(iter0, entry0, newinfo->size) {
1456 compat_release_entry, &j); 1439 if (skip-- > 0)
1457 ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1440 continue;
1441 if (j-- == 0)
1442 break;
1443 compat_release_entry(iter0);
1444 }
1445 xt_entry_foreach(iter1, entry1, newinfo->size) {
1446 if (i-- == 0)
1447 break;
1448 cleanup_entry(iter1);
1449 }
1458 xt_free_table_info(newinfo); 1450 xt_free_table_info(newinfo);
1459 return ret; 1451 return ret;
1460 } 1452 }
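The error path above replaces COMPAT_ARPT_ENTRY_ITERATE_CONTINUE() with two explicit loops: the first i translated entries already passed ->check and need the full cleanup_entry(), while the remaining j-i compat entries only have their module references dropped via compat_release_entry(). The sketch below isolates that "count the successes, unwind exactly that many" idiom in self-contained form; item_check() and item_destroy() are illustrative stand-ins.

struct item { int checked; };

static int item_check(struct item *it, int fail_at, int idx)
{
	if (idx == fail_at)
		return -1;	/* simulate a failing ->check */
	it->checked = 1;
	return 0;
}

static void item_destroy(struct item *it)
{
	it->checked = 0;	/* stands in for cleanup_entry() */
}

/* Check every item; on failure undo exactly the ones that succeeded,
 * mirroring the i/j bookkeeping in translate_compat_table(). */
static int check_all(struct item *items, int n, int fail_at)
{
	int done = 0, ret = 0;

	for (int k = 0; k < n; k++) {
		ret = item_check(&items[k], fail_at, k);
		if (ret != 0)
			break;
		++done;
	}
	if (ret != 0)
		while (done-- > 0)
			item_destroy(&items[done]);
	return ret;
}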
@@ -1472,7 +1464,11 @@ static int translate_compat_table(const char *name,
1472free_newinfo: 1464free_newinfo:
1473 xt_free_table_info(newinfo); 1465 xt_free_table_info(newinfo);
1474out: 1466out:
1475 COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1467 xt_entry_foreach(iter0, entry0, total_size) {
1468 if (j-- == 0)
1469 break;
1470 compat_release_entry(iter0);
1471 }
1476 return ret; 1472 return ret;
1477out_unlock: 1473out_unlock:
1478 xt_compat_flush_offsets(NFPROTO_ARP); 1474 xt_compat_flush_offsets(NFPROTO_ARP);
@@ -1499,6 +1495,7 @@ static int compat_do_replace(struct net *net, void __user *user,
1499 struct compat_arpt_replace tmp; 1495 struct compat_arpt_replace tmp;
1500 struct xt_table_info *newinfo; 1496 struct xt_table_info *newinfo;
1501 void *loc_cpu_entry; 1497 void *loc_cpu_entry;
1498 struct arpt_entry *iter;
1502 1499
1503 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1500 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1504 return -EFAULT; 1501 return -EFAULT;
@@ -1536,7 +1533,8 @@ static int compat_do_replace(struct net *net, void __user *user,
1536 return 0; 1533 return 0;
1537 1534
1538 free_newinfo_untrans: 1535 free_newinfo_untrans:
1539 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1536 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1537 cleanup_entry(iter);
1540 free_newinfo: 1538 free_newinfo:
1541 xt_free_table_info(newinfo); 1539 xt_free_table_info(newinfo);
1542 return ret; 1540 return ret;
@@ -1570,7 +1568,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1570static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, 1568static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1571 compat_uint_t *size, 1569 compat_uint_t *size,
1572 struct xt_counters *counters, 1570 struct xt_counters *counters,
1573 unsigned int *i) 1571 unsigned int i)
1574{ 1572{
1575 struct arpt_entry_target *t; 1573 struct arpt_entry_target *t;
1576 struct compat_arpt_entry __user *ce; 1574 struct compat_arpt_entry __user *ce;
@@ -1578,14 +1576,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1578 compat_uint_t origsize; 1576 compat_uint_t origsize;
1579 int ret; 1577 int ret;
1580 1578
1581 ret = -EFAULT;
1582 origsize = *size; 1579 origsize = *size;
1583 ce = (struct compat_arpt_entry __user *)*dstptr; 1580 ce = (struct compat_arpt_entry __user *)*dstptr;
1584 if (copy_to_user(ce, e, sizeof(struct arpt_entry))) 1581 if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
1585 goto out; 1582 copy_to_user(&ce->counters, &counters[i],
1586 1583 sizeof(counters[i])) != 0)
1587 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1584 return -EFAULT;
1588 goto out;
1589 1585
1590 *dstptr += sizeof(struct compat_arpt_entry); 1586 *dstptr += sizeof(struct compat_arpt_entry);
1591 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1587 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
@@ -1595,18 +1591,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1595 t = arpt_get_target(e); 1591 t = arpt_get_target(e);
1596 ret = xt_compat_target_to_user(t, dstptr, size); 1592 ret = xt_compat_target_to_user(t, dstptr, size);
1597 if (ret) 1593 if (ret)
1598 goto out; 1594 return ret;
1599 ret = -EFAULT;
1600 next_offset = e->next_offset - (origsize - *size); 1595 next_offset = e->next_offset - (origsize - *size);
1601 if (put_user(target_offset, &ce->target_offset)) 1596 if (put_user(target_offset, &ce->target_offset) != 0 ||
1602 goto out; 1597 put_user(next_offset, &ce->next_offset) != 0)
1603 if (put_user(next_offset, &ce->next_offset)) 1598 return -EFAULT;
1604 goto out;
1605
1606 (*i)++;
1607 return 0; 1599 return 0;
1608out:
1609 return ret;
1610} 1600}
1611 1601
1612static int compat_copy_entries_to_user(unsigned int total_size, 1602static int compat_copy_entries_to_user(unsigned int total_size,
@@ -1620,6 +1610,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1620 int ret = 0; 1610 int ret = 0;
1621 void *loc_cpu_entry; 1611 void *loc_cpu_entry;
1622 unsigned int i = 0; 1612 unsigned int i = 0;
1613 struct arpt_entry *iter;
1623 1614
1624 counters = alloc_counters(table); 1615 counters = alloc_counters(table);
1625 if (IS_ERR(counters)) 1616 if (IS_ERR(counters))
@@ -1629,9 +1620,12 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1629 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1620 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1630 pos = userptr; 1621 pos = userptr;
1631 size = total_size; 1622 size = total_size;
1632 ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1623 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1633 compat_copy_entry_to_user, 1624 ret = compat_copy_entry_to_user(iter, &pos,
1634 &pos, &size, counters, &i); 1625 &size, counters, i++);
1626 if (ret != 0)
1627 break;
1628 }
1635 vfree(counters); 1629 vfree(counters);
1636 return ret; 1630 return ret;
1637} 1631}
@@ -1799,12 +1793,7 @@ struct xt_table *arpt_register_table(struct net *net,
1799 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1793 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1800 memcpy(loc_cpu_entry, repl->entries, repl->size); 1794 memcpy(loc_cpu_entry, repl->entries, repl->size);
1801 1795
1802 ret = translate_table(table->name, table->valid_hooks, 1796 ret = translate_table(newinfo, loc_cpu_entry, repl);
1803 newinfo, loc_cpu_entry, repl->size,
1804 repl->num_entries,
1805 repl->hook_entry,
1806 repl->underflow);
1807
1808 duprintf("arpt_register_table: translate table gives %d\n", ret); 1797 duprintf("arpt_register_table: translate table gives %d\n", ret);
1809 if (ret != 0) 1798 if (ret != 0)
1810 goto out_free; 1799 goto out_free;
@@ -1827,13 +1816,14 @@ void arpt_unregister_table(struct xt_table *table)
1827 struct xt_table_info *private; 1816 struct xt_table_info *private;
1828 void *loc_cpu_entry; 1817 void *loc_cpu_entry;
1829 struct module *table_owner = table->me; 1818 struct module *table_owner = table->me;
1819 struct arpt_entry *iter;
1830 1820
1831 private = xt_unregister_table(table); 1821 private = xt_unregister_table(table);
1832 1822
1833 /* Decrease module usage counts and free resources */ 1823 /* Decrease module usage counts and free resources */
1834 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1824 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1835 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, 1825 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1836 cleanup_entry, NULL); 1826 cleanup_entry(iter);
1837 if (private->number > private->initial_entries) 1827 if (private->number > private->initial_entries)
1838 module_put(table_owner); 1828 module_put(table_owner);
1839 xt_free_table_info(private); 1829 xt_free_table_info(private);
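With translate_table() now taking the whole arpt_replace descriptor, arpt_register_table() in the hunks above no longer unpacks name, valid_hooks, size, num_entries, hook_entry and underflow into six separate arguments. A rough sketch of the shape of that interface change, using illustrative toy_* types rather than the kernel ones:

struct toy_table_info { unsigned int size, number; };

/* Illustrative descriptor bundling what used to be six parameters. */
struct toy_replace {
	const char   *name;
	unsigned int  valid_hooks;
	unsigned int  num_entries;
	unsigned int  size;
	unsigned int  hook_entry[4];
	unsigned int  underflow[4];
};

/* Old shape (for contrast):
 *   translate_table(repl->name, repl->valid_hooks, newinfo, entry0,
 *                   repl->size, repl->num_entries,
 *                   repl->hook_entry, repl->underflow);
 * New shape: hand over the descriptor and read the fields inside. */
static int toy_translate_table(struct toy_table_info *newinfo, void *entry0,
			       const struct toy_replace *repl)
{
	(void)entry0;	/* the entry blob would be validated here */
	newinfo->size   = repl->size;
	newinfo->number = repl->num_entries;
	/* ... hook/underflow checks against repl->hook_entry[],
	 *     repl->underflow[] and repl->valid_hooks would follow ... */
	return 0;
}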
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 97337601827a..79ca5e70d497 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -6,7 +6,9 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netfilter/x_tables.h>
9#include <linux/netfilter_arp/arp_tables.h> 10#include <linux/netfilter_arp/arp_tables.h>
11#include <linux/slab.h>
10 12
11MODULE_LICENSE("GPL"); 13MODULE_LICENSE("GPL");
12MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 14MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -15,93 +17,37 @@ MODULE_DESCRIPTION("arptables filter table");
15#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ 17#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
16 (1 << NF_ARP_FORWARD)) 18 (1 << NF_ARP_FORWARD))
17 19
18static const struct
19{
20 struct arpt_replace repl;
21 struct arpt_standard entries[3];
22 struct arpt_error term;
23} initial_table __net_initdata = {
24 .repl = {
25 .name = "filter",
26 .valid_hooks = FILTER_VALID_HOOKS,
27 .num_entries = 4,
28 .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
29 .hook_entry = {
30 [NF_ARP_IN] = 0,
31 [NF_ARP_OUT] = sizeof(struct arpt_standard),
32 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
33 },
34 .underflow = {
35 [NF_ARP_IN] = 0,
36 [NF_ARP_OUT] = sizeof(struct arpt_standard),
37 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
38 },
39 },
40 .entries = {
41 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
42 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
43 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
44 },
45 .term = ARPT_ERROR_INIT,
46};
47
48static const struct xt_table packet_filter = { 20static const struct xt_table packet_filter = {
49 .name = "filter", 21 .name = "filter",
50 .valid_hooks = FILTER_VALID_HOOKS, 22 .valid_hooks = FILTER_VALID_HOOKS,
51 .me = THIS_MODULE, 23 .me = THIS_MODULE,
52 .af = NFPROTO_ARP, 24 .af = NFPROTO_ARP,
25 .priority = NF_IP_PRI_FILTER,
53}; 26};
54 27
55/* The work comes in here from netfilter.c */ 28/* The work comes in here from netfilter.c */
56static unsigned int arpt_in_hook(unsigned int hook, 29static unsigned int
57 struct sk_buff *skb, 30arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
58 const struct net_device *in, 31 const struct net_device *in, const struct net_device *out,
59 const struct net_device *out, 32 int (*okfn)(struct sk_buff *))
60 int (*okfn)(struct sk_buff *))
61{ 33{
62 return arpt_do_table(skb, hook, in, out, 34 const struct net *net = dev_net((in != NULL) ? in : out);
63 dev_net(in)->ipv4.arptable_filter);
64}
65 35
66static unsigned int arpt_out_hook(unsigned int hook, 36 return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
67 struct sk_buff *skb,
68 const struct net_device *in,
69 const struct net_device *out,
70 int (*okfn)(struct sk_buff *))
71{
72 return arpt_do_table(skb, hook, in, out,
73 dev_net(out)->ipv4.arptable_filter);
74} 37}
75 38
76static struct nf_hook_ops arpt_ops[] __read_mostly = { 39static struct nf_hook_ops *arpfilter_ops __read_mostly;
77 {
78 .hook = arpt_in_hook,
79 .owner = THIS_MODULE,
80 .pf = NFPROTO_ARP,
81 .hooknum = NF_ARP_IN,
82 .priority = NF_IP_PRI_FILTER,
83 },
84 {
85 .hook = arpt_out_hook,
86 .owner = THIS_MODULE,
87 .pf = NFPROTO_ARP,
88 .hooknum = NF_ARP_OUT,
89 .priority = NF_IP_PRI_FILTER,
90 },
91 {
92 .hook = arpt_in_hook,
93 .owner = THIS_MODULE,
94 .pf = NFPROTO_ARP,
95 .hooknum = NF_ARP_FORWARD,
96 .priority = NF_IP_PRI_FILTER,
97 },
98};
99 40
100static int __net_init arptable_filter_net_init(struct net *net) 41static int __net_init arptable_filter_net_init(struct net *net)
101{ 42{
102 /* Register table */ 43 struct arpt_replace *repl;
44
45 repl = arpt_alloc_initial_table(&packet_filter);
46 if (repl == NULL)
47 return -ENOMEM;
103 net->ipv4.arptable_filter = 48 net->ipv4.arptable_filter =
104 arpt_register_table(net, &packet_filter, &initial_table.repl); 49 arpt_register_table(net, &packet_filter, repl);
50 kfree(repl);
105 if (IS_ERR(net->ipv4.arptable_filter)) 51 if (IS_ERR(net->ipv4.arptable_filter))
106 return PTR_ERR(net->ipv4.arptable_filter); 52 return PTR_ERR(net->ipv4.arptable_filter);
107 return 0; 53 return 0;
@@ -125,9 +71,11 @@ static int __init arptable_filter_init(void)
125 if (ret < 0) 71 if (ret < 0)
126 return ret; 72 return ret;
127 73
128 ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 74 arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
129 if (ret < 0) 75 if (IS_ERR(arpfilter_ops)) {
76 ret = PTR_ERR(arpfilter_ops);
130 goto cleanup_table; 77 goto cleanup_table;
78 }
131 return ret; 79 return ret;
132 80
133cleanup_table: 81cleanup_table:
@@ -137,7 +85,7 @@ cleanup_table:
137 85
138static void __exit arptable_filter_fini(void) 86static void __exit arptable_filter_fini(void)
139{ 87{
140 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 88 xt_hook_unlink(&packet_filter, arpfilter_ops);
141 unregister_pernet_subsys(&arptable_filter_net_ops); 89 unregister_pernet_subsys(&arptable_filter_net_ops);
142} 90}
143 91
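arptable_filter now registers one hook function for ARP IN, OUT and FORWARD instead of three nearly identical nf_hook_ops entries, and resolves the per-namespace table from whichever device pointer is non-NULL; the initial table itself comes from a shared template allocator instead of a hand-written static blob. A simplified, self-contained sketch of the dispatch idea follows; the toy_* types and helpers are stand-ins, not the netfilter API.

#include <stddef.h>

struct toy_table  { const char *name; };
struct toy_net    { struct toy_table *arptable_filter; };
struct toy_device { struct toy_net *net; };
struct toy_skb    { int len; };

static struct toy_net *toy_dev_net(const struct toy_device *dev)
{
	return dev->net;
}

static unsigned int toy_do_table(struct toy_skb *skb, unsigned int hook,
				 struct toy_table *table)
{
	(void)skb; (void)hook; (void)table;
	return 1;	/* stand-in for NF_ACCEPT */
}

/* One hook body for all three hook points: pick the namespace from the
 * input device if present, otherwise from the output device. */
static unsigned int toy_filter_hook(unsigned int hook, struct toy_skb *skb,
				    const struct toy_device *in,
				    const struct toy_device *out)
{
	const struct toy_net *net = toy_dev_net(in != NULL ? in : out);

	return toy_do_table(skb, hook, net->arptable_filter);
}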
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 2855f1f38cbc..e2787048aa0a 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -26,6 +26,7 @@
26#include <linux/security.h> 26#include <linux/security.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/slab.h>
29#include <net/net_namespace.h> 30#include <net/net_namespace.h>
30#include <net/sock.h> 31#include <net/sock.h>
31#include <net/route.h> 32#include <net/route.h>
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 3ce53cf13d5a..b29c66df8d1f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -28,6 +28,7 @@
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 29#include <linux/netfilter_ipv4/ip_tables.h>
30#include <net/netfilter/nf_log.h> 30#include <net/netfilter/nf_log.h>
31#include "../../netfilter/xt_repldata.h"
31 32
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -66,6 +67,12 @@ do { \
66#define inline 67#define inline
67#endif 68#endif
68 69
70void *ipt_alloc_initial_table(const struct xt_table *info)
71{
72 return xt_alloc_initial_table(ipt, IPT);
73}
74EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
75
69/* 76/*
70 We keep a set of rules for each CPU, so we can avoid write-locking 77 We keep a set of rules for each CPU, so we can avoid write-locking
71 them in the softirq when updating the counters and therefore 78 them in the softirq when updating the counters and therefore
@@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
169 176
170/* Performance critical - called for every packet */ 177/* Performance critical - called for every packet */
171static inline bool 178static inline bool
172do_match(struct ipt_entry_match *m, const struct sk_buff *skb, 179do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
173 struct xt_match_param *par) 180 struct xt_match_param *par)
174{ 181{
175 par->match = m->u.kernel.match; 182 par->match = m->u.kernel.match;
@@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
184 191
185/* Performance critical */ 192/* Performance critical */
186static inline struct ipt_entry * 193static inline struct ipt_entry *
187get_entry(void *base, unsigned int offset) 194get_entry(const void *base, unsigned int offset)
188{ 195{
189 return (struct ipt_entry *)(base + offset); 196 return (struct ipt_entry *)(base + offset);
190} 197}
@@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip)
199#undef FWINV 206#undef FWINV
200} 207}
201 208
209/* for const-correctness */
210static inline const struct ipt_entry_target *
211ipt_get_target_c(const struct ipt_entry *e)
212{
213 return ipt_get_target((struct ipt_entry *)e);
214}
215
202#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 216#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
203 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 217 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
204static const char *const hooknames[] = { 218static const char *const hooknames[] = {
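ipt_get_target_c() exists so that functions holding a const struct ipt_entry * can fetch the target without repeating a cast at every call site; the single cast away from const is confined to the wrapper. The same idiom in self-contained form, with illustrative names:

struct toy_entry  { unsigned short target_offset; };
struct toy_target { int verdict; };

static struct toy_target *toy_get_target(struct toy_entry *e)
{
	return (struct toy_target *)((char *)e + e->target_offset);
}

/* Const-correct wrapper: callers with a const entry get a const target,
 * and the one cast away from const lives here and nowhere else. */
static const struct toy_target *toy_get_target_c(const struct toy_entry *e)
{
	return toy_get_target((struct toy_entry *)e);
}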
@@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = {
233 247
234/* Mildly perf critical (only if packet tracing is on) */ 248/* Mildly perf critical (only if packet tracing is on) */
235static inline int 249static inline int
236get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, 250get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
237 const char *hookname, const char **chainname, 251 const char *hookname, const char **chainname,
238 const char **comment, unsigned int *rulenum) 252 const char **comment, unsigned int *rulenum)
239{ 253{
240 struct ipt_standard_target *t = (void *)ipt_get_target(s); 254 const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);
241 255
242 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { 256 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
243 /* Head of user chain: ERROR target with chainname */ 257 /* Head of user chain: ERROR target with chainname */
@@ -263,17 +277,18 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
263 return 0; 277 return 0;
264} 278}
265 279
266static void trace_packet(struct sk_buff *skb, 280static void trace_packet(const struct sk_buff *skb,
267 unsigned int hook, 281 unsigned int hook,
268 const struct net_device *in, 282 const struct net_device *in,
269 const struct net_device *out, 283 const struct net_device *out,
270 const char *tablename, 284 const char *tablename,
271 struct xt_table_info *private, 285 const struct xt_table_info *private,
272 struct ipt_entry *e) 286 const struct ipt_entry *e)
273{ 287{
274 void *table_base; 288 const void *table_base;
275 const struct ipt_entry *root; 289 const struct ipt_entry *root;
276 const char *hookname, *chainname, *comment; 290 const char *hookname, *chainname, *comment;
291 const struct ipt_entry *iter;
277 unsigned int rulenum = 0; 292 unsigned int rulenum = 0;
278 293
279 table_base = private->entries[smp_processor_id()]; 294 table_base = private->entries[smp_processor_id()];
@@ -282,10 +297,10 @@ static void trace_packet(struct sk_buff *skb,
282 hookname = chainname = hooknames[hook]; 297 hookname = chainname = hooknames[hook];
283 comment = comments[NF_IP_TRACE_COMMENT_RULE]; 298 comment = comments[NF_IP_TRACE_COMMENT_RULE];
284 299
285 IPT_ENTRY_ITERATE(root, 300 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
286 private->size - private->hook_entry[hook], 301 if (get_chainname_rulenum(iter, e, hookname,
287 get_chainname_rulenum, 302 &chainname, &comment, &rulenum) != 0)
288 e, hookname, &chainname, &comment, &rulenum); 303 break;
289 304
290 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, 305 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
291 "TRACE: %s:%s:%s:%u ", 306 "TRACE: %s:%s:%s:%u ",
@@ -315,9 +330,9 @@ ipt_do_table(struct sk_buff *skb,
315 /* Initializing verdict to NF_DROP keeps gcc happy. */ 330 /* Initializing verdict to NF_DROP keeps gcc happy. */
316 unsigned int verdict = NF_DROP; 331 unsigned int verdict = NF_DROP;
317 const char *indev, *outdev; 332 const char *indev, *outdev;
318 void *table_base; 333 const void *table_base;
319 struct ipt_entry *e, *back; 334 struct ipt_entry *e, *back;
320 struct xt_table_info *private; 335 const struct xt_table_info *private;
321 struct xt_match_param mtpar; 336 struct xt_match_param mtpar;
322 struct xt_target_param tgpar; 337 struct xt_target_param tgpar;
323 338
@@ -350,17 +365,22 @@ ipt_do_table(struct sk_buff *skb,
350 back = get_entry(table_base, private->underflow[hook]); 365 back = get_entry(table_base, private->underflow[hook]);
351 366
352 do { 367 do {
353 struct ipt_entry_target *t; 368 const struct ipt_entry_target *t;
369 const struct xt_entry_match *ematch;
354 370
355 IP_NF_ASSERT(e); 371 IP_NF_ASSERT(e);
356 IP_NF_ASSERT(back); 372 IP_NF_ASSERT(back);
357 if (!ip_packet_match(ip, indev, outdev, 373 if (!ip_packet_match(ip, indev, outdev,
358 &e->ip, mtpar.fragoff) || 374 &e->ip, mtpar.fragoff)) {
359 IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 375 no_match:
360 e = ipt_next_entry(e); 376 e = ipt_next_entry(e);
361 continue; 377 continue;
362 } 378 }
363 379
380 xt_ematch_foreach(ematch, e)
381 if (do_match(ematch, skb, &mtpar) != 0)
382 goto no_match;
383
364 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); 384 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
365 385
366 t = ipt_get_target(e); 386 t = ipt_get_target(e);
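In ipt_do_table() the IPT_MATCH_ITERATE() call that used to sit inside the if-condition becomes an explicit xt_ematch_foreach() loop, and a failed match now jumps to the shared no_match label that advances to the next rule. Below is a self-contained sketch of iterating the variable-sized matches that live between an entry header and its target; the toy_* layout is illustrative, not the real ipt_entry layout.

#include <stdbool.h>
#include <stdint.h>

struct toy_match {
	uint16_t match_size;	/* total size of this match blob */
	/* match payload follows */
};

struct toy_entry {
	uint16_t target_offset;	/* where the target begins, i.e. end of matches */
	uint16_t next_offset;
	/* matches start right after the header */
};

/* Walk every match of one entry, in the spirit of xt_ematch_foreach(). */
#define toy_ematch_foreach(pos, entry)					\
	for ((pos) = (struct toy_match *)((entry) + 1);			\
	     (char *)(pos) < (char *)(entry) + (entry)->target_offset;	\
	     (pos) = (struct toy_match *)((char *)(pos) + (pos)->match_size))

static bool toy_run_match(const struct toy_match *m) { (void)m; return true; }

/* A rule either matches completely or the caller moves on to the next
 * rule, mirroring the new goto no_match flow in ipt_do_table(). */
static bool toy_entry_matches(struct toy_entry *e)
{
	struct toy_match *m;

	toy_ematch_foreach(m, e)
		if (!toy_run_match(m))
			return false;	/* plays the role of goto no_match */
	return true;
}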
@@ -443,7 +463,7 @@ ipt_do_table(struct sk_buff *skb,
443/* Figures out from what hook each rule can be called: returns 0 if 463/* Figures out from what hook each rule can be called: returns 0 if
444 there are loops. Puts hook bitmask in comefrom. */ 464 there are loops. Puts hook bitmask in comefrom. */
445static int 465static int
446mark_source_chains(struct xt_table_info *newinfo, 466mark_source_chains(const struct xt_table_info *newinfo,
447 unsigned int valid_hooks, void *entry0) 467 unsigned int valid_hooks, void *entry0)
448{ 468{
449 unsigned int hook; 469 unsigned int hook;
@@ -461,8 +481,8 @@ mark_source_chains(struct xt_table_info *newinfo,
461 e->counters.pcnt = pos; 481 e->counters.pcnt = pos;
462 482
463 for (;;) { 483 for (;;) {
464 struct ipt_standard_target *t 484 const struct ipt_standard_target *t
465 = (void *)ipt_get_target(e); 485 = (void *)ipt_get_target_c(e);
466 int visited = e->comefrom & (1 << hook); 486 int visited = e->comefrom & (1 << hook);
467 487
468 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 488 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -552,27 +572,23 @@ mark_source_chains(struct xt_table_info *newinfo,
552 return 1; 572 return 1;
553} 573}
554 574
555static int 575static void cleanup_match(struct ipt_entry_match *m, struct net *net)
556cleanup_match(struct ipt_entry_match *m, unsigned int *i)
557{ 576{
558 struct xt_mtdtor_param par; 577 struct xt_mtdtor_param par;
559 578
560 if (i && (*i)-- == 0) 579 par.net = net;
561 return 1;
562
563 par.match = m->u.kernel.match; 580 par.match = m->u.kernel.match;
564 par.matchinfo = m->data; 581 par.matchinfo = m->data;
565 par.family = NFPROTO_IPV4; 582 par.family = NFPROTO_IPV4;
566 if (par.match->destroy != NULL) 583 if (par.match->destroy != NULL)
567 par.match->destroy(&par); 584 par.match->destroy(&par);
568 module_put(par.match->me); 585 module_put(par.match->me);
569 return 0;
570} 586}
571 587
572static int 588static int
573check_entry(struct ipt_entry *e, const char *name) 589check_entry(const struct ipt_entry *e, const char *name)
574{ 590{
575 struct ipt_entry_target *t; 591 const struct ipt_entry_target *t;
576 592
577 if (!ip_checkentry(&e->ip)) { 593 if (!ip_checkentry(&e->ip)) {
578 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 594 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -583,7 +599,7 @@ check_entry(struct ipt_entry *e, const char *name)
583 e->next_offset) 599 e->next_offset)
584 return -EINVAL; 600 return -EINVAL;
585 601
586 t = ipt_get_target(e); 602 t = ipt_get_target_c(e);
587 if (e->target_offset + t->u.target_size > e->next_offset) 603 if (e->target_offset + t->u.target_size > e->next_offset)
588 return -EINVAL; 604 return -EINVAL;
589 605
@@ -591,8 +607,7 @@ check_entry(struct ipt_entry *e, const char *name)
591} 607}
592 608
593static int 609static int
594check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 610check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
595 unsigned int *i)
596{ 611{
597 const struct ipt_ip *ip = par->entryinfo; 612 const struct ipt_ip *ip = par->entryinfo;
598 int ret; 613 int ret;
@@ -607,13 +622,11 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
607 par.match->name); 622 par.match->name);
608 return ret; 623 return ret;
609 } 624 }
610 ++*i;
611 return 0; 625 return 0;
612} 626}
613 627
614static int 628static int
615find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 629find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
616 unsigned int *i)
617{ 630{
618 struct xt_match *match; 631 struct xt_match *match;
619 int ret; 632 int ret;
@@ -627,7 +640,7 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
627 } 640 }
628 m->u.kernel.match = match; 641 m->u.kernel.match = match;
629 642
630 ret = check_match(m, par, i); 643 ret = check_match(m, par);
631 if (ret) 644 if (ret)
632 goto err; 645 goto err;
633 646
@@ -637,10 +650,11 @@ err:
637 return ret; 650 return ret;
638} 651}
639 652
640static int check_target(struct ipt_entry *e, const char *name) 653static int check_target(struct ipt_entry *e, struct net *net, const char *name)
641{ 654{
642 struct ipt_entry_target *t = ipt_get_target(e); 655 struct ipt_entry_target *t = ipt_get_target(e);
643 struct xt_tgchk_param par = { 656 struct xt_tgchk_param par = {
657 .net = net,
644 .table = name, 658 .table = name,
645 .entryinfo = e, 659 .entryinfo = e,
646 .target = t->u.kernel.target, 660 .target = t->u.kernel.target,
@@ -661,27 +675,32 @@ static int check_target(struct ipt_entry *e, const char *name)
661} 675}
662 676
663static int 677static int
664find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, 678find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
665 unsigned int *i) 679 unsigned int size)
666{ 680{
667 struct ipt_entry_target *t; 681 struct ipt_entry_target *t;
668 struct xt_target *target; 682 struct xt_target *target;
669 int ret; 683 int ret;
670 unsigned int j; 684 unsigned int j;
671 struct xt_mtchk_param mtpar; 685 struct xt_mtchk_param mtpar;
686 struct xt_entry_match *ematch;
672 687
673 ret = check_entry(e, name); 688 ret = check_entry(e, name);
674 if (ret) 689 if (ret)
675 return ret; 690 return ret;
676 691
677 j = 0; 692 j = 0;
693 mtpar.net = net;
678 mtpar.table = name; 694 mtpar.table = name;
679 mtpar.entryinfo = &e->ip; 695 mtpar.entryinfo = &e->ip;
680 mtpar.hook_mask = e->comefrom; 696 mtpar.hook_mask = e->comefrom;
681 mtpar.family = NFPROTO_IPV4; 697 mtpar.family = NFPROTO_IPV4;
682 ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 698 xt_ematch_foreach(ematch, e) {
683 if (ret != 0) 699 ret = find_check_match(ematch, &mtpar);
684 goto cleanup_matches; 700 if (ret != 0)
701 goto cleanup_matches;
702 ++j;
703 }
685 704
686 t = ipt_get_target(e); 705 t = ipt_get_target(e);
687 target = try_then_request_module(xt_find_target(AF_INET, 706 target = try_then_request_module(xt_find_target(AF_INET,
@@ -695,27 +714,29 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
695 } 714 }
696 t->u.kernel.target = target; 715 t->u.kernel.target = target;
697 716
698 ret = check_target(e, name); 717 ret = check_target(e, net, name);
699 if (ret) 718 if (ret)
700 goto err; 719 goto err;
701
702 (*i)++;
703 return 0; 720 return 0;
704 err: 721 err:
705 module_put(t->u.kernel.target->me); 722 module_put(t->u.kernel.target->me);
706 cleanup_matches: 723 cleanup_matches:
707 IPT_MATCH_ITERATE(e, cleanup_match, &j); 724 xt_ematch_foreach(ematch, e) {
725 if (j-- == 0)
726 break;
727 cleanup_match(ematch, net);
728 }
708 return ret; 729 return ret;
709} 730}
710 731
711static bool check_underflow(struct ipt_entry *e) 732static bool check_underflow(const struct ipt_entry *e)
712{ 733{
713 const struct ipt_entry_target *t; 734 const struct ipt_entry_target *t;
714 unsigned int verdict; 735 unsigned int verdict;
715 736
716 if (!unconditional(&e->ip)) 737 if (!unconditional(&e->ip))
717 return false; 738 return false;
718 t = ipt_get_target(e); 739 t = ipt_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 740 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
720 return false; 741 return false;
721 verdict = ((struct ipt_standard_target *)t)->verdict; 742 verdict = ((struct ipt_standard_target *)t)->verdict;
@@ -726,12 +747,11 @@ static bool check_underflow(struct ipt_entry *e)
726static int 747static int
727check_entry_size_and_hooks(struct ipt_entry *e, 748check_entry_size_and_hooks(struct ipt_entry *e,
728 struct xt_table_info *newinfo, 749 struct xt_table_info *newinfo,
729 unsigned char *base, 750 const unsigned char *base,
730 unsigned char *limit, 751 const unsigned char *limit,
731 const unsigned int *hook_entries, 752 const unsigned int *hook_entries,
732 const unsigned int *underflows, 753 const unsigned int *underflows,
733 unsigned int valid_hooks, 754 unsigned int valid_hooks)
734 unsigned int *i)
735{ 755{
736 unsigned int h; 756 unsigned int h;
737 757
@@ -768,50 +788,42 @@ check_entry_size_and_hooks(struct ipt_entry *e,
768 /* Clear counters and comefrom */ 788 /* Clear counters and comefrom */
769 e->counters = ((struct xt_counters) { 0, 0 }); 789 e->counters = ((struct xt_counters) { 0, 0 });
770 e->comefrom = 0; 790 e->comefrom = 0;
771
772 (*i)++;
773 return 0; 791 return 0;
774} 792}
775 793
776static int 794static void
777cleanup_entry(struct ipt_entry *e, unsigned int *i) 795cleanup_entry(struct ipt_entry *e, struct net *net)
778{ 796{
779 struct xt_tgdtor_param par; 797 struct xt_tgdtor_param par;
780 struct ipt_entry_target *t; 798 struct ipt_entry_target *t;
781 799 struct xt_entry_match *ematch;
782 if (i && (*i)-- == 0)
783 return 1;
784 800
785 /* Cleanup all matches */ 801 /* Cleanup all matches */
786 IPT_MATCH_ITERATE(e, cleanup_match, NULL); 802 xt_ematch_foreach(ematch, e)
803 cleanup_match(ematch, net);
787 t = ipt_get_target(e); 804 t = ipt_get_target(e);
788 805
806 par.net = net;
789 par.target = t->u.kernel.target; 807 par.target = t->u.kernel.target;
790 par.targinfo = t->data; 808 par.targinfo = t->data;
791 par.family = NFPROTO_IPV4; 809 par.family = NFPROTO_IPV4;
792 if (par.target->destroy != NULL) 810 if (par.target->destroy != NULL)
793 par.target->destroy(&par); 811 par.target->destroy(&par);
794 module_put(par.target->me); 812 module_put(par.target->me);
795 return 0;
796} 813}
797 814
798/* Checks and translates the user-supplied table segment (held in 815/* Checks and translates the user-supplied table segment (held in
799 newinfo) */ 816 newinfo) */
800static int 817static int
801translate_table(const char *name, 818translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
802 unsigned int valid_hooks, 819 const struct ipt_replace *repl)
803 struct xt_table_info *newinfo,
804 void *entry0,
805 unsigned int size,
806 unsigned int number,
807 const unsigned int *hook_entries,
808 const unsigned int *underflows)
809{ 820{
821 struct ipt_entry *iter;
810 unsigned int i; 822 unsigned int i;
811 int ret; 823 int ret = 0;
812 824
813 newinfo->size = size; 825 newinfo->size = repl->size;
814 newinfo->number = number; 826 newinfo->number = repl->num_entries;
815 827
816 /* Init all hooks to impossible value. */ 828 /* Init all hooks to impossible value. */
817 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 829 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -822,49 +834,58 @@ translate_table(const char *name,
822 duprintf("translate_table: size %u\n", newinfo->size); 834 duprintf("translate_table: size %u\n", newinfo->size);
823 i = 0; 835 i = 0;
824 /* Walk through entries, checking offsets. */ 836 /* Walk through entries, checking offsets. */
825 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 837 xt_entry_foreach(iter, entry0, newinfo->size) {
826 check_entry_size_and_hooks, 838 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
827 newinfo, 839 entry0 + repl->size,
828 entry0, 840 repl->hook_entry,
829 entry0 + size, 841 repl->underflow,
830 hook_entries, underflows, valid_hooks, &i); 842 repl->valid_hooks);
831 if (ret != 0) 843 if (ret != 0)
832 return ret; 844 return ret;
845 ++i;
846 }
833 847
834 if (i != number) { 848 if (i != repl->num_entries) {
835 duprintf("translate_table: %u not %u entries\n", 849 duprintf("translate_table: %u not %u entries\n",
836 i, number); 850 i, repl->num_entries);
837 return -EINVAL; 851 return -EINVAL;
838 } 852 }
839 853
840 /* Check hooks all assigned */ 854 /* Check hooks all assigned */
841 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 855 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
842 /* Only hooks which are valid */ 856 /* Only hooks which are valid */
843 if (!(valid_hooks & (1 << i))) 857 if (!(repl->valid_hooks & (1 << i)))
844 continue; 858 continue;
845 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 859 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
846 duprintf("Invalid hook entry %u %u\n", 860 duprintf("Invalid hook entry %u %u\n",
847 i, hook_entries[i]); 861 i, repl->hook_entry[i]);
848 return -EINVAL; 862 return -EINVAL;
849 } 863 }
850 if (newinfo->underflow[i] == 0xFFFFFFFF) { 864 if (newinfo->underflow[i] == 0xFFFFFFFF) {
851 duprintf("Invalid underflow %u %u\n", 865 duprintf("Invalid underflow %u %u\n",
852 i, underflows[i]); 866 i, repl->underflow[i]);
853 return -EINVAL; 867 return -EINVAL;
854 } 868 }
855 } 869 }
856 870
857 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 871 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
858 return -ELOOP; 872 return -ELOOP;
859 873
860 /* Finally, each sanity check must pass */ 874 /* Finally, each sanity check must pass */
861 i = 0; 875 i = 0;
862 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 876 xt_entry_foreach(iter, entry0, newinfo->size) {
863 find_check_entry, name, size, &i); 877 ret = find_check_entry(iter, net, repl->name, repl->size);
878 if (ret != 0)
879 break;
880 ++i;
881 }
864 882
865 if (ret != 0) { 883 if (ret != 0) {
866 IPT_ENTRY_ITERATE(entry0, newinfo->size, 884 xt_entry_foreach(iter, entry0, newinfo->size) {
867 cleanup_entry, &i); 885 if (i-- == 0)
886 break;
887 cleanup_entry(iter, net);
888 }
868 return ret; 889 return ret;
869 } 890 }
870 891
@@ -877,33 +898,11 @@ translate_table(const char *name,
877 return ret; 898 return ret;
878} 899}
879 900
880/* Gets counters. */
881static inline int
882add_entry_to_counter(const struct ipt_entry *e,
883 struct xt_counters total[],
884 unsigned int *i)
885{
886 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
887
888 (*i)++;
889 return 0;
890}
891
892static inline int
893set_entry_to_counter(const struct ipt_entry *e,
894 struct ipt_counters total[],
895 unsigned int *i)
896{
897 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
898
899 (*i)++;
900 return 0;
901}
902
903static void 901static void
904get_counters(const struct xt_table_info *t, 902get_counters(const struct xt_table_info *t,
905 struct xt_counters counters[]) 903 struct xt_counters counters[])
906{ 904{
905 struct ipt_entry *iter;
907 unsigned int cpu; 906 unsigned int cpu;
908 unsigned int i; 907 unsigned int i;
909 unsigned int curcpu; 908 unsigned int curcpu;
@@ -919,32 +918,32 @@ get_counters(const struct xt_table_info *t,
919 curcpu = smp_processor_id(); 918 curcpu = smp_processor_id();
920 919
921 i = 0; 920 i = 0;
922 IPT_ENTRY_ITERATE(t->entries[curcpu], 921 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
923 t->size, 922 SET_COUNTER(counters[i], iter->counters.bcnt,
924 set_entry_to_counter, 923 iter->counters.pcnt);
925 counters, 924 ++i;
926 &i); 925 }
927 926
928 for_each_possible_cpu(cpu) { 927 for_each_possible_cpu(cpu) {
929 if (cpu == curcpu) 928 if (cpu == curcpu)
930 continue; 929 continue;
931 i = 0; 930 i = 0;
932 xt_info_wrlock(cpu); 931 xt_info_wrlock(cpu);
933 IPT_ENTRY_ITERATE(t->entries[cpu], 932 xt_entry_foreach(iter, t->entries[cpu], t->size) {
934 t->size, 933 ADD_COUNTER(counters[i], iter->counters.bcnt,
935 add_entry_to_counter, 934 iter->counters.pcnt);
936 counters, 935 ++i; /* macro does multi eval of i */
937 &i); 936 }
938 xt_info_wrunlock(cpu); 937 xt_info_wrunlock(cpu);
939 } 938 }
940 local_bh_enable(); 939 local_bh_enable();
941} 940}
942 941
943static struct xt_counters * alloc_counters(struct xt_table *table) 942static struct xt_counters *alloc_counters(const struct xt_table *table)
944{ 943{
945 unsigned int countersize; 944 unsigned int countersize;
946 struct xt_counters *counters; 945 struct xt_counters *counters;
947 struct xt_table_info *private = table->private; 946 const struct xt_table_info *private = table->private;
948 947
949 /* We need atomic snapshot of counters: rest doesn't change 948 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care 949 (other than comefrom, which userspace doesn't care
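The get_counters() change in the hunk above drops the set_entry_to_counter()/add_entry_to_counter() callbacks: the snapshot is seeded from the current CPU's entries with SET_COUNTER and the remaining CPUs are folded in with ADD_COUNTER under their per-CPU write lock. A self-contained sketch of that seed-then-accumulate shape, with plain arrays standing in for the per-CPU entry blobs and no locking:

#include <stdint.h>

struct toy_counter { uint64_t bcnt, pcnt; };

/* totals[] is seeded from the current CPU, then the other CPUs are
 * added on top: the shape of the new get_counters() loops. */
static void toy_get_counters(const struct toy_counter per_cpu[][4],
			     unsigned int ncpus, unsigned int curcpu,
			     struct toy_counter totals[4])
{
	for (unsigned int i = 0; i < 4; i++)
		totals[i] = per_cpu[curcpu][i];			/* SET_COUNTER */

	for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
		if (cpu == curcpu)
			continue;
		/* the real code takes xt_info_wrlock(cpu) here */
		for (unsigned int i = 0; i < 4; i++) {
			totals[i].bcnt += per_cpu[cpu][i].bcnt;	/* ADD_COUNTER */
			totals[i].pcnt += per_cpu[cpu][i].pcnt;
		}
	}
}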
@@ -962,11 +961,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
962 961
963static int 962static int
964copy_entries_to_user(unsigned int total_size, 963copy_entries_to_user(unsigned int total_size,
965 struct xt_table *table, 964 const struct xt_table *table,
966 void __user *userptr) 965 void __user *userptr)
967{ 966{
968 unsigned int off, num; 967 unsigned int off, num;
969 struct ipt_entry *e; 968 const struct ipt_entry *e;
970 struct xt_counters *counters; 969 struct xt_counters *counters;
971 const struct xt_table_info *private = table->private; 970 const struct xt_table_info *private = table->private;
972 int ret = 0; 971 int ret = 0;
@@ -1018,7 +1017,7 @@ copy_entries_to_user(unsigned int total_size,
1018 } 1017 }
1019 } 1018 }
1020 1019
1021 t = ipt_get_target(e); 1020 t = ipt_get_target_c(e);
1022 if (copy_to_user(userptr + off + e->target_offset 1021 if (copy_to_user(userptr + off + e->target_offset
1023 + offsetof(struct ipt_entry_target, 1022 + offsetof(struct ipt_entry_target,
1024 u.user.name), 1023 u.user.name),
@@ -1035,7 +1034,7 @@ copy_entries_to_user(unsigned int total_size,
1035} 1034}
1036 1035
1037#ifdef CONFIG_COMPAT 1036#ifdef CONFIG_COMPAT
1038static void compat_standard_from_user(void *dst, void *src) 1037static void compat_standard_from_user(void *dst, const void *src)
1039{ 1038{
1040 int v = *(compat_int_t *)src; 1039 int v = *(compat_int_t *)src;
1041 1040
@@ -1044,7 +1043,7 @@ static void compat_standard_from_user(void *dst, void *src)
1044 memcpy(dst, &v, sizeof(v)); 1043 memcpy(dst, &v, sizeof(v));
1045} 1044}
1046 1045
1047static int compat_standard_to_user(void __user *dst, void *src) 1046static int compat_standard_to_user(void __user *dst, const void *src)
1048{ 1047{
1049 compat_int_t cv = *(int *)src; 1048 compat_int_t cv = *(int *)src;
1050 1049
@@ -1053,25 +1052,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1053 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1052 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1054} 1053}
1055 1054
1056static inline int 1055static int compat_calc_entry(const struct ipt_entry *e,
1057compat_calc_match(struct ipt_entry_match *m, int *size)
1058{
1059 *size += xt_compat_match_offset(m->u.kernel.match);
1060 return 0;
1061}
1062
1063static int compat_calc_entry(struct ipt_entry *e,
1064 const struct xt_table_info *info, 1056 const struct xt_table_info *info,
1065 void *base, struct xt_table_info *newinfo) 1057 const void *base, struct xt_table_info *newinfo)
1066{ 1058{
1067 struct ipt_entry_target *t; 1059 const struct xt_entry_match *ematch;
1060 const struct ipt_entry_target *t;
1068 unsigned int entry_offset; 1061 unsigned int entry_offset;
1069 int off, i, ret; 1062 int off, i, ret;
1070 1063
1071 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1064 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1072 entry_offset = (void *)e - base; 1065 entry_offset = (void *)e - base;
1073 IPT_MATCH_ITERATE(e, compat_calc_match, &off); 1066 xt_ematch_foreach(ematch, e)
1074 t = ipt_get_target(e); 1067 off += xt_compat_match_offset(ematch->u.kernel.match);
1068 t = ipt_get_target_c(e);
1075 off += xt_compat_target_offset(t->u.kernel.target); 1069 off += xt_compat_target_offset(t->u.kernel.target);
1076 newinfo->size -= off; 1070 newinfo->size -= off;
1077 ret = xt_compat_add_offset(AF_INET, entry_offset, off); 1071 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
@@ -1092,7 +1086,9 @@ static int compat_calc_entry(struct ipt_entry *e,
1092static int compat_table_info(const struct xt_table_info *info, 1086static int compat_table_info(const struct xt_table_info *info,
1093 struct xt_table_info *newinfo) 1087 struct xt_table_info *newinfo)
1094{ 1088{
1089 struct ipt_entry *iter;
1095 void *loc_cpu_entry; 1090 void *loc_cpu_entry;
1091 int ret;
1096 1092
1097 if (!newinfo || !info) 1093 if (!newinfo || !info)
1098 return -EINVAL; 1094 return -EINVAL;
@@ -1101,13 +1097,17 @@ static int compat_table_info(const struct xt_table_info *info,
1101 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1097 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1102 newinfo->initial_entries = 0; 1098 newinfo->initial_entries = 0;
1103 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1099 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1104 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 1100 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1105 compat_calc_entry, info, loc_cpu_entry, 1101 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1106 newinfo); 1102 if (ret != 0)
1103 return ret;
1104 }
1105 return 0;
1107} 1106}
1108#endif 1107#endif
1109 1108
1110static int get_info(struct net *net, void __user *user, int *len, int compat) 1109static int get_info(struct net *net, void __user *user,
1110 const int *len, int compat)
1111{ 1111{
1112 char name[IPT_TABLE_MAXNAMELEN]; 1112 char name[IPT_TABLE_MAXNAMELEN];
1113 struct xt_table *t; 1113 struct xt_table *t;
@@ -1167,7 +1167,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1167} 1167}
1168 1168
1169static int 1169static int
1170get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len) 1170get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1171 const int *len)
1171{ 1172{
1172 int ret; 1173 int ret;
1173 struct ipt_get_entries get; 1174 struct ipt_get_entries get;
@@ -1215,6 +1216,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *oldinfo; 1216 struct xt_table_info *oldinfo;
1216 struct xt_counters *counters; 1217 struct xt_counters *counters;
1217 void *loc_cpu_old_entry; 1218 void *loc_cpu_old_entry;
1219 struct ipt_entry *iter;
1218 1220
1219 ret = 0; 1221 ret = 0;
1220 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1222 counters = vmalloc(num_counters * sizeof(struct xt_counters));
@@ -1257,8 +1259,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1257 1259
1258 /* Decrease module usage counts and free resource */ 1260 /* Decrease module usage counts and free resource */
1259 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1261 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1260 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1262 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1261 NULL); 1263 cleanup_entry(iter, net);
1264
1262 xt_free_table_info(oldinfo); 1265 xt_free_table_info(oldinfo);
1263 if (copy_to_user(counters_ptr, counters, 1266 if (copy_to_user(counters_ptr, counters,
1264 sizeof(struct xt_counters) * num_counters) != 0) 1267 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1277,12 +1280,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1277} 1280}
1278 1281
1279static int 1282static int
1280do_replace(struct net *net, void __user *user, unsigned int len) 1283do_replace(struct net *net, const void __user *user, unsigned int len)
1281{ 1284{
1282 int ret; 1285 int ret;
1283 struct ipt_replace tmp; 1286 struct ipt_replace tmp;
1284 struct xt_table_info *newinfo; 1287 struct xt_table_info *newinfo;
1285 void *loc_cpu_entry; 1288 void *loc_cpu_entry;
1289 struct ipt_entry *iter;
1286 1290
1287 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1291 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1288 return -EFAULT; 1292 return -EFAULT;
@@ -1303,9 +1307,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1303 goto free_newinfo; 1307 goto free_newinfo;
1304 } 1308 }
1305 1309
1306 ret = translate_table(tmp.name, tmp.valid_hooks, 1310 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1307 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1308 tmp.hook_entry, tmp.underflow);
1309 if (ret != 0) 1311 if (ret != 0)
1310 goto free_newinfo; 1312 goto free_newinfo;
1311 1313
@@ -1318,27 +1320,16 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1318 return 0; 1320 return 0;
1319 1321
1320 free_newinfo_untrans: 1322 free_newinfo_untrans:
1321 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1323 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1324 cleanup_entry(iter, net);
1322 free_newinfo: 1325 free_newinfo:
1323 xt_free_table_info(newinfo); 1326 xt_free_table_info(newinfo);
1324 return ret; 1327 return ret;
1325} 1328}
1326 1329
1327/* We're lazy, and add to the first CPU; overflow works its fey magic
1328 * and everything is OK. */
1329static int 1330static int
1330add_counter_to_entry(struct ipt_entry *e, 1331do_add_counters(struct net *net, const void __user *user,
1331 const struct xt_counters addme[], 1332 unsigned int len, int compat)
1332 unsigned int *i)
1333{
1334 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1335
1336 (*i)++;
1337 return 0;
1338}
1339
1340static int
1341do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1342{ 1333{
1343 unsigned int i, curcpu; 1334 unsigned int i, curcpu;
1344 struct xt_counters_info tmp; 1335 struct xt_counters_info tmp;
@@ -1351,6 +1342,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1351 const struct xt_table_info *private; 1342 const struct xt_table_info *private;
1352 int ret = 0; 1343 int ret = 0;
1353 void *loc_cpu_entry; 1344 void *loc_cpu_entry;
1345 struct ipt_entry *iter;
1354#ifdef CONFIG_COMPAT 1346#ifdef CONFIG_COMPAT
1355 struct compat_xt_counters_info compat_tmp; 1347 struct compat_xt_counters_info compat_tmp;
1356 1348
@@ -1408,11 +1400,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1408 curcpu = smp_processor_id(); 1400 curcpu = smp_processor_id();
1409 loc_cpu_entry = private->entries[curcpu]; 1401 loc_cpu_entry = private->entries[curcpu];
1410 xt_info_wrlock(curcpu); 1402 xt_info_wrlock(curcpu);
1411 IPT_ENTRY_ITERATE(loc_cpu_entry, 1403 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1412 private->size, 1404 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1413 add_counter_to_entry, 1405 ++i;
1414 paddc, 1406 }
1415 &i);
1416 xt_info_wrunlock(curcpu); 1407 xt_info_wrunlock(curcpu);
1417 unlock_up_free: 1408 unlock_up_free:
1418 local_bh_enable(); 1409 local_bh_enable();
@@ -1440,45 +1431,40 @@ struct compat_ipt_replace {
1440static int 1431static int
1441compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, 1432compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1442 unsigned int *size, struct xt_counters *counters, 1433 unsigned int *size, struct xt_counters *counters,
1443 unsigned int *i) 1434 unsigned int i)
1444{ 1435{
1445 struct ipt_entry_target *t; 1436 struct ipt_entry_target *t;
1446 struct compat_ipt_entry __user *ce; 1437 struct compat_ipt_entry __user *ce;
1447 u_int16_t target_offset, next_offset; 1438 u_int16_t target_offset, next_offset;
1448 compat_uint_t origsize; 1439 compat_uint_t origsize;
1449 int ret; 1440 const struct xt_entry_match *ematch;
1441 int ret = 0;
1450 1442
1451 ret = -EFAULT;
1452 origsize = *size; 1443 origsize = *size;
1453 ce = (struct compat_ipt_entry __user *)*dstptr; 1444 ce = (struct compat_ipt_entry __user *)*dstptr;
1454 if (copy_to_user(ce, e, sizeof(struct ipt_entry))) 1445 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1455 goto out; 1446 copy_to_user(&ce->counters, &counters[i],
1456 1447 sizeof(counters[i])) != 0)
1457 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1448 return -EFAULT;
1458 goto out;
1459 1449
1460 *dstptr += sizeof(struct compat_ipt_entry); 1450 *dstptr += sizeof(struct compat_ipt_entry);
1461 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1451 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1462 1452
1463 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1453 xt_ematch_foreach(ematch, e) {
1454 ret = xt_compat_match_to_user(ematch, dstptr, size);
1455 if (ret != 0)
1456 return ret;
1457 }
1464 target_offset = e->target_offset - (origsize - *size); 1458 target_offset = e->target_offset - (origsize - *size);
1465 if (ret)
1466 goto out;
1467 t = ipt_get_target(e); 1459 t = ipt_get_target(e);
1468 ret = xt_compat_target_to_user(t, dstptr, size); 1460 ret = xt_compat_target_to_user(t, dstptr, size);
1469 if (ret) 1461 if (ret)
1470 goto out; 1462 return ret;
1471 ret = -EFAULT;
1472 next_offset = e->next_offset - (origsize - *size); 1463 next_offset = e->next_offset - (origsize - *size);
1473 if (put_user(target_offset, &ce->target_offset)) 1464 if (put_user(target_offset, &ce->target_offset) != 0 ||
1474 goto out; 1465 put_user(next_offset, &ce->next_offset) != 0)
1475 if (put_user(next_offset, &ce->next_offset)) 1466 return -EFAULT;
1476 goto out;
1477
1478 (*i)++;
1479 return 0; 1467 return 0;
1480out:
1481 return ret;
1482} 1468}
1483 1469
1484static int 1470static int
@@ -1486,7 +1472,7 @@ compat_find_calc_match(struct ipt_entry_match *m,
1486 const char *name, 1472 const char *name,
1487 const struct ipt_ip *ip, 1473 const struct ipt_ip *ip,
1488 unsigned int hookmask, 1474 unsigned int hookmask,
1489 int *size, unsigned int *i) 1475 int *size)
1490{ 1476{
1491 struct xt_match *match; 1477 struct xt_match *match;
1492 1478
@@ -1500,47 +1486,32 @@ compat_find_calc_match(struct ipt_entry_match *m,
1500 } 1486 }
1501 m->u.kernel.match = match; 1487 m->u.kernel.match = match;
1502 *size += xt_compat_match_offset(match); 1488 *size += xt_compat_match_offset(match);
1503
1504 (*i)++;
1505 return 0;
1506}
1507
1508static int
1509compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1510{
1511 if (i && (*i)-- == 0)
1512 return 1;
1513
1514 module_put(m->u.kernel.match->me);
1515 return 0; 1489 return 0;
1516} 1490}
1517 1491
1518static int 1492static void compat_release_entry(struct compat_ipt_entry *e)
1519compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1520{ 1493{
1521 struct ipt_entry_target *t; 1494 struct ipt_entry_target *t;
1522 1495 struct xt_entry_match *ematch;
1523 if (i && (*i)-- == 0)
1524 return 1;
1525 1496
1526 /* Cleanup all matches */ 1497 /* Cleanup all matches */
1527 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL); 1498 xt_ematch_foreach(ematch, e)
1499 module_put(ematch->u.kernel.match->me);
1528 t = compat_ipt_get_target(e); 1500 t = compat_ipt_get_target(e);
1529 module_put(t->u.kernel.target->me); 1501 module_put(t->u.kernel.target->me);
1530 return 0;
1531} 1502}
1532 1503
1533static int 1504static int
1534check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, 1505check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1535 struct xt_table_info *newinfo, 1506 struct xt_table_info *newinfo,
1536 unsigned int *size, 1507 unsigned int *size,
1537 unsigned char *base, 1508 const unsigned char *base,
1538 unsigned char *limit, 1509 const unsigned char *limit,
1539 unsigned int *hook_entries, 1510 const unsigned int *hook_entries,
1540 unsigned int *underflows, 1511 const unsigned int *underflows,
1541 unsigned int *i,
1542 const char *name) 1512 const char *name)
1543{ 1513{
1514 struct xt_entry_match *ematch;
1544 struct ipt_entry_target *t; 1515 struct ipt_entry_target *t;
1545 struct xt_target *target; 1516 struct xt_target *target;
1546 unsigned int entry_offset; 1517 unsigned int entry_offset;
@@ -1569,10 +1540,13 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1569 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1540 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1570 entry_offset = (void *)e - (void *)base; 1541 entry_offset = (void *)e - (void *)base;
1571 j = 0; 1542 j = 0;
1572 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name, 1543 xt_ematch_foreach(ematch, e) {
1573 &e->ip, e->comefrom, &off, &j); 1544 ret = compat_find_calc_match(ematch, name,
1574 if (ret != 0) 1545 &e->ip, e->comefrom, &off);
1575 goto release_matches; 1546 if (ret != 0)
1547 goto release_matches;
1548 ++j;
1549 }
1576 1550
1577 t = compat_ipt_get_target(e); 1551 t = compat_ipt_get_target(e);
1578 target = try_then_request_module(xt_find_target(AF_INET, 1552 target = try_then_request_module(xt_find_target(AF_INET,
@@ -1604,14 +1578,16 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1604 /* Clear counters and comefrom */ 1578 /* Clear counters and comefrom */
1605 memset(&e->counters, 0, sizeof(e->counters)); 1579 memset(&e->counters, 0, sizeof(e->counters));
1606 e->comefrom = 0; 1580 e->comefrom = 0;
1607
1608 (*i)++;
1609 return 0; 1581 return 0;
1610 1582
1611out: 1583out:
1612 module_put(t->u.kernel.target->me); 1584 module_put(t->u.kernel.target->me);
1613release_matches: 1585release_matches:
1614 IPT_MATCH_ITERATE(e, compat_release_match, &j); 1586 xt_ematch_foreach(ematch, e) {
1587 if (j-- == 0)
1588 break;
1589 module_put(ematch->u.kernel.match->me);
1590 }
1615 return ret; 1591 return ret;
1616} 1592}
1617 1593
@@ -1625,6 +1601,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1625 struct ipt_entry *de; 1601 struct ipt_entry *de;
1626 unsigned int origsize; 1602 unsigned int origsize;
1627 int ret, h; 1603 int ret, h;
1604 struct xt_entry_match *ematch;
1628 1605
1629 ret = 0; 1606 ret = 0;
1630 origsize = *size; 1607 origsize = *size;
@@ -1635,10 +1612,11 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1635 *dstptr += sizeof(struct ipt_entry); 1612 *dstptr += sizeof(struct ipt_entry);
1636 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1613 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1637 1614
1638 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user, 1615 xt_ematch_foreach(ematch, e) {
1639 dstptr, size); 1616 ret = xt_compat_match_from_user(ematch, dstptr, size);
1640 if (ret) 1617 if (ret != 0)
1641 return ret; 1618 return ret;
1619 }
1642 de->target_offset = e->target_offset - (origsize - *size); 1620 de->target_offset = e->target_offset - (origsize - *size);
1643 t = compat_ipt_get_target(e); 1621 t = compat_ipt_get_target(e);
1644 target = t->u.kernel.target; 1622 target = t->u.kernel.target;
@@ -1655,36 +1633,43 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1655} 1633}
1656 1634
1657static int 1635static int
1658compat_check_entry(struct ipt_entry *e, const char *name, 1636compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
1659 unsigned int *i)
1660{ 1637{
1638 struct xt_entry_match *ematch;
1661 struct xt_mtchk_param mtpar; 1639 struct xt_mtchk_param mtpar;
1662 unsigned int j; 1640 unsigned int j;
1663 int ret; 1641 int ret = 0;
1664 1642
1665 j = 0; 1643 j = 0;
1644 mtpar.net = net;
1666 mtpar.table = name; 1645 mtpar.table = name;
1667 mtpar.entryinfo = &e->ip; 1646 mtpar.entryinfo = &e->ip;
1668 mtpar.hook_mask = e->comefrom; 1647 mtpar.hook_mask = e->comefrom;
1669 mtpar.family = NFPROTO_IPV4; 1648 mtpar.family = NFPROTO_IPV4;
1670 ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j); 1649 xt_ematch_foreach(ematch, e) {
1671 if (ret) 1650 ret = check_match(ematch, &mtpar);
1672 goto cleanup_matches; 1651 if (ret != 0)
1652 goto cleanup_matches;
1653 ++j;
1654 }
1673 1655
1674 ret = check_target(e, name); 1656 ret = check_target(e, net, name);
1675 if (ret) 1657 if (ret)
1676 goto cleanup_matches; 1658 goto cleanup_matches;
1677
1678 (*i)++;
1679 return 0; 1659 return 0;
1680 1660
1681 cleanup_matches: 1661 cleanup_matches:
1682 IPT_MATCH_ITERATE(e, cleanup_match, &j); 1662 xt_ematch_foreach(ematch, e) {
1663 if (j-- == 0)
1664 break;
1665 cleanup_match(ematch, net);
1666 }
1683 return ret; 1667 return ret;
1684} 1668}
1685 1669
1686static int 1670static int
1687translate_compat_table(const char *name, 1671translate_compat_table(struct net *net,
1672 const char *name,
1688 unsigned int valid_hooks, 1673 unsigned int valid_hooks,
1689 struct xt_table_info **pinfo, 1674 struct xt_table_info **pinfo,
1690 void **pentry0, 1675 void **pentry0,
@@ -1696,6 +1681,8 @@ translate_compat_table(const char *name,
1696 unsigned int i, j; 1681 unsigned int i, j;
1697 struct xt_table_info *newinfo, *info; 1682 struct xt_table_info *newinfo, *info;
1698 void *pos, *entry0, *entry1; 1683 void *pos, *entry0, *entry1;
1684 struct compat_ipt_entry *iter0;
1685 struct ipt_entry *iter1;
1699 unsigned int size; 1686 unsigned int size;
1700 int ret; 1687 int ret;
1701 1688
@@ -1714,13 +1701,17 @@ translate_compat_table(const char *name,
1714 j = 0; 1701 j = 0;
1715 xt_compat_lock(AF_INET); 1702 xt_compat_lock(AF_INET);
1716 /* Walk through entries, checking offsets. */ 1703 /* Walk through entries, checking offsets. */
1717 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1704 xt_entry_foreach(iter0, entry0, total_size) {
1718 check_compat_entry_size_and_hooks, 1705 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1719 info, &size, entry0, 1706 entry0,
1720 entry0 + total_size, 1707 entry0 + total_size,
1721 hook_entries, underflows, &j, name); 1708 hook_entries,
1722 if (ret != 0) 1709 underflows,
1723 goto out_unlock; 1710 name);
1711 if (ret != 0)
1712 goto out_unlock;
1713 ++j;
1714 }
1724 1715
1725 ret = -EINVAL; 1716 ret = -EINVAL;
1726 if (j != number) { 1717 if (j != number) {
@@ -1759,9 +1750,12 @@ translate_compat_table(const char *name,
1759 entry1 = newinfo->entries[raw_smp_processor_id()]; 1750 entry1 = newinfo->entries[raw_smp_processor_id()];
1760 pos = entry1; 1751 pos = entry1;
1761 size = total_size; 1752 size = total_size;
1762 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1753 xt_entry_foreach(iter0, entry0, total_size) {
1763 compat_copy_entry_from_user, 1754 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1764 &pos, &size, name, newinfo, entry1); 1755 name, newinfo, entry1);
1756 if (ret != 0)
1757 break;
1758 }
1765 xt_compat_flush_offsets(AF_INET); 1759 xt_compat_flush_offsets(AF_INET);
1766 xt_compat_unlock(AF_INET); 1760 xt_compat_unlock(AF_INET);
1767 if (ret) 1761 if (ret)
@@ -1772,13 +1766,32 @@ translate_compat_table(const char *name,
1772 goto free_newinfo; 1766 goto free_newinfo;
1773 1767
1774 i = 0; 1768 i = 0;
1775 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1769 xt_entry_foreach(iter1, entry1, newinfo->size) {
1776 name, &i); 1770 ret = compat_check_entry(iter1, net, name);
1771 if (ret != 0)
1772 break;
1773 ++i;
1774 }
1777 if (ret) { 1775 if (ret) {
1776 /*
1777 * The first i matches need cleanup_entry (calls ->destroy)
1778 * because they had called ->check already. The other j-i
1779 * entries need only release.
1780 */
1781 int skip = i;
1778 j -= i; 1782 j -= i;
1779 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1783 xt_entry_foreach(iter0, entry0, newinfo->size) {
1780 compat_release_entry, &j); 1784 if (skip-- > 0)
1781 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1785 continue;
1786 if (j-- == 0)
1787 break;
1788 compat_release_entry(iter0);
1789 }
1790 xt_entry_foreach(iter1, entry1, newinfo->size) {
1791 if (i-- == 0)
1792 break;
1793 cleanup_entry(iter1, net);
1794 }
1782 xt_free_table_info(newinfo); 1795 xt_free_table_info(newinfo);
1783 return ret; 1796 return ret;
1784 } 1797 }
@@ -1796,7 +1809,11 @@ translate_compat_table(const char *name,
1796free_newinfo: 1809free_newinfo:
1797 xt_free_table_info(newinfo); 1810 xt_free_table_info(newinfo);
1798out: 1811out:
1799 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1812 xt_entry_foreach(iter0, entry0, total_size) {
1813 if (j-- == 0)
1814 break;
1815 compat_release_entry(iter0);
1816 }
1800 return ret; 1817 return ret;
1801out_unlock: 1818out_unlock:
1802 xt_compat_flush_offsets(AF_INET); 1819 xt_compat_flush_offsets(AF_INET);
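
When compat_check_entry() fails partway through the translated table, the error path above has to unwind two different things: the first i entries already ran ->check and need full cleanup_entry(), while the remaining j-i compat entries only hold module references and just need compat_release_entry(). Spelled out as a helper (example_unwind is an illustrative name; the logic and the helpers are the ones in the hunk above, with the bookkeeping commented):

static void example_unwind(struct net *net,
                           void *entry0, void *entry1,
                           struct xt_table_info *newinfo,
                           unsigned int i, unsigned int j)
{
        struct compat_ipt_entry *iter0;
        struct ipt_entry *iter1;
        int skip = i;                   /* entries that passed ->check */

        j -= i;                         /* tail entries holding bare refs */
        xt_entry_foreach(iter0, entry0, newinfo->size) {
                if (skip-- > 0)         /* handled below via cleanup_entry() */
                        continue;
                if (j-- == 0)
                        break;
                compat_release_entry(iter0);
        }
        xt_entry_foreach(iter1, entry1, newinfo->size) {
                if (i-- == 0)
                        break;
                cleanup_entry(iter1, net);
        }
        xt_free_table_info(newinfo);
}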
@@ -1811,6 +1828,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1811 struct compat_ipt_replace tmp; 1828 struct compat_ipt_replace tmp;
1812 struct xt_table_info *newinfo; 1829 struct xt_table_info *newinfo;
1813 void *loc_cpu_entry; 1830 void *loc_cpu_entry;
1831 struct ipt_entry *iter;
1814 1832
1815 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1816 return -EFAULT; 1834 return -EFAULT;
@@ -1833,7 +1851,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1833 goto free_newinfo; 1851 goto free_newinfo;
1834 } 1852 }
1835 1853
1836 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1854 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1837 &newinfo, &loc_cpu_entry, tmp.size, 1855 &newinfo, &loc_cpu_entry, tmp.size,
1838 tmp.num_entries, tmp.hook_entry, 1856 tmp.num_entries, tmp.hook_entry,
1839 tmp.underflow); 1857 tmp.underflow);
@@ -1849,7 +1867,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1849 return 0; 1867 return 0;
1850 1868
1851 free_newinfo_untrans: 1869 free_newinfo_untrans:
1852 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1870 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1871 cleanup_entry(iter, net);
1853 free_newinfo: 1872 free_newinfo:
1854 xt_free_table_info(newinfo); 1873 xt_free_table_info(newinfo);
1855 return ret; 1874 return ret;
@@ -1898,6 +1917,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1898 int ret = 0; 1917 int ret = 0;
1899 const void *loc_cpu_entry; 1918 const void *loc_cpu_entry;
1900 unsigned int i = 0; 1919 unsigned int i = 0;
1920 struct ipt_entry *iter;
1901 1921
1902 counters = alloc_counters(table); 1922 counters = alloc_counters(table);
1903 if (IS_ERR(counters)) 1923 if (IS_ERR(counters))
@@ -1910,9 +1930,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1910 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1930 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1911 pos = userptr; 1931 pos = userptr;
1912 size = total_size; 1932 size = total_size;
1913 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1933 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1914 compat_copy_entry_to_user, 1934 ret = compat_copy_entry_to_user(iter, &pos,
1915 &pos, &size, counters, &i); 1935 &size, counters, i++);
1936 if (ret != 0)
1937 break;
1938 }
1916 1939
1917 vfree(counters); 1940 vfree(counters);
1918 return ret; 1941 return ret;
@@ -2086,11 +2109,7 @@ struct xt_table *ipt_register_table(struct net *net,
2086 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2109 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2087 memcpy(loc_cpu_entry, repl->entries, repl->size); 2110 memcpy(loc_cpu_entry, repl->entries, repl->size);
2088 2111
2089 ret = translate_table(table->name, table->valid_hooks, 2112 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2090 newinfo, loc_cpu_entry, repl->size,
2091 repl->num_entries,
2092 repl->hook_entry,
2093 repl->underflow);
2094 if (ret != 0) 2113 if (ret != 0)
2095 goto out_free; 2114 goto out_free;
2096 2115
@@ -2108,17 +2127,19 @@ out:
2108 return ERR_PTR(ret); 2127 return ERR_PTR(ret);
2109} 2128}
2110 2129
2111void ipt_unregister_table(struct xt_table *table) 2130void ipt_unregister_table(struct net *net, struct xt_table *table)
2112{ 2131{
2113 struct xt_table_info *private; 2132 struct xt_table_info *private;
2114 void *loc_cpu_entry; 2133 void *loc_cpu_entry;
2115 struct module *table_owner = table->me; 2134 struct module *table_owner = table->me;
2135 struct ipt_entry *iter;
2116 2136
2117 private = xt_unregister_table(table); 2137 private = xt_unregister_table(table);
2118 2138
2119 /* Decrease module usage counts and free resources */ 2139 /* Decrease module usage counts and free resources */
2120 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2140 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2121 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2141 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2142 cleanup_entry(iter, net);
2122 if (private->number > private->initial_entries) 2143 if (private->number > private->initial_entries)
2123 module_put(table_owner); 2144 module_put(table_owner);
2124 xt_free_table_info(private); 2145 xt_free_table_info(private);
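
All of these conversions lean on the xt_entry_foreach() iterator rather than the old IPT_ENTRY_ITERATE() callback macro: entries are variable-sized records packed back to back, each carrying its own length in ->next_offset, so the iterator is just a bounded for loop over the blob. Roughly (a simplified rendering; the real definition lives in linux/netfilter/x_tables.h):

#define xt_entry_foreach(pos, ehead, esize) \
        for ((pos) = (typeof(pos))(ehead); \
             (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
             (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))

xt_ematch_foreach() works the same way for the matches inside a single entry, bounded by ->target_offset instead of an explicit size.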
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 40ca2d240abb..ab828400ed71 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -14,6 +14,7 @@
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <linux/ip.h> 18#include <linux/ip.h>
18#include <linux/tcp.h> 19#include <linux/tcp.h>
19#include <linux/udp.h> 20#include <linux/udp.h>
@@ -560,8 +561,7 @@ struct clusterip_seq_position {
560 561
561static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) 562static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
562{ 563{
563 const struct proc_dir_entry *pde = s->private; 564 struct clusterip_config *c = s->private;
564 struct clusterip_config *c = pde->data;
565 unsigned int weight; 565 unsigned int weight;
566 u_int32_t local_nodes; 566 u_int32_t local_nodes;
567 struct clusterip_seq_position *idx; 567 struct clusterip_seq_position *idx;
@@ -632,10 +632,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
632 632
633 if (!ret) { 633 if (!ret) {
634 struct seq_file *sf = file->private_data; 634 struct seq_file *sf = file->private_data;
635 struct proc_dir_entry *pde = PDE(inode); 635 struct clusterip_config *c = PDE(inode)->data;
636 struct clusterip_config *c = pde->data;
637 636
638 sf->private = pde; 637 sf->private = c;
639 638
640 clusterip_config_get(c); 639 clusterip_config_get(c);
641 } 640 }
@@ -645,8 +644,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
645 644
646static int clusterip_proc_release(struct inode *inode, struct file *file) 645static int clusterip_proc_release(struct inode *inode, struct file *file)
647{ 646{
648 struct proc_dir_entry *pde = PDE(inode); 647 struct clusterip_config *c = PDE(inode)->data;
649 struct clusterip_config *c = pde->data;
650 int ret; 648 int ret;
651 649
652 ret = seq_release(inode, file); 650 ret = seq_release(inode, file);
@@ -660,10 +658,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
660static ssize_t clusterip_proc_write(struct file *file, const char __user *input, 658static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
661 size_t size, loff_t *ofs) 659 size_t size, loff_t *ofs)
662{ 660{
661 struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
663#define PROC_WRITELEN 10 662#define PROC_WRITELEN 10
664 char buffer[PROC_WRITELEN+1]; 663 char buffer[PROC_WRITELEN+1];
665 const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
666 struct clusterip_config *c = pde->data;
667 unsigned long nodenum; 664 unsigned long nodenum;
668 665
669 if (copy_from_user(buffer, input, PROC_WRITELEN)) 666 if (copy_from_user(buffer, input, PROC_WRITELEN))
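
The CLUSTERIP /proc hunks drop one level of indirection: instead of stashing the proc_dir_entry in seq_file->private and chasing pde->data in every callback, the clusterip_config pointer itself is stored at open time, so start/write/release dereference it directly. A sketch of that shape (example_proc_open, example_seq_start and example_seq_ops are placeholder names, not the CLUSTERIP originals):

static int example_proc_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &example_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                struct clusterip_config *c = PDE(inode)->data;

                sf->private = c;                /* was: sf->private = pde */
                clusterip_config_get(c);        /* hold it while the file is open */
        }
        return ret;
}

static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
        struct clusterip_config *c = s->private;  /* no PDE() lookup needed */

        return *pos == 0 ? c : NULL;              /* illustrative only */
}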
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 5113b8f1a379..a0e8bcf04159 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <linux/ip.h> 16#include <linux/ip.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <linux/icmp.h> 18#include <linux/icmp.h>
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 399061c3fd7d..0dbe697f164f 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/socket.h> 35#include <linux/socket.h>
36#include <linux/slab.h>
36#include <linux/skbuff.h> 37#include <linux/skbuff.h>
37#include <linux/kernel.h> 38#include <linux/kernel.h>
38#include <linux/timer.h> 39#include <linux/timer.h>
@@ -338,7 +339,7 @@ struct compat_ipt_ulog_info {
338 char prefix[ULOG_PREFIX_LEN]; 339 char prefix[ULOG_PREFIX_LEN];
339}; 340};
340 341
341static void ulog_tg_compat_from_user(void *dst, void *src) 342static void ulog_tg_compat_from_user(void *dst, const void *src)
342{ 343{
343 const struct compat_ipt_ulog_info *cl = src; 344 const struct compat_ipt_ulog_info *cl = src;
344 struct ipt_ulog_info l = { 345 struct ipt_ulog_info l = {
@@ -351,7 +352,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src)
351 memcpy(dst, &l, sizeof(l)); 352 memcpy(dst, &l, sizeof(l));
352} 353}
353 354
354static int ulog_tg_compat_to_user(void __user *dst, void *src) 355static int ulog_tg_compat_to_user(void __user *dst, const void *src)
355{ 356{
356 const struct ipt_ulog_info *l = src; 357 const struct ipt_ulog_info *l = src;
357 struct compat_ipt_ulog_info cl = { 358 struct compat_ipt_ulog_info cl = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index df566cbd68e5..55392466daa4 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/netfilter_ipv4/ip_tables.h> 15#include <linux/netfilter_ipv4/ip_tables.h>
16#include <linux/slab.h>
16#include <net/ip.h> 17#include <net/ip.h>
17 18
18MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
@@ -23,104 +24,32 @@ MODULE_DESCRIPTION("iptables filter table");
23 (1 << NF_INET_FORWARD) | \ 24 (1 << NF_INET_FORWARD) | \
24 (1 << NF_INET_LOCAL_OUT)) 25 (1 << NF_INET_LOCAL_OUT))
25 26
26static struct
27{
28 struct ipt_replace repl;
29 struct ipt_standard entries[3];
30 struct ipt_error term;
31} initial_table __net_initdata = {
32 .repl = {
33 .name = "filter",
34 .valid_hooks = FILTER_VALID_HOOKS,
35 .num_entries = 4,
36 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
37 .hook_entry = {
38 [NF_INET_LOCAL_IN] = 0,
39 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
40 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
41 },
42 .underflow = {
43 [NF_INET_LOCAL_IN] = 0,
44 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
46 },
47 },
48 .entries = {
49 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
50 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
51 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
52 },
53 .term = IPT_ERROR_INIT, /* ERROR */
54};
55
56static const struct xt_table packet_filter = { 27static const struct xt_table packet_filter = {
57 .name = "filter", 28 .name = "filter",
58 .valid_hooks = FILTER_VALID_HOOKS, 29 .valid_hooks = FILTER_VALID_HOOKS,
59 .me = THIS_MODULE, 30 .me = THIS_MODULE,
60 .af = NFPROTO_IPV4, 31 .af = NFPROTO_IPV4,
32 .priority = NF_IP_PRI_FILTER,
61}; 33};
62 34
63/* The work comes in here from netfilter.c. */
64static unsigned int
65ipt_local_in_hook(unsigned int hook,
66 struct sk_buff *skb,
67 const struct net_device *in,
68 const struct net_device *out,
69 int (*okfn)(struct sk_buff *))
70{
71 return ipt_do_table(skb, hook, in, out,
72 dev_net(in)->ipv4.iptable_filter);
73}
74
75static unsigned int 35static unsigned int
76ipt_hook(unsigned int hook, 36iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
77 struct sk_buff *skb, 37 const struct net_device *in, const struct net_device *out,
78 const struct net_device *in, 38 int (*okfn)(struct sk_buff *))
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{ 39{
82 return ipt_do_table(skb, hook, in, out, 40 const struct net *net;
83 dev_net(in)->ipv4.iptable_filter);
84}
85 41
86static unsigned int 42 if (hook == NF_INET_LOCAL_OUT &&
87ipt_local_out_hook(unsigned int hook, 43 (skb->len < sizeof(struct iphdr) ||
88 struct sk_buff *skb, 44 ip_hdrlen(skb) < sizeof(struct iphdr)))
89 const struct net_device *in, 45 /* root is playing with raw sockets. */
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 /* root is playing with raw sockets. */
94 if (skb->len < sizeof(struct iphdr) ||
95 ip_hdrlen(skb) < sizeof(struct iphdr))
96 return NF_ACCEPT; 46 return NF_ACCEPT;
97 return ipt_do_table(skb, hook, in, out, 47
98 dev_net(out)->ipv4.iptable_filter); 48 net = dev_net((in != NULL) ? in : out);
49 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
99} 50}
100 51
101static struct nf_hook_ops ipt_ops[] __read_mostly = { 52static struct nf_hook_ops *filter_ops __read_mostly;
102 {
103 .hook = ipt_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV4,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP_PRI_FILTER,
108 },
109 {
110 .hook = ipt_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV4,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP_PRI_FILTER,
115 },
116 {
117 .hook = ipt_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = NFPROTO_IPV4,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP_PRI_FILTER,
122 },
123};
124 53
125/* Default to forward because I got too much mail already. */ 54/* Default to forward because I got too much mail already. */
126static int forward = NF_ACCEPT; 55static int forward = NF_ACCEPT;
@@ -128,9 +57,18 @@ module_param(forward, bool, 0000);
128 57
129static int __net_init iptable_filter_net_init(struct net *net) 58static int __net_init iptable_filter_net_init(struct net *net)
130{ 59{
131 /* Register table */ 60 struct ipt_replace *repl;
61
62 repl = ipt_alloc_initial_table(&packet_filter);
63 if (repl == NULL)
64 return -ENOMEM;
65 /* Entry 1 is the FORWARD hook */
66 ((struct ipt_standard *)repl->entries)[1].target.verdict =
67 -forward - 1;
68
132 net->ipv4.iptable_filter = 69 net->ipv4.iptable_filter =
133 ipt_register_table(net, &packet_filter, &initial_table.repl); 70 ipt_register_table(net, &packet_filter, repl);
71 kfree(repl);
134 if (IS_ERR(net->ipv4.iptable_filter)) 72 if (IS_ERR(net->ipv4.iptable_filter))
135 return PTR_ERR(net->ipv4.iptable_filter); 73 return PTR_ERR(net->ipv4.iptable_filter);
136 return 0; 74 return 0;
@@ -138,7 +76,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
138 76
139static void __net_exit iptable_filter_net_exit(struct net *net) 77static void __net_exit iptable_filter_net_exit(struct net *net)
140{ 78{
141 ipt_unregister_table(net->ipv4.iptable_filter); 79 ipt_unregister_table(net, net->ipv4.iptable_filter);
142} 80}
143 81
144static struct pernet_operations iptable_filter_net_ops = { 82static struct pernet_operations iptable_filter_net_ops = {
@@ -155,17 +93,16 @@ static int __init iptable_filter_init(void)
155 return -EINVAL; 93 return -EINVAL;
156 } 94 }
157 95
158 /* Entry 1 is the FORWARD hook */
159 initial_table.entries[1].target.verdict = -forward - 1;
160
161 ret = register_pernet_subsys(&iptable_filter_net_ops); 96 ret = register_pernet_subsys(&iptable_filter_net_ops);
162 if (ret < 0) 97 if (ret < 0)
163 return ret; 98 return ret;
164 99
165 /* Register hooks */ 100 /* Register hooks */
166 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 101 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
167 if (ret < 0) 102 if (IS_ERR(filter_ops)) {
103 ret = PTR_ERR(filter_ops);
168 goto cleanup_table; 104 goto cleanup_table;
105 }
169 106
170 return ret; 107 return ret;
171 108
@@ -176,7 +113,7 @@ static int __init iptable_filter_init(void)
176 113
177static void __exit iptable_filter_fini(void) 114static void __exit iptable_filter_fini(void)
178{ 115{
179 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 116 xt_hook_unlink(&packet_filter, filter_ops);
180 unregister_pernet_subsys(&iptable_filter_net_ops); 117 unregister_pernet_subsys(&iptable_filter_net_ops);
181} 118}
182 119
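
iptable_filter (and the mangle, raw and security tables below) lose their static initial_table blobs and per-hook wrapper functions in one go: ipt_alloc_initial_table() builds the all-ACCEPT template at runtime from the xt_table description, a single hook function is registered on every bit of valid_hooks by xt_hook_link() at the table's .priority, and teardown goes through xt_hook_unlink(). A stripped-down sketch of that registration flow (example_net_init and example_init are illustrative names; packet_filter, iptable_filter_hook and iptable_filter_net_ops are the ones from the hunk above; the forward-policy tweak and error unwinding are abbreviated):

static struct nf_hook_ops *filter_ops __read_mostly;

static int __net_init example_net_init(struct net *net)
{
        struct ipt_replace *repl;

        /* build the default all-ACCEPT ruleset from the table template */
        repl = ipt_alloc_initial_table(&packet_filter);
        if (repl == NULL)
                return -ENOMEM;

        net->ipv4.iptable_filter = ipt_register_table(net, &packet_filter, repl);
        kfree(repl);    /* ipt_register_table() keeps its own translated copy */
        if (IS_ERR(net->ipv4.iptable_filter))
                return PTR_ERR(net->ipv4.iptable_filter);
        return 0;
}

static int __init example_init(void)
{
        int ret = register_pernet_subsys(&iptable_filter_net_ops);

        if (ret < 0)
                return ret;

        /* one nf_hook_ops per bit in packet_filter.valid_hooks, all
         * pointing at iptable_filter_hook, at table->priority */
        filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
        if (IS_ERR(filter_ops)) {
                ret = PTR_ERR(filter_ops);
                unregister_pernet_subsys(&iptable_filter_net_ops);
        }
        return ret;
}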
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index fae78c3076c4..294a2a32f293 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -12,6 +12,7 @@
12#include <linux/netfilter_ipv4/ip_tables.h> 12#include <linux/netfilter_ipv4/ip_tables.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/route.h> 17#include <net/route.h>
17#include <linux/ip.h> 18#include <linux/ip.h>
@@ -27,101 +28,16 @@ MODULE_DESCRIPTION("iptables mangle table");
27 (1 << NF_INET_LOCAL_OUT) | \ 28 (1 << NF_INET_LOCAL_OUT) | \
28 (1 << NF_INET_POST_ROUTING)) 29 (1 << NF_INET_POST_ROUTING))
29 30
30/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[5];
35 struct ipt_error term;
36} initial_table __net_initdata = {
37 .repl = {
38 .name = "mangle",
39 .valid_hooks = MANGLE_VALID_HOOKS,
40 .num_entries = 6,
41 .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
45 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
48 },
49 .underflow = {
50 [NF_INET_PRE_ROUTING] = 0,
51 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
52 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
53 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
54 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
55 },
56 },
57 .entries = {
58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
59 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
60 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
61 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
62 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
63 },
64 .term = IPT_ERROR_INIT, /* ERROR */
65};
66
67static const struct xt_table packet_mangler = { 31static const struct xt_table packet_mangler = {
68 .name = "mangle", 32 .name = "mangle",
69 .valid_hooks = MANGLE_VALID_HOOKS, 33 .valid_hooks = MANGLE_VALID_HOOKS,
70 .me = THIS_MODULE, 34 .me = THIS_MODULE,
71 .af = NFPROTO_IPV4, 35 .af = NFPROTO_IPV4,
36 .priority = NF_IP_PRI_MANGLE,
72}; 37};
73 38
74/* The work comes in here from netfilter.c. */
75static unsigned int
76ipt_pre_routing_hook(unsigned int hook,
77 struct sk_buff *skb,
78 const struct net_device *in,
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{
82 return ipt_do_table(skb, hook, in, out,
83 dev_net(in)->ipv4.iptable_mangle);
84}
85
86static unsigned int
87ipt_post_routing_hook(unsigned int hook,
88 struct sk_buff *skb,
89 const struct net_device *in,
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 return ipt_do_table(skb, hook, in, out,
94 dev_net(out)->ipv4.iptable_mangle);
95}
96
97static unsigned int
98ipt_local_in_hook(unsigned int hook,
99 struct sk_buff *skb,
100 const struct net_device *in,
101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *))
103{
104 return ipt_do_table(skb, hook, in, out,
105 dev_net(in)->ipv4.iptable_mangle);
106}
107
108static unsigned int
109ipt_forward_hook(unsigned int hook,
110 struct sk_buff *skb,
111 const struct net_device *in,
112 const struct net_device *out,
113 int (*okfn)(struct sk_buff *))
114{
115 return ipt_do_table(skb, hook, in, out,
116 dev_net(in)->ipv4.iptable_mangle);
117}
118
119static unsigned int 39static unsigned int
120ipt_local_hook(unsigned int hook, 40ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
121 struct sk_buff *skb,
122 const struct net_device *in,
123 const struct net_device *out,
124 int (*okfn)(struct sk_buff *))
125{ 41{
126 unsigned int ret; 42 unsigned int ret;
127 const struct iphdr *iph; 43 const struct iphdr *iph;
@@ -141,7 +57,7 @@ ipt_local_hook(unsigned int hook,
141 daddr = iph->daddr; 57 daddr = iph->daddr;
142 tos = iph->tos; 58 tos = iph->tos;
143 59
144 ret = ipt_do_table(skb, hook, in, out, 60 ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
145 dev_net(out)->ipv4.iptable_mangle); 61 dev_net(out)->ipv4.iptable_mangle);
146 /* Reroute for ANY change. */ 62 /* Reroute for ANY change. */
147 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 63 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
@@ -158,49 +74,36 @@ ipt_local_hook(unsigned int hook,
158 return ret; 74 return ret;
159} 75}
160 76
161static struct nf_hook_ops ipt_ops[] __read_mostly = { 77/* The work comes in here from netfilter.c. */
162 { 78static unsigned int
163 .hook = ipt_pre_routing_hook, 79iptable_mangle_hook(unsigned int hook,
164 .owner = THIS_MODULE, 80 struct sk_buff *skb,
165 .pf = NFPROTO_IPV4, 81 const struct net_device *in,
166 .hooknum = NF_INET_PRE_ROUTING, 82 const struct net_device *out,
167 .priority = NF_IP_PRI_MANGLE, 83 int (*okfn)(struct sk_buff *))
168 }, 84{
169 { 85 if (hook == NF_INET_LOCAL_OUT)
170 .hook = ipt_local_in_hook, 86 return ipt_mangle_out(skb, out);
171 .owner = THIS_MODULE, 87 if (hook == NF_INET_POST_ROUTING)
172 .pf = NFPROTO_IPV4, 88 return ipt_do_table(skb, hook, in, out,
173 .hooknum = NF_INET_LOCAL_IN, 89 dev_net(out)->ipv4.iptable_mangle);
174 .priority = NF_IP_PRI_MANGLE, 90 /* PREROUTING/INPUT/FORWARD: */
175 }, 91 return ipt_do_table(skb, hook, in, out,
176 { 92 dev_net(in)->ipv4.iptable_mangle);
177 .hook = ipt_forward_hook, 93}
178 .owner = THIS_MODULE, 94
179 .pf = NFPROTO_IPV4, 95static struct nf_hook_ops *mangle_ops __read_mostly;
180 .hooknum = NF_INET_FORWARD,
181 .priority = NF_IP_PRI_MANGLE,
182 },
183 {
184 .hook = ipt_local_hook,
185 .owner = THIS_MODULE,
186 .pf = NFPROTO_IPV4,
187 .hooknum = NF_INET_LOCAL_OUT,
188 .priority = NF_IP_PRI_MANGLE,
189 },
190 {
191 .hook = ipt_post_routing_hook,
192 .owner = THIS_MODULE,
193 .pf = NFPROTO_IPV4,
194 .hooknum = NF_INET_POST_ROUTING,
195 .priority = NF_IP_PRI_MANGLE,
196 },
197};
198 96
199static int __net_init iptable_mangle_net_init(struct net *net) 97static int __net_init iptable_mangle_net_init(struct net *net)
200{ 98{
201 /* Register table */ 99 struct ipt_replace *repl;
100
101 repl = ipt_alloc_initial_table(&packet_mangler);
102 if (repl == NULL)
103 return -ENOMEM;
202 net->ipv4.iptable_mangle = 104 net->ipv4.iptable_mangle =
203 ipt_register_table(net, &packet_mangler, &initial_table.repl); 105 ipt_register_table(net, &packet_mangler, repl);
106 kfree(repl);
204 if (IS_ERR(net->ipv4.iptable_mangle)) 107 if (IS_ERR(net->ipv4.iptable_mangle))
205 return PTR_ERR(net->ipv4.iptable_mangle); 108 return PTR_ERR(net->ipv4.iptable_mangle);
206 return 0; 109 return 0;
@@ -208,7 +111,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
208 111
209static void __net_exit iptable_mangle_net_exit(struct net *net) 112static void __net_exit iptable_mangle_net_exit(struct net *net)
210{ 113{
211 ipt_unregister_table(net->ipv4.iptable_mangle); 114 ipt_unregister_table(net, net->ipv4.iptable_mangle);
212} 115}
213 116
214static struct pernet_operations iptable_mangle_net_ops = { 117static struct pernet_operations iptable_mangle_net_ops = {
@@ -225,9 +128,11 @@ static int __init iptable_mangle_init(void)
225 return ret; 128 return ret;
226 129
227 /* Register hooks */ 130 /* Register hooks */
228 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 131 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
229 if (ret < 0) 132 if (IS_ERR(mangle_ops)) {
133 ret = PTR_ERR(mangle_ops);
230 goto cleanup_table; 134 goto cleanup_table;
135 }
231 136
232 return ret; 137 return ret;
233 138
@@ -238,7 +143,7 @@ static int __init iptable_mangle_init(void)
238 143
239static void __exit iptable_mangle_fini(void) 144static void __exit iptable_mangle_fini(void)
240{ 145{
241 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 146 xt_hook_unlink(&packet_mangler, mangle_ops);
242 unregister_pernet_subsys(&iptable_mangle_net_ops); 147 unregister_pernet_subsys(&iptable_mangle_net_ops);
243} 148}
244 149
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 993edc23be09..07fb710cd722 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -5,94 +5,49 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/netfilter_ipv4/ip_tables.h> 7#include <linux/netfilter_ipv4/ip_tables.h>
8#include <linux/slab.h>
8#include <net/ip.h> 9#include <net/ip.h>
9 10
10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 11#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
11 12
12static const struct
13{
14 struct ipt_replace repl;
15 struct ipt_standard entries[2];
16 struct ipt_error term;
17} initial_table __net_initdata = {
18 .repl = {
19 .name = "raw",
20 .valid_hooks = RAW_VALID_HOOKS,
21 .num_entries = 3,
22 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
23 .hook_entry = {
24 [NF_INET_PRE_ROUTING] = 0,
25 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
26 },
27 .underflow = {
28 [NF_INET_PRE_ROUTING] = 0,
29 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
30 },
31 },
32 .entries = {
33 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
34 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
35 },
36 .term = IPT_ERROR_INIT, /* ERROR */
37};
38
39static const struct xt_table packet_raw = { 13static const struct xt_table packet_raw = {
40 .name = "raw", 14 .name = "raw",
41 .valid_hooks = RAW_VALID_HOOKS, 15 .valid_hooks = RAW_VALID_HOOKS,
42 .me = THIS_MODULE, 16 .me = THIS_MODULE,
43 .af = NFPROTO_IPV4, 17 .af = NFPROTO_IPV4,
18 .priority = NF_IP_PRI_RAW,
44}; 19};
45 20
46/* The work comes in here from netfilter.c. */ 21/* The work comes in here from netfilter.c. */
47static unsigned int 22static unsigned int
48ipt_hook(unsigned int hook, 23iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
49 struct sk_buff *skb, 24 const struct net_device *in, const struct net_device *out,
50 const struct net_device *in, 25 int (*okfn)(struct sk_buff *))
51 const struct net_device *out,
52 int (*okfn)(struct sk_buff *))
53{ 26{
54 return ipt_do_table(skb, hook, in, out, 27 const struct net *net;
55 dev_net(in)->ipv4.iptable_raw);
56}
57 28
58static unsigned int 29 if (hook == NF_INET_LOCAL_OUT &&
59ipt_local_hook(unsigned int hook, 30 (skb->len < sizeof(struct iphdr) ||
60 struct sk_buff *skb, 31 ip_hdrlen(skb) < sizeof(struct iphdr)))
61 const struct net_device *in, 32 /* root is playing with raw sockets. */
62 const struct net_device *out,
63 int (*okfn)(struct sk_buff *))
64{
65 /* root is playing with raw sockets. */
66 if (skb->len < sizeof(struct iphdr) ||
67 ip_hdrlen(skb) < sizeof(struct iphdr))
68 return NF_ACCEPT; 33 return NF_ACCEPT;
69 return ipt_do_table(skb, hook, in, out, 34
70 dev_net(out)->ipv4.iptable_raw); 35 net = dev_net((in != NULL) ? in : out);
36 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
71} 37}
72 38
73/* 'raw' is the very first table. */ 39static struct nf_hook_ops *rawtable_ops __read_mostly;
74static struct nf_hook_ops ipt_ops[] __read_mostly = {
75 {
76 .hook = ipt_hook,
77 .pf = NFPROTO_IPV4,
78 .hooknum = NF_INET_PRE_ROUTING,
79 .priority = NF_IP_PRI_RAW,
80 .owner = THIS_MODULE,
81 },
82 {
83 .hook = ipt_local_hook,
84 .pf = NFPROTO_IPV4,
85 .hooknum = NF_INET_LOCAL_OUT,
86 .priority = NF_IP_PRI_RAW,
87 .owner = THIS_MODULE,
88 },
89};
90 40
91static int __net_init iptable_raw_net_init(struct net *net) 41static int __net_init iptable_raw_net_init(struct net *net)
92{ 42{
93 /* Register table */ 43 struct ipt_replace *repl;
44
45 repl = ipt_alloc_initial_table(&packet_raw);
46 if (repl == NULL)
47 return -ENOMEM;
94 net->ipv4.iptable_raw = 48 net->ipv4.iptable_raw =
95 ipt_register_table(net, &packet_raw, &initial_table.repl); 49 ipt_register_table(net, &packet_raw, repl);
50 kfree(repl);
96 if (IS_ERR(net->ipv4.iptable_raw)) 51 if (IS_ERR(net->ipv4.iptable_raw))
97 return PTR_ERR(net->ipv4.iptable_raw); 52 return PTR_ERR(net->ipv4.iptable_raw);
98 return 0; 53 return 0;
@@ -100,7 +55,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
100 55
101static void __net_exit iptable_raw_net_exit(struct net *net) 56static void __net_exit iptable_raw_net_exit(struct net *net)
102{ 57{
103 ipt_unregister_table(net->ipv4.iptable_raw); 58 ipt_unregister_table(net, net->ipv4.iptable_raw);
104} 59}
105 60
106static struct pernet_operations iptable_raw_net_ops = { 61static struct pernet_operations iptable_raw_net_ops = {
@@ -117,9 +72,11 @@ static int __init iptable_raw_init(void)
117 return ret; 72 return ret;
118 73
119 /* Register hooks */ 74 /* Register hooks */
120 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 75 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
121 if (ret < 0) 76 if (IS_ERR(rawtable_ops)) {
77 ret = PTR_ERR(rawtable_ops);
122 goto cleanup_table; 78 goto cleanup_table;
79 }
123 80
124 return ret; 81 return ret;
125 82
@@ -130,7 +87,7 @@ static int __init iptable_raw_init(void)
130 87
131static void __exit iptable_raw_fini(void) 88static void __exit iptable_raw_fini(void)
132{ 89{
133 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 90 xt_hook_unlink(&packet_raw, rawtable_ops);
134 unregister_pernet_subsys(&iptable_raw_net_ops); 91 unregister_pernet_subsys(&iptable_raw_net_ops);
135} 92}
136 93
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 3bd3d6388da5..be45bdc4c602 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netfilter_ipv4/ip_tables.h> 19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <linux/slab.h>
20#include <net/ip.h> 21#include <net/ip.h>
21 22
22MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
@@ -27,109 +28,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
27 (1 << NF_INET_FORWARD) | \ 28 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT) 29 (1 << NF_INET_LOCAL_OUT)
29 30
30static const struct
31{
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35} initial_table __net_initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table security_table = { 31static const struct xt_table security_table = {
61 .name = "security", 32 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS, 33 .valid_hooks = SECURITY_VALID_HOOKS,
63 .me = THIS_MODULE, 34 .me = THIS_MODULE,
64 .af = NFPROTO_IPV4, 35 .af = NFPROTO_IPV4,
36 .priority = NF_IP_PRI_SECURITY,
65}; 37};
66 38
67static unsigned int 39static unsigned int
68ipt_local_in_hook(unsigned int hook, 40iptable_security_hook(unsigned int hook, struct sk_buff *skb,
69 struct sk_buff *skb, 41 const struct net_device *in,
70 const struct net_device *in, 42 const struct net_device *out,
71 const struct net_device *out, 43 int (*okfn)(struct sk_buff *))
72 int (*okfn)(struct sk_buff *))
73{
74 return ipt_do_table(skb, hook, in, out,
75 dev_net(in)->ipv4.iptable_security);
76}
77
78static unsigned int
79ipt_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{ 44{
85 return ipt_do_table(skb, hook, in, out, 45 const struct net *net;
86 dev_net(in)->ipv4.iptable_security);
87}
88 46
89static unsigned int 47 if (hook == NF_INET_LOCAL_OUT &&
90ipt_local_out_hook(unsigned int hook, 48 (skb->len < sizeof(struct iphdr) ||
91 struct sk_buff *skb, 49 ip_hdrlen(skb) < sizeof(struct iphdr)))
92 const struct net_device *in, 50 /* Somebody is playing with raw sockets. */
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* Somebody is playing with raw sockets. */
97 if (skb->len < sizeof(struct iphdr) ||
98 ip_hdrlen(skb) < sizeof(struct iphdr))
99 return NF_ACCEPT; 51 return NF_ACCEPT;
100 return ipt_do_table(skb, hook, in, out, 52
101 dev_net(out)->ipv4.iptable_security); 53 net = dev_net((in != NULL) ? in : out);
54 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
102} 55}
103 56
104static struct nf_hook_ops ipt_ops[] __read_mostly = { 57static struct nf_hook_ops *sectbl_ops __read_mostly;
105 {
106 .hook = ipt_local_in_hook,
107 .owner = THIS_MODULE,
108 .pf = NFPROTO_IPV4,
109 .hooknum = NF_INET_LOCAL_IN,
110 .priority = NF_IP_PRI_SECURITY,
111 },
112 {
113 .hook = ipt_forward_hook,
114 .owner = THIS_MODULE,
115 .pf = NFPROTO_IPV4,
116 .hooknum = NF_INET_FORWARD,
117 .priority = NF_IP_PRI_SECURITY,
118 },
119 {
120 .hook = ipt_local_out_hook,
121 .owner = THIS_MODULE,
122 .pf = NFPROTO_IPV4,
123 .hooknum = NF_INET_LOCAL_OUT,
124 .priority = NF_IP_PRI_SECURITY,
125 },
126};
127 58
128static int __net_init iptable_security_net_init(struct net *net) 59static int __net_init iptable_security_net_init(struct net *net)
129{ 60{
130 net->ipv4.iptable_security = 61 struct ipt_replace *repl;
131 ipt_register_table(net, &security_table, &initial_table.repl);
132 62
63 repl = ipt_alloc_initial_table(&security_table);
64 if (repl == NULL)
65 return -ENOMEM;
66 net->ipv4.iptable_security =
67 ipt_register_table(net, &security_table, repl);
68 kfree(repl);
133 if (IS_ERR(net->ipv4.iptable_security)) 69 if (IS_ERR(net->ipv4.iptable_security))
134 return PTR_ERR(net->ipv4.iptable_security); 70 return PTR_ERR(net->ipv4.iptable_security);
135 71
@@ -138,7 +74,7 @@ static int __net_init iptable_security_net_init(struct net *net)
138 74
139static void __net_exit iptable_security_net_exit(struct net *net) 75static void __net_exit iptable_security_net_exit(struct net *net)
140{ 76{
141 ipt_unregister_table(net->ipv4.iptable_security); 77 ipt_unregister_table(net, net->ipv4.iptable_security);
142} 78}
143 79
144static struct pernet_operations iptable_security_net_ops = { 80static struct pernet_operations iptable_security_net_ops = {
@@ -154,9 +90,11 @@ static int __init iptable_security_init(void)
154 if (ret < 0) 90 if (ret < 0)
155 return ret; 91 return ret;
156 92
157 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 93 sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
158 if (ret < 0) 94 if (IS_ERR(sectbl_ops)) {
95 ret = PTR_ERR(sectbl_ops);
159 goto cleanup_table; 96 goto cleanup_table;
97 }
160 98
161 return ret; 99 return ret;
162 100
@@ -167,7 +105,7 @@ cleanup_table:
167 105
168static void __exit iptable_security_fini(void) 106static void __exit iptable_security_fini(void)
169{ 107{
170 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 108 xt_hook_unlink(&security_table, sectbl_ops);
171 unregister_pernet_subsys(&iptable_security_net_ops); 109 unregister_pernet_subsys(&iptable_security_net_ops);
172} 110}
173 111
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d1ea38a7c490..2bb1f87051c4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -22,6 +22,7 @@
22#include <net/netfilter/nf_conntrack_helper.h> 22#include <net/netfilter/nf_conntrack_helper.h>
23#include <net/netfilter/nf_conntrack_l4proto.h> 23#include <net/netfilter/nf_conntrack_l4proto.h>
24#include <net/netfilter/nf_conntrack_l3proto.h> 24#include <net/netfilter/nf_conntrack_l3proto.h>
25#include <net/netfilter/nf_conntrack_zones.h>
25#include <net/netfilter/nf_conntrack_core.h> 26#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 27#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
27#include <net/netfilter/nf_nat_helper.h> 28#include <net/netfilter/nf_nat_helper.h>
@@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
266 return -EINVAL; 267 return -EINVAL;
267 } 268 }
268 269
269 h = nf_conntrack_find_get(sock_net(sk), &tuple); 270 h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
270 if (h) { 271 if (h) {
271 struct sockaddr_in sin; 272 struct sockaddr_in sin;
272 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 273 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7afd39b5b781..7404bde95994 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -18,6 +18,7 @@
18#include <net/netfilter/nf_conntrack_tuple.h> 18#include <net/netfilter/nf_conntrack_tuple.h>
19#include <net/netfilter/nf_conntrack_l4proto.h> 19#include <net/netfilter/nf_conntrack_l4proto.h>
20#include <net/netfilter/nf_conntrack_core.h> 20#include <net/netfilter/nf_conntrack_core.h>
21#include <net/netfilter/nf_conntrack_zones.h>
21#include <net/netfilter/nf_log.h> 22#include <net/netfilter/nf_log.h>
22 23
23static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; 24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
@@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
114 115
115/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 116/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
116static int 117static int
117icmp_error_message(struct net *net, struct sk_buff *skb, 118icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
118 enum ip_conntrack_info *ctinfo, 119 enum ip_conntrack_info *ctinfo,
119 unsigned int hooknum) 120 unsigned int hooknum)
120{ 121{
121 struct nf_conntrack_tuple innertuple, origtuple; 122 struct nf_conntrack_tuple innertuple, origtuple;
122 const struct nf_conntrack_l4proto *innerproto; 123 const struct nf_conntrack_l4proto *innerproto;
123 const struct nf_conntrack_tuple_hash *h; 124 const struct nf_conntrack_tuple_hash *h;
125 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
124 126
125 NF_CT_ASSERT(skb->nfct == NULL); 127 NF_CT_ASSERT(skb->nfct == NULL);
126 128
@@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
146 148
147 *ctinfo = IP_CT_RELATED; 149 *ctinfo = IP_CT_RELATED;
148 150
149 h = nf_conntrack_find_get(net, &innertuple); 151 h = nf_conntrack_find_get(net, zone, &innertuple);
150 if (!h) { 152 if (!h) {
151 pr_debug("icmp_error_message: no match\n"); 153 pr_debug("icmp_error_message: no match\n");
152 return -NF_ACCEPT; 154 return -NF_ACCEPT;
@@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
163 165
164/* Small and modified version of icmp_rcv */ 166/* Small and modified version of icmp_rcv */
165static int 167static int
166icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 168icmp_error(struct net *net, struct nf_conn *tmpl,
169 struct sk_buff *skb, unsigned int dataoff,
167 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 170 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
168{ 171{
169 const struct icmphdr *icmph; 172 const struct icmphdr *icmph;
@@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
208 icmph->type != ICMP_REDIRECT) 211 icmph->type != ICMP_REDIRECT)
209 return NF_ACCEPT; 212 return NF_ACCEPT;
210 213
211 return icmp_error_message(net, skb, ctinfo, hooknum); 214 return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
212} 215}
213 216
214#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 217#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
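
The conntrack changes in this and the previous file thread a zone identifier through the lookups: nf_conntrack_find_get() now takes a zone, and code that only has a template conntrack (the tmpl argument added to icmp_error() above) derives it with nf_ct_zone(), falling back to NF_CT_DEFAULT_ZONE. A small sketch of that lookup pattern (example_find is an illustrative wrapper; nf_ct_tuplehash_to_ctrack() is the standard conversion helper):

static struct nf_conn *example_find(struct net *net, struct nf_conn *tmpl,
                                    const struct nf_conntrack_tuple *tuple)
{
        /* related packets land in the same zone as the flow that
         * triggered them */
        u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        const struct nf_conntrack_tuple_hash *h;

        h = nf_conntrack_find_get(net, zone, tuple);
        return h ? nf_ct_tuplehash_to_ctrack(h) : NULL;
}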
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 331ead3ebd1b..cb763ae9ed90 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -17,6 +17,10 @@
17#include <linux/netfilter_bridge.h> 17#include <linux/netfilter_bridge.h>
18#include <linux/netfilter_ipv4.h> 18#include <linux/netfilter_ipv4.h>
19#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 19#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
20#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
21#include <net/netfilter/nf_conntrack.h>
22#endif
23#include <net/netfilter/nf_conntrack_zones.h>
20 24
21/* Returns new sk_buff, or NULL */ 25/* Returns new sk_buff, or NULL */
22static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) 26static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
@@ -38,15 +42,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
38static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, 42static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
39 struct sk_buff *skb) 43 struct sk_buff *skb)
40{ 44{
45 u16 zone = NF_CT_DEFAULT_ZONE;
46
47#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
48 if (skb->nfct)
49 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
50#endif
51
41#ifdef CONFIG_BRIDGE_NETFILTER 52#ifdef CONFIG_BRIDGE_NETFILTER
42 if (skb->nf_bridge && 53 if (skb->nf_bridge &&
43 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 54 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
44 return IP_DEFRAG_CONNTRACK_BRIDGE_IN; 55 return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
45#endif 56#endif
46 if (hooknum == NF_INET_PRE_ROUTING) 57 if (hooknum == NF_INET_PRE_ROUTING)
47 return IP_DEFRAG_CONNTRACK_IN; 58 return IP_DEFRAG_CONNTRACK_IN + zone;
48 else 59 else
49 return IP_DEFRAG_CONNTRACK_OUT; 60 return IP_DEFRAG_CONNTRACK_OUT + zone;
50} 61}
51 62
52static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, 63static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -59,7 +70,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
59#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) 70#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
60 /* Previously seen (loopback)? Ignore. Do this before 71 /* Previously seen (loopback)? Ignore. Do this before
61 fragment check. */ 72 fragment check. */
62 if (skb->nfct) 73 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
63 return NF_ACCEPT; 74 return NF_ACCEPT;
64#endif 75#endif
65#endif 76#endif
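
nf_defrag_ipv4 keys its fragment queues on an ip_defrag_users value; folding the zone into that value means fragments belonging to different conntrack zones are never reassembled into the same datagram. The selection logic above boils down to the sketch below (example_defrag_user is an illustrative name, the bridge-netfilter case is omitted, and the per-zone ranges for the enum are reserved elsewhere in this series):

static enum ip_defrag_users example_defrag_user(unsigned int hooknum,
                                                struct sk_buff *skb)
{
        u16 zone = NF_CT_DEFAULT_ZONE;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        if (skb->nfct)  /* template or real conntrack already attached */
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
        if (hooknum == NF_INET_PRE_ROUTING)
                return IP_DEFRAG_CONNTRACK_IN + zone;
        return IP_DEFRAG_CONNTRACK_OUT + zone;
}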
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 26066a2327ad..4f8bddb760c9 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/gfp.h>
15#include <net/checksum.h> 16#include <net/checksum.h>
16#include <net/icmp.h> 17#include <net/icmp.h>
17#include <net/ip.h> 18#include <net/ip.h>
@@ -30,6 +31,7 @@
30#include <net/netfilter/nf_conntrack_helper.h> 31#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_l3proto.h> 32#include <net/netfilter/nf_conntrack_l3proto.h>
32#include <net/netfilter/nf_conntrack_l4proto.h> 33#include <net/netfilter/nf_conntrack_l4proto.h>
34#include <net/netfilter/nf_conntrack_zones.h>
33 35
34static DEFINE_SPINLOCK(nf_nat_lock); 36static DEFINE_SPINLOCK(nf_nat_lock);
35 37
@@ -69,13 +71,14 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
69 71
70/* We keep an extra hash for each conntrack, for fast searching. */ 72/* We keep an extra hash for each conntrack, for fast searching. */
71static inline unsigned int 73static inline unsigned int
72hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) 74hash_by_src(const struct net *net, u16 zone,
75 const struct nf_conntrack_tuple *tuple)
73{ 76{
74 unsigned int hash; 77 unsigned int hash;
75 78
76 /* Original src, to ensure we map it consistently if poss. */ 79 /* Original src, to ensure we map it consistently if poss. */
77 hash = jhash_3words((__force u32)tuple->src.u3.ip, 80 hash = jhash_3words((__force u32)tuple->src.u3.ip,
78 (__force u32)tuple->src.u.all, 81 (__force u32)tuple->src.u.all ^ zone,
79 tuple->dst.protonum, 0); 82 tuple->dst.protonum, 0);
80 return ((u64)hash * net->ipv4.nat_htable_size) >> 32; 83 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
81} 84}
@@ -139,12 +142,12 @@ same_src(const struct nf_conn *ct,
139 142
140/* Only called for SRC manip */ 143/* Only called for SRC manip */
141static int 144static int
142find_appropriate_src(struct net *net, 145find_appropriate_src(struct net *net, u16 zone,
143 const struct nf_conntrack_tuple *tuple, 146 const struct nf_conntrack_tuple *tuple,
144 struct nf_conntrack_tuple *result, 147 struct nf_conntrack_tuple *result,
145 const struct nf_nat_range *range) 148 const struct nf_nat_range *range)
146{ 149{
147 unsigned int h = hash_by_src(net, tuple); 150 unsigned int h = hash_by_src(net, zone, tuple);
148 const struct nf_conn_nat *nat; 151 const struct nf_conn_nat *nat;
149 const struct nf_conn *ct; 152 const struct nf_conn *ct;
150 const struct hlist_node *n; 153 const struct hlist_node *n;
@@ -152,7 +155,7 @@ find_appropriate_src(struct net *net,
152 rcu_read_lock(); 155 rcu_read_lock();
153 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) { 156 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
154 ct = nat->ct; 157 ct = nat->ct;
155 if (same_src(ct, tuple)) { 158 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
156 /* Copy source part from reply tuple. */ 159 /* Copy source part from reply tuple. */
157 nf_ct_invert_tuplepr(result, 160 nf_ct_invert_tuplepr(result,
158 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 161 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -175,7 +178,7 @@ find_appropriate_src(struct net *net,
175 the ip with the lowest src-ip/dst-ip/proto usage. 178 the ip with the lowest src-ip/dst-ip/proto usage.
176*/ 179*/
177static void 180static void
178find_best_ips_proto(struct nf_conntrack_tuple *tuple, 181find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
179 const struct nf_nat_range *range, 182 const struct nf_nat_range *range,
180 const struct nf_conn *ct, 183 const struct nf_conn *ct,
181 enum nf_nat_manip_type maniptype) 184 enum nf_nat_manip_type maniptype)
@@ -209,7 +212,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
209 maxip = ntohl(range->max_ip); 212 maxip = ntohl(range->max_ip);
210 j = jhash_2words((__force u32)tuple->src.u3.ip, 213 j = jhash_2words((__force u32)tuple->src.u3.ip,
211 range->flags & IP_NAT_RANGE_PERSISTENT ? 214 range->flags & IP_NAT_RANGE_PERSISTENT ?
212 0 : (__force u32)tuple->dst.u3.ip, 0); 215 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
213 j = ((u64)j * (maxip - minip + 1)) >> 32; 216 j = ((u64)j * (maxip - minip + 1)) >> 32;
214 *var_ipp = htonl(minip + j); 217 *var_ipp = htonl(minip + j);
215} 218}
@@ -229,6 +232,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
229{ 232{
230 struct net *net = nf_ct_net(ct); 233 struct net *net = nf_ct_net(ct);
231 const struct nf_nat_protocol *proto; 234 const struct nf_nat_protocol *proto;
235 u16 zone = nf_ct_zone(ct);
232 236
233 /* 1) If this srcip/proto/src-proto-part is currently mapped, 237 /* 1) If this srcip/proto/src-proto-part is currently mapped,
234 and that same mapping gives a unique tuple within the given 238 and that same mapping gives a unique tuple within the given
@@ -239,7 +243,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
239 manips not an issue. */ 243 manips not an issue. */
240 if (maniptype == IP_NAT_MANIP_SRC && 244 if (maniptype == IP_NAT_MANIP_SRC &&
241 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 245 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
242 if (find_appropriate_src(net, orig_tuple, tuple, range)) { 246 if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
243 pr_debug("get_unique_tuple: Found current src map\n"); 247 pr_debug("get_unique_tuple: Found current src map\n");
244 if (!nf_nat_used_tuple(tuple, ct)) 248 if (!nf_nat_used_tuple(tuple, ct))
245 return; 249 return;
@@ -249,7 +253,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
249 /* 2) Select the least-used IP/proto combination in the given 253 /* 2) Select the least-used IP/proto combination in the given
250 range. */ 254 range. */
251 *tuple = *orig_tuple; 255 *tuple = *orig_tuple;
252 find_best_ips_proto(tuple, range, ct, maniptype); 256 find_best_ips_proto(zone, tuple, range, ct, maniptype);
253 257
254 /* 3) The per-protocol part of the manip is made to map into 258 /* 3) The per-protocol part of the manip is made to map into
255 the range to make a unique tuple. */ 259 the range to make a unique tuple. */
@@ -327,7 +331,8 @@ nf_nat_setup_info(struct nf_conn *ct,
327 if (have_to_hash) { 331 if (have_to_hash) {
328 unsigned int srchash; 332 unsigned int srchash;
329 333
330 srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 334 srchash = hash_by_src(net, nf_ct_zone(ct),
335 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
331 spin_lock_bh(&nf_nat_lock); 336 spin_lock_bh(&nf_nat_lock);
332 /* nf_conntrack_alter_reply might re-allocate extension area */ 337 /* nf_conntrack_alter_reply might re-allocate extension area */
333 nat = nfct_nat(ct); 338 nat = nfct_nat(ct);
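A minimal userspace sketch of the zone-aware source selection introduced above, using a stand-in mixing function in place of jhash_2words(); only the way the zone id is folded into the hash input and the range mapping are taken from the hunks.

/* Sketch only: pick an address from [minip, maxip] the way the changed
 * find_best_ips_proto() does, with the conntrack zone XOR'ed into the
 * hash input so identical flows in different zones spread differently. */
#include <stdint.h>

static uint32_t pick_src_ip(uint32_t src_ip, uint32_t dst_ip, uint16_t zone,
			    uint32_t minip, uint32_t maxip, int persistent)
{
	/* stand-in for jhash_2words(src, persistent ? 0 : dst ^ zone, 0) */
	uint32_t j = (src_ip * 2654435761u) ^
		     ((persistent ? 0u : dst_ip ^ zone) * 2246822519u);

	/* unbiased mapping of the hash onto the range, as in the kernel's
	 * 64-bit multiply-and-shift */
	j = (uint32_t)(((uint64_t)j * (maxip - minip + 1)) >> 32);
	return minip + j;
}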
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index a1d5d58a58bf..86e0e84ff0a0 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp");
27 27
28/* FIXME: Time out? --RR */ 28/* FIXME: Time out? --RR */
29 29
30static int 30static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
31mangle_rfc959_packet(struct sk_buff *skb, 31 char *buffer, size_t buflen,
32 __be32 newip, 32 __be32 addr, u16 port)
33 u_int16_t port,
34 unsigned int matchoff,
35 unsigned int matchlen,
36 struct nf_conn *ct,
37 enum ip_conntrack_info ctinfo)
38{ 33{
39 char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; 34 switch (type) {
40 35 case NF_CT_FTP_PORT:
41 sprintf(buffer, "%u,%u,%u,%u,%u,%u", 36 case NF_CT_FTP_PASV:
42 NIPQUAD(newip), port>>8, port&0xFF); 37 return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
43 38 ((unsigned char *)&addr)[0],
44 pr_debug("calling nf_nat_mangle_tcp_packet\n"); 39 ((unsigned char *)&addr)[1],
45 40 ((unsigned char *)&addr)[2],
46 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 41 ((unsigned char *)&addr)[3],
47 matchlen, buffer, strlen(buffer)); 42 port >> 8,
48} 43 port & 0xFF);
49 44 case NF_CT_FTP_EPRT:
50/* |1|132.235.1.2|6275| */ 45 return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
51static int 46 case NF_CT_FTP_EPSV:
52mangle_eprt_packet(struct sk_buff *skb, 47 return snprintf(buffer, buflen, "|||%u|", port);
53 __be32 newip, 48 }
54 u_int16_t port,
55 unsigned int matchoff,
56 unsigned int matchlen,
57 struct nf_conn *ct,
58 enum ip_conntrack_info ctinfo)
59{
60 char buffer[sizeof("|1|255.255.255.255|65535|")];
61
62 sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
63
64 pr_debug("calling nf_nat_mangle_tcp_packet\n");
65
66 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
67 matchlen, buffer, strlen(buffer));
68}
69
70/* |1|132.235.1.2|6275| */
71static int
72mangle_epsv_packet(struct sk_buff *skb,
73 __be32 newip,
74 u_int16_t port,
75 unsigned int matchoff,
76 unsigned int matchlen,
77 struct nf_conn *ct,
78 enum ip_conntrack_info ctinfo)
79{
80 char buffer[sizeof("|||65535|")];
81
82 sprintf(buffer, "|||%u|", port);
83
84 pr_debug("calling nf_nat_mangle_tcp_packet\n");
85 49
86 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 50 return 0;
87 matchlen, buffer, strlen(buffer));
88} 51}
89 52
90static int (*mangle[])(struct sk_buff *, __be32, u_int16_t,
91 unsigned int, unsigned int, struct nf_conn *,
92 enum ip_conntrack_info)
93= {
94 [NF_CT_FTP_PORT] = mangle_rfc959_packet,
95 [NF_CT_FTP_PASV] = mangle_rfc959_packet,
96 [NF_CT_FTP_EPRT] = mangle_eprt_packet,
97 [NF_CT_FTP_EPSV] = mangle_epsv_packet
98};
99
100/* So, this packet has hit the connection tracking matching code. 53/* So, this packet has hit the connection tracking matching code.
101 Mangle it, and change the expectation to match the new version. */ 54 Mangle it, and change the expectation to match the new version. */
102static unsigned int nf_nat_ftp(struct sk_buff *skb, 55static unsigned int nf_nat_ftp(struct sk_buff *skb,
@@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
110 u_int16_t port; 63 u_int16_t port;
111 int dir = CTINFO2DIR(ctinfo); 64 int dir = CTINFO2DIR(ctinfo);
112 struct nf_conn *ct = exp->master; 65 struct nf_conn *ct = exp->master;
66 char buffer[sizeof("|1|255.255.255.255|65535|")];
67 unsigned int buflen;
113 68
114 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); 69 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
115 70
@@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
132 if (port == 0) 87 if (port == 0)
133 return NF_DROP; 88 return NF_DROP;
134 89
135 if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) { 90 buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
136 nf_ct_unexpect_related(exp); 91 if (!buflen)
137 return NF_DROP; 92 goto out;
138 } 93
94 pr_debug("calling nf_nat_mangle_tcp_packet\n");
95
96 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
97 matchlen, buffer, buflen))
98 goto out;
99
139 return NF_ACCEPT; 100 return NF_ACCEPT;
101
102out:
103 nf_ct_unexpect_related(exp);
104 return NF_DROP;
140} 105}
141 106
142static void __exit nf_nat_ftp_fini(void) 107static void __exit nf_nat_ftp_fini(void)
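The rewrite above folds the per-command mangle functions into one formatter, nf_nat_ftp_fmt_cmd(), followed by a single nf_nat_mangle_tcp_packet() call. A standalone approximation of that formatter (hypothetical names, userspace only) shows the command layouts it has to produce:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum ftp_type { FTP_PORT, FTP_PASV, FTP_EPRT, FTP_EPSV };

static int fmt_cmd(enum ftp_type type, char *buf, size_t len,
		   uint32_t addr, uint16_t port)
{
	const unsigned char *a = (const unsigned char *)&addr;

	switch (type) {
	case FTP_PORT:
	case FTP_PASV:		/* PORT/PASV use "h1,h2,h3,h4,p1,p2" */
		return snprintf(buf, len, "%u,%u,%u,%u,%u,%u",
				a[0], a[1], a[2], a[3], port >> 8, port & 0xFF);
	case FTP_EPRT:		/* EPRT uses "|1|a.b.c.d|port|" */
		return snprintf(buf, len, "|1|%u.%u.%u.%u|%u|",
				a[0], a[1], a[2], a[3], port);
	case FTP_EPSV:		/* EPSV reply only carries the port */
		return snprintf(buf, len, "|||%u|", port);
	}
	return 0;
}

int main(void)
{
	unsigned char ip[4] = { 132, 235, 1, 2 };
	uint32_t addr;
	char buf[sizeof("|1|255.255.255.255|65535|")];	/* worst-case size */

	memcpy(&addr, ip, sizeof(addr));	/* network byte order, like __be32 */
	fmt_cmd(FTP_EPRT, buf, sizeof(buf), addr, 6275);
	puts(buf);				/* "|1|132.235.1.2|6275|" */
	return 0;
}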
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 7f10a6be0191..4a0c6b548eee 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/gfp.h>
11#include <linux/kmod.h> 12#include <linux/kmod.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/timer.h> 14#include <linux/timer.h>
@@ -141,6 +142,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
141 return 1; 142 return 1;
142} 143}
143 144
145void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
146 __be32 seq, s16 off)
147{
148 if (!off)
149 return;
150 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
151 adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
152 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155
144/* Generic function for mangling variable-length address changes inside 156/* Generic function for mangling variable-length address changes inside
145 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX 157 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
146 * command in FTP). 158 * command in FTP).
@@ -149,14 +161,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
149 * skb enlargement, ... 161 * skb enlargement, ...
150 * 162 *
151 * */ 163 * */
152int 164int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
153nf_nat_mangle_tcp_packet(struct sk_buff *skb, 165 struct nf_conn *ct,
154 struct nf_conn *ct, 166 enum ip_conntrack_info ctinfo,
155 enum ip_conntrack_info ctinfo, 167 unsigned int match_offset,
156 unsigned int match_offset, 168 unsigned int match_len,
157 unsigned int match_len, 169 const char *rep_buffer,
158 const char *rep_buffer, 170 unsigned int rep_len, bool adjust)
159 unsigned int rep_len)
160{ 171{
161 struct rtable *rt = skb_rtable(skb); 172 struct rtable *rt = skb_rtable(skb);
162 struct iphdr *iph; 173 struct iphdr *iph;
@@ -202,16 +213,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
202 inet_proto_csum_replace2(&tcph->check, skb, 213 inet_proto_csum_replace2(&tcph->check, skb,
203 htons(oldlen), htons(datalen), 1); 214 htons(oldlen), htons(datalen), 1);
204 215
205 if (rep_len != match_len) { 216 if (adjust && rep_len != match_len)
206 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 217 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
207 adjust_tcp_sequence(ntohl(tcph->seq), 218 (int)rep_len - (int)match_len);
208 (int)rep_len - (int)match_len, 219
209 ct, ctinfo);
210 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
211 }
212 return 1; 220 return 1;
213} 221}
214EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); 222EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
215 223
216/* Generic function for mangling variable-length address changes inside 224/* Generic function for mangling variable-length address changes inside
217 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX 225 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
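With the rename to __nf_nat_mangle_tcp_packet() and the new adjust flag, the old entry point presumably survives as a thin wrapper that always requests the sequence fix-up; the inline below is an assumption based on this hunk, not part of the diff shown here. Helpers such as the SIP one can instead pass adjust=false and apply nf_nat_set_seq_adjust() once per packet.

/* Assumed wrapper (not shown in this diff): keep the historical behaviour
 * of adjusting TCP sequence numbers immediately after every mangle. */
static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo,
					   unsigned int match_offset,
					   unsigned int match_len,
					   const char *rep_buffer,
					   unsigned int rep_len)
{
	return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, match_offset,
					  match_len, rep_buffer, rep_len,
					  true);
}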
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 9eb171056c63..4c060038d29f 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -25,6 +25,7 @@
25#include <net/netfilter/nf_nat_rule.h> 25#include <net/netfilter/nf_nat_rule.h>
26#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_zones.h>
28#include <linux/netfilter/nf_conntrack_proto_gre.h> 29#include <linux/netfilter/nf_conntrack_proto_gre.h>
29#include <linux/netfilter/nf_conntrack_pptp.h> 30#include <linux/netfilter/nf_conntrack_pptp.h>
30 31
@@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
74 75
75 pr_debug("trying to unexpect other dir: "); 76 pr_debug("trying to unexpect other dir: ");
76 nf_ct_dump_tuple_ip(&t); 77 nf_ct_dump_tuple_ip(&t);
77 other_exp = nf_ct_expect_find_get(net, &t); 78 other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
78 if (other_exp) { 79 if (other_exp) {
79 nf_ct_unexpect_related(other_exp); 80 nf_ct_unexpect_related(other_exp);
80 nf_ct_expect_put(other_exp); 81 nf_ct_expect_put(other_exp);
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 9e81e0dfb4ec..26de2c1f7fab 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -15,6 +15,7 @@
15#include <linux/kmod.h> 15#include <linux/kmod.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/slab.h>
18#include <net/checksum.h> 19#include <net/checksum.h>
19#include <net/route.h> 20#include <net/route.h>
20#include <linux/bitops.h> 21#include <linux/bitops.h>
@@ -28,36 +29,6 @@
28 (1 << NF_INET_POST_ROUTING) | \ 29 (1 << NF_INET_POST_ROUTING) | \
29 (1 << NF_INET_LOCAL_OUT)) 30 (1 << NF_INET_LOCAL_OUT))
30 31
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[3];
35 struct ipt_error term;
36} nat_initial_table __net_initdata = {
37 .repl = {
38 .name = "nat",
39 .valid_hooks = NAT_VALID_HOOKS,
40 .num_entries = 4,
41 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
46 },
47 .underflow = {
48 [NF_INET_PRE_ROUTING] = 0,
49 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
50 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
51 },
52 },
53 .entries = {
54 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
57 },
58 .term = IPT_ERROR_INIT, /* ERROR */
59};
60
61static const struct xt_table nat_table = { 32static const struct xt_table nat_table = {
62 .name = "nat", 33 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 34 .valid_hooks = NAT_VALID_HOOKS,
@@ -186,8 +157,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {
186 157
187static int __net_init nf_nat_rule_net_init(struct net *net) 158static int __net_init nf_nat_rule_net_init(struct net *net)
188{ 159{
189 net->ipv4.nat_table = ipt_register_table(net, &nat_table, 160 struct ipt_replace *repl;
190 &nat_initial_table.repl); 161
162 repl = ipt_alloc_initial_table(&nat_table);
163 if (repl == NULL)
164 return -ENOMEM;
165 net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
166 kfree(repl);
191 if (IS_ERR(net->ipv4.nat_table)) 167 if (IS_ERR(net->ipv4.nat_table))
192 return PTR_ERR(net->ipv4.nat_table); 168 return PTR_ERR(net->ipv4.nat_table);
193 return 0; 169 return 0;
@@ -195,7 +171,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)
195 171
196static void __net_exit nf_nat_rule_net_exit(struct net *net) 172static void __net_exit nf_nat_rule_net_exit(struct net *net)
197{ 173{
198 ipt_unregister_table(net->ipv4.nat_table); 174 ipt_unregister_table(net, net->ipv4.nat_table);
199} 175}
200 176
201static struct pernet_operations nf_nat_rule_net_ops = { 177static struct pernet_operations nf_nat_rule_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 07d61a57613c..11b538deaaec 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -1,4 +1,4 @@
1/* SIP extension for UDP NAT alteration. 1/* SIP extension for NAT alteration.
2 * 2 *
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_nat_ftp.c and other modules. 4 * based on RR's ip_nat_ftp.c and other modules.
@@ -15,6 +15,7 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <net/ip.h> 16#include <net/ip.h>
17#include <linux/udp.h> 17#include <linux/udp.h>
18#include <linux/tcp.h>
18 19
19#include <net/netfilter/nf_nat.h> 20#include <net/netfilter/nf_nat.h>
20#include <net/netfilter/nf_nat_helper.h> 21#include <net/netfilter/nf_nat_helper.h>
@@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper");
29MODULE_ALIAS("ip_nat_sip"); 30MODULE_ALIAS("ip_nat_sip");
30 31
31 32
32static unsigned int mangle_packet(struct sk_buff *skb, 33static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
33 const char **dptr, unsigned int *datalen, 34 const char **dptr, unsigned int *datalen,
34 unsigned int matchoff, unsigned int matchlen, 35 unsigned int matchoff, unsigned int matchlen,
35 const char *buffer, unsigned int buflen) 36 const char *buffer, unsigned int buflen)
36{ 37{
37 enum ip_conntrack_info ctinfo; 38 enum ip_conntrack_info ctinfo;
38 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 39 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
39 40 struct tcphdr *th;
40 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen, 41 unsigned int baseoff;
41 buffer, buflen)) 42
42 return 0; 43 if (nf_ct_protonum(ct) == IPPROTO_TCP) {
44 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
45 baseoff = ip_hdrlen(skb) + th->doff * 4;
46 matchoff += dataoff - baseoff;
47
48 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
49 matchoff, matchlen,
50 buffer, buflen, false))
51 return 0;
52 } else {
53 baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
54 matchoff += dataoff - baseoff;
55
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
57 matchoff, matchlen,
58 buffer, buflen))
59 return 0;
60 }
43 61
44 /* Reload data pointer and adjust datalen value */ 62 /* Reload data pointer and adjust datalen value */
45 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); 63 *dptr = skb->data + dataoff;
46 *datalen += buflen - matchlen; 64 *datalen += buflen - matchlen;
47 return 1; 65 return 1;
48} 66}
49 67
50static int map_addr(struct sk_buff *skb, 68static int map_addr(struct sk_buff *skb, unsigned int dataoff,
51 const char **dptr, unsigned int *datalen, 69 const char **dptr, unsigned int *datalen,
52 unsigned int matchoff, unsigned int matchlen, 70 unsigned int matchoff, unsigned int matchlen,
53 union nf_inet_addr *addr, __be16 port) 71 union nf_inet_addr *addr, __be16 port)
@@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb,
76 94
77 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); 95 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
78 96
79 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 97 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
80 buffer, buflen); 98 buffer, buflen);
81} 99}
82 100
83static int map_sip_addr(struct sk_buff *skb, 101static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
84 const char **dptr, unsigned int *datalen, 102 const char **dptr, unsigned int *datalen,
85 enum sip_header_types type) 103 enum sip_header_types type)
86{ 104{
@@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb,
93 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, 111 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
94 &matchoff, &matchlen, &addr, &port) <= 0) 112 &matchoff, &matchlen, &addr, &port) <= 0)
95 return 1; 113 return 1;
96 return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port); 114 return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
115 &addr, port);
97} 116}
98 117
99static unsigned int ip_nat_sip(struct sk_buff *skb, 118static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
100 const char **dptr, unsigned int *datalen) 119 const char **dptr, unsigned int *datalen)
101{ 120{
102 enum ip_conntrack_info ctinfo; 121 enum ip_conntrack_info ctinfo;
103 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 122 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
104 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 123 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
105 unsigned int dataoff, matchoff, matchlen; 124 unsigned int coff, matchoff, matchlen;
125 enum sip_header_types hdr;
106 union nf_inet_addr addr; 126 union nf_inet_addr addr;
107 __be16 port; 127 __be16 port;
108 int request, in_header; 128 int request, in_header;
@@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
112 if (ct_sip_parse_request(ct, *dptr, *datalen, 132 if (ct_sip_parse_request(ct, *dptr, *datalen,
113 &matchoff, &matchlen, 133 &matchoff, &matchlen,
114 &addr, &port) > 0 && 134 &addr, &port) > 0 &&
115 !map_addr(skb, dptr, datalen, matchoff, matchlen, 135 !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
116 &addr, port)) 136 &addr, port))
117 return NF_DROP; 137 return NF_DROP;
118 request = 1; 138 request = 1;
119 } else 139 } else
120 request = 0; 140 request = 0;
121 141
142 if (nf_ct_protonum(ct) == IPPROTO_TCP)
143 hdr = SIP_HDR_VIA_TCP;
144 else
145 hdr = SIP_HDR_VIA_UDP;
146
122 /* Translate topmost Via header and parameters */ 147 /* Translate topmost Via header and parameters */
123 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, 148 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
124 SIP_HDR_VIA, NULL, &matchoff, &matchlen, 149 hdr, NULL, &matchoff, &matchlen,
125 &addr, &port) > 0) { 150 &addr, &port) > 0) {
126 unsigned int matchend, poff, plen, buflen, n; 151 unsigned int matchend, poff, plen, buflen, n;
127 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
@@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
138 goto next; 163 goto next;
139 } 164 }
140 165
141 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 166 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
142 &addr, port)) 167 &addr, port))
143 return NF_DROP; 168 return NF_DROP;
144 169
@@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
153 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { 178 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
154 buflen = sprintf(buffer, "%pI4", 179 buflen = sprintf(buffer, "%pI4",
155 &ct->tuplehash[!dir].tuple.dst.u3.ip); 180 &ct->tuplehash[!dir].tuple.dst.u3.ip);
156 if (!mangle_packet(skb, dptr, datalen, poff, plen, 181 if (!mangle_packet(skb, dataoff, dptr, datalen,
157 buffer, buflen)) 182 poff, plen, buffer, buflen))
158 return NF_DROP; 183 return NF_DROP;
159 } 184 }
160 185
@@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
167 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { 192 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
168 buflen = sprintf(buffer, "%pI4", 193 buflen = sprintf(buffer, "%pI4",
169 &ct->tuplehash[!dir].tuple.src.u3.ip); 194 &ct->tuplehash[!dir].tuple.src.u3.ip);
170 if (!mangle_packet(skb, dptr, datalen, poff, plen, 195 if (!mangle_packet(skb, dataoff, dptr, datalen,
171 buffer, buflen)) 196 poff, plen, buffer, buflen))
172 return NF_DROP; 197 return NF_DROP;
173 } 198 }
174 199
@@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
181 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { 206 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
182 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; 207 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
183 buflen = sprintf(buffer, "%u", ntohs(p)); 208 buflen = sprintf(buffer, "%u", ntohs(p));
184 if (!mangle_packet(skb, dptr, datalen, poff, plen, 209 if (!mangle_packet(skb, dataoff, dptr, datalen,
185 buffer, buflen)) 210 poff, plen, buffer, buflen))
186 return NF_DROP; 211 return NF_DROP;
187 } 212 }
188 } 213 }
189 214
190next: 215next:
191 /* Translate Contact headers */ 216 /* Translate Contact headers */
192 dataoff = 0; 217 coff = 0;
193 in_header = 0; 218 in_header = 0;
194 while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 219 while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
195 SIP_HDR_CONTACT, &in_header, 220 SIP_HDR_CONTACT, &in_header,
196 &matchoff, &matchlen, 221 &matchoff, &matchlen,
197 &addr, &port) > 0) { 222 &addr, &port) > 0) {
198 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 223 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
199 &addr, port)) 224 &addr, port))
200 return NF_DROP; 225 return NF_DROP;
201 } 226 }
202 227
203 if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) || 228 if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
204 !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO)) 229 !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
205 return NF_DROP; 230 return NF_DROP;
231
206 return NF_ACCEPT; 232 return NF_ACCEPT;
207} 233}
208 234
235static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
236{
237 enum ip_conntrack_info ctinfo;
238 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
239 const struct tcphdr *th;
240
241 if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
242 return;
243
244 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
245 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
246}
247
209/* Handles expected signalling connections and media streams */ 248/* Handles expected signalling connections and media streams */
210static void ip_nat_sip_expected(struct nf_conn *ct, 249static void ip_nat_sip_expected(struct nf_conn *ct,
211 struct nf_conntrack_expect *exp) 250 struct nf_conntrack_expect *exp)
@@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct,
232 } 271 }
233} 272}
234 273
235static unsigned int ip_nat_sip_expect(struct sk_buff *skb, 274static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
236 const char **dptr, unsigned int *datalen, 275 const char **dptr, unsigned int *datalen,
237 struct nf_conntrack_expect *exp, 276 struct nf_conntrack_expect *exp,
238 unsigned int matchoff, 277 unsigned int matchoff,
@@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
279 if (exp->tuple.dst.u3.ip != exp->saved_ip || 318 if (exp->tuple.dst.u3.ip != exp->saved_ip ||
280 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { 319 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
281 buflen = sprintf(buffer, "%pI4:%u", &newip, port); 320 buflen = sprintf(buffer, "%pI4:%u", &newip, port);
282 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 321 if (!mangle_packet(skb, dataoff, dptr, datalen,
283 buffer, buflen)) 322 matchoff, matchlen, buffer, buflen))
284 goto err; 323 goto err;
285 } 324 }
286 return NF_ACCEPT; 325 return NF_ACCEPT;
@@ -290,7 +329,7 @@ err:
290 return NF_DROP; 329 return NF_DROP;
291} 330}
292 331
293static int mangle_content_len(struct sk_buff *skb, 332static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
294 const char **dptr, unsigned int *datalen) 333 const char **dptr, unsigned int *datalen)
295{ 334{
296 enum ip_conntrack_info ctinfo; 335 enum ip_conntrack_info ctinfo;
@@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb,
312 return 0; 351 return 0;
313 352
314 buflen = sprintf(buffer, "%u", c_len); 353 buflen = sprintf(buffer, "%u", c_len);
315 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 354 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
316 buffer, buflen); 355 buffer, buflen);
317} 356}
318 357
319static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr, 358static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
320 unsigned int dataoff, unsigned int *datalen, 359 const char **dptr, unsigned int *datalen,
360 unsigned int sdpoff,
321 enum sdp_header_types type, 361 enum sdp_header_types type,
322 enum sdp_header_types term, 362 enum sdp_header_types term,
323 char *buffer, int buflen) 363 char *buffer, int buflen)
@@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
326 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 366 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
327 unsigned int matchlen, matchoff; 367 unsigned int matchlen, matchoff;
328 368
329 if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term, 369 if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
330 &matchoff, &matchlen) <= 0) 370 &matchoff, &matchlen) <= 0)
331 return -ENOENT; 371 return -ENOENT;
332 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 372 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
333 buffer, buflen) ? 0 : -EINVAL; 373 buffer, buflen) ? 0 : -EINVAL;
334} 374}
335 375
336static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, 376static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
337 unsigned int dataoff, 377 const char **dptr, unsigned int *datalen,
338 unsigned int *datalen, 378 unsigned int sdpoff,
339 enum sdp_header_types type, 379 enum sdp_header_types type,
340 enum sdp_header_types term, 380 enum sdp_header_types term,
341 const union nf_inet_addr *addr) 381 const union nf_inet_addr *addr)
@@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
344 unsigned int buflen; 384 unsigned int buflen;
345 385
346 buflen = sprintf(buffer, "%pI4", &addr->ip); 386 buflen = sprintf(buffer, "%pI4", &addr->ip);
347 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, 387 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
348 buffer, buflen)) 388 buffer, buflen))
349 return 0; 389 return 0;
350 390
351 return mangle_content_len(skb, dptr, datalen); 391 return mangle_content_len(skb, dataoff, dptr, datalen);
352} 392}
353 393
354static unsigned int ip_nat_sdp_port(struct sk_buff *skb, 394static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
355 const char **dptr, 395 const char **dptr, unsigned int *datalen,
356 unsigned int *datalen,
357 unsigned int matchoff, 396 unsigned int matchoff,
358 unsigned int matchlen, 397 unsigned int matchlen,
359 u_int16_t port) 398 u_int16_t port)
@@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
362 unsigned int buflen; 401 unsigned int buflen;
363 402
364 buflen = sprintf(buffer, "%u", port); 403 buflen = sprintf(buffer, "%u", port);
365 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 404 if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
366 buffer, buflen)) 405 buffer, buflen))
367 return 0; 406 return 0;
368 407
369 return mangle_content_len(skb, dptr, datalen); 408 return mangle_content_len(skb, dataoff, dptr, datalen);
370} 409}
371 410
372static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, 411static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
373 unsigned int dataoff, 412 const char **dptr, unsigned int *datalen,
374 unsigned int *datalen, 413 unsigned int sdpoff,
375 const union nf_inet_addr *addr) 414 const union nf_inet_addr *addr)
376{ 415{
377 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 416 char buffer[sizeof("nnn.nnn.nnn.nnn")];
@@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
379 418
380 /* Mangle session description owner and contact addresses */ 419 /* Mangle session description owner and contact addresses */
381 buflen = sprintf(buffer, "%pI4", &addr->ip); 420 buflen = sprintf(buffer, "%pI4", &addr->ip);
382 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, 421 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
383 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, 422 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
384 buffer, buflen)) 423 buffer, buflen))
385 return 0; 424 return 0;
386 425
387 switch (mangle_sdp_packet(skb, dptr, dataoff, datalen, 426 switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
388 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, 427 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
389 buffer, buflen)) { 428 buffer, buflen)) {
390 case 0: 429 case 0:
@@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
401 return 0; 440 return 0;
402 } 441 }
403 442
404 return mangle_content_len(skb, dptr, datalen); 443 return mangle_content_len(skb, dataoff, dptr, datalen);
405} 444}
406 445
407/* So, this packet has hit the connection tracking matching code. 446/* So, this packet has hit the connection tracking matching code.
408 Mangle it, and change the expectation to match the new version. */ 447 Mangle it, and change the expectation to match the new version. */
409static unsigned int ip_nat_sdp_media(struct sk_buff *skb, 448static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
410 const char **dptr, 449 const char **dptr, unsigned int *datalen,
411 unsigned int *datalen,
412 struct nf_conntrack_expect *rtp_exp, 450 struct nf_conntrack_expect *rtp_exp,
413 struct nf_conntrack_expect *rtcp_exp, 451 struct nf_conntrack_expect *rtcp_exp,
414 unsigned int mediaoff, 452 unsigned int mediaoff,
@@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
456 494
457 /* Update media port. */ 495 /* Update media port. */
458 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && 496 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
459 !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port)) 497 !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
498 mediaoff, medialen, port))
460 goto err2; 499 goto err2;
461 500
462 return NF_ACCEPT; 501 return NF_ACCEPT;
@@ -471,6 +510,7 @@ err1:
471static void __exit nf_nat_sip_fini(void) 510static void __exit nf_nat_sip_fini(void)
472{ 511{
473 rcu_assign_pointer(nf_nat_sip_hook, NULL); 512 rcu_assign_pointer(nf_nat_sip_hook, NULL);
513 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
474 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); 514 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
475 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); 515 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
476 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); 516 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
@@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void)
482static int __init nf_nat_sip_init(void) 522static int __init nf_nat_sip_init(void)
483{ 523{
484 BUG_ON(nf_nat_sip_hook != NULL); 524 BUG_ON(nf_nat_sip_hook != NULL);
525 BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
485 BUG_ON(nf_nat_sip_expect_hook != NULL); 526 BUG_ON(nf_nat_sip_expect_hook != NULL);
486 BUG_ON(nf_nat_sdp_addr_hook != NULL); 527 BUG_ON(nf_nat_sdp_addr_hook != NULL);
487 BUG_ON(nf_nat_sdp_port_hook != NULL); 528 BUG_ON(nf_nat_sdp_port_hook != NULL);
488 BUG_ON(nf_nat_sdp_session_hook != NULL); 529 BUG_ON(nf_nat_sdp_session_hook != NULL);
489 BUG_ON(nf_nat_sdp_media_hook != NULL); 530 BUG_ON(nf_nat_sdp_media_hook != NULL);
490 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 531 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
532 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
491 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); 533 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
492 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 534 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
493 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); 535 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
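A worked example (hypothetical numbers) of the offset fix-up performed by the TCP branch of mangle_packet() above: matchoff arrives relative to the current SIP message at *dptr, while the TCP mangling helper expects an offset from the start of the TCP payload.

/* matchoff is message-relative; dataoff is where the message starts,
 * measured from the IP header; baseoff is where the TCP payload starts. */
static unsigned int sip_tcp_matchoff(unsigned int matchoff,
				     unsigned int dataoff,
				     unsigned int baseoff)
{
	return matchoff + dataoff - baseoff;
}
/* e.g. 20-byte IP header + 32-byte TCP header -> baseoff = 52; the second
 * SIP message in the segment starts 512 bytes into the payload, so
 * dataoff = 564; a match 16 bytes into that message is rewritten at
 * 16 + 564 - 52 = 528 bytes into the TCP payload. */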
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d9521f6f9ed0..4d85b6e55f29 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -43,6 +43,7 @@
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/slab.h>
46#include <linux/in.h> 47#include <linux/in.h>
47#include <linux/ip.h> 48#include <linux/ip.h>
48#include <linux/udp.h> 49#include <linux/udp.h>
@@ -1038,7 +1039,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1038 unsigned int cls, con, tag, vers, pdutype; 1039 unsigned int cls, con, tag, vers, pdutype;
1039 struct asn1_ctx ctx; 1040 struct asn1_ctx ctx;
1040 struct asn1_octstr comm; 1041 struct asn1_octstr comm;
1041 struct snmp_object **obj; 1042 struct snmp_object *obj;
1042 1043
1043 if (debug > 1) 1044 if (debug > 1)
1044 hex_dump(msg, len); 1045 hex_dump(msg, len);
@@ -1148,43 +1149,34 @@ static int snmp_parse_mangle(unsigned char *msg,
1148 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 1149 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
1149 return 0; 1150 return 0;
1150 1151
1151 obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
1152 if (obj == NULL) {
1153 if (net_ratelimit())
1154 printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
1155 return 0;
1156 }
1157
1158 while (!asn1_eoc_decode(&ctx, eoc)) { 1152 while (!asn1_eoc_decode(&ctx, eoc)) {
1159 unsigned int i; 1153 unsigned int i;
1160 1154
1161 if (!snmp_object_decode(&ctx, obj)) { 1155 if (!snmp_object_decode(&ctx, &obj)) {
1162 if (*obj) { 1156 if (obj) {
1163 kfree((*obj)->id); 1157 kfree(obj->id);
1164 kfree(*obj); 1158 kfree(obj);
1165 } 1159 }
1166 kfree(obj);
1167 return 0; 1160 return 0;
1168 } 1161 }
1169 1162
1170 if (debug > 1) { 1163 if (debug > 1) {
1171 printk(KERN_DEBUG "bsalg: object: "); 1164 printk(KERN_DEBUG "bsalg: object: ");
1172 for (i = 0; i < (*obj)->id_len; i++) { 1165 for (i = 0; i < obj->id_len; i++) {
1173 if (i > 0) 1166 if (i > 0)
1174 printk("."); 1167 printk(".");
1175 printk("%lu", (*obj)->id[i]); 1168 printk("%lu", obj->id[i]);
1176 } 1169 }
1177 printk(": type=%u\n", (*obj)->type); 1170 printk(": type=%u\n", obj->type);
1178 1171
1179 } 1172 }
1180 1173
1181 if ((*obj)->type == SNMP_IPADDR) 1174 if (obj->type == SNMP_IPADDR)
1182 mangle_address(ctx.begin, ctx.pointer - 4 , map, check); 1175 mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
1183 1176
1184 kfree((*obj)->id); 1177 kfree(obj->id);
1185 kfree(*obj); 1178 kfree(obj);
1186 } 1179 }
1187 kfree(obj);
1188 1180
1189 if (!asn1_eoc_decode(&ctx, eoc)) 1181 if (!asn1_eoc_decode(&ctx, eoc))
1190 return 0; 1182 return 0;
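The snmp_parse_mangle() change above drops the kmalloc of the out-pointer itself: the decoder only needs the address of a plain pointer. A tiny userspace illustration of the same pattern, with hypothetical names:

#include <stdlib.h>

struct obj { int type; };

static int decode(struct obj **out)
{
	*out = malloc(sizeof(**out));	/* the object itself may still be heap-allocated */
	return *out != NULL;
}

static int parse(void)
{
	struct obj *o;			/* out-pointer lives on the stack... */

	if (!decode(&o))		/* ...only its address is passed down */
		return 0;
	/* ... use o->type ... */
	free(o);
	return 1;
}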
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 5678e9562c15..c39c9cf6bee6 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/icmp.h> 9#include <linux/icmp.h>
10#include <linux/gfp.h>
10#include <linux/ip.h> 11#include <linux/ip.h>
11#include <linux/netfilter.h> 12#include <linux/netfilter.h>
12#include <linux/netfilter_ipv4.h> 13#include <linux/netfilter_ipv4.h>
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7d..4f1f337f4337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
127 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
128}; 128};
129 129
130static struct { 130static const struct {
131 char *name; 131 const char *name;
132 int index; 132 int index;
133} icmpmibmap[] = { 133} icmpmibmap[] = {
134 { "DestUnreachs", ICMP_DEST_UNREACH }, 134 { "DestUnreachs", ICMP_DEST_UNREACH },
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
249 SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), 249 SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
250 SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), 250 SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), 251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
252 SNMP_MIB_SENTINEL 254 SNMP_MIB_SENTINEL
253}; 255};
254 256
@@ -280,7 +282,7 @@ static void icmpmsg_put(struct seq_file *seq)
280 282
281 count = 0; 283 count = 0;
282 for (i = 0; i < ICMPMSG_MIB_MAX; i++) { 284 for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
283 val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i); 285 val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
284 if (val) { 286 if (val) {
285 type[count] = i; 287 type[count] = i;
286 vals[count++] = val; 288 vals[count++] = val;
@@ -307,18 +309,18 @@ static void icmp_put(struct seq_file *seq)
307 for (i=0; icmpmibmap[i].name != NULL; i++) 309 for (i=0; icmpmibmap[i].name != NULL; i++)
308 seq_printf(seq, " Out%s", icmpmibmap[i].name); 310 seq_printf(seq, " Out%s", icmpmibmap[i].name);
309 seq_printf(seq, "\nIcmp: %lu %lu", 311 seq_printf(seq, "\nIcmp: %lu %lu",
310 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), 312 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
311 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); 313 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
312 for (i=0; icmpmibmap[i].name != NULL; i++) 314 for (i=0; icmpmibmap[i].name != NULL; i++)
313 seq_printf(seq, " %lu", 315 seq_printf(seq, " %lu",
314 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 316 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
315 icmpmibmap[i].index)); 317 icmpmibmap[i].index));
316 seq_printf(seq, " %lu %lu", 318 seq_printf(seq, " %lu %lu",
317 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), 319 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
318 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); 320 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
319 for (i=0; icmpmibmap[i].name != NULL; i++) 321 for (i=0; icmpmibmap[i].name != NULL; i++)
320 seq_printf(seq, " %lu", 322 seq_printf(seq, " %lu",
321 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 323 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
322 icmpmibmap[i].index | 0x100)); 324 icmpmibmap[i].index | 0x100));
323} 325}
324 326
@@ -341,7 +343,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
341 343
342 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 344 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
343 seq_printf(seq, " %lu", 345 seq_printf(seq, " %lu",
344 snmp_fold_field((void **)net->mib.ip_statistics, 346 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
345 snmp4_ipstats_list[i].entry)); 347 snmp4_ipstats_list[i].entry));
346 348
347 icmp_put(seq); /* RFC 2011 compatibility */ 349 icmp_put(seq); /* RFC 2011 compatibility */
@@ -356,11 +358,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
356 /* MaxConn field is signed, RFC 2012 */ 358 /* MaxConn field is signed, RFC 2012 */
357 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) 359 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
358 seq_printf(seq, " %ld", 360 seq_printf(seq, " %ld",
359 snmp_fold_field((void **)net->mib.tcp_statistics, 361 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
360 snmp4_tcp_list[i].entry)); 362 snmp4_tcp_list[i].entry));
361 else 363 else
362 seq_printf(seq, " %lu", 364 seq_printf(seq, " %lu",
363 snmp_fold_field((void **)net->mib.tcp_statistics, 365 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
364 snmp4_tcp_list[i].entry)); 366 snmp4_tcp_list[i].entry));
365 } 367 }
366 368
@@ -371,7 +373,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
371 seq_puts(seq, "\nUdp:"); 373 seq_puts(seq, "\nUdp:");
372 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 374 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
373 seq_printf(seq, " %lu", 375 seq_printf(seq, " %lu",
374 snmp_fold_field((void **)net->mib.udp_statistics, 376 snmp_fold_field((void __percpu **)net->mib.udp_statistics,
375 snmp4_udp_list[i].entry)); 377 snmp4_udp_list[i].entry));
376 378
377 /* the UDP and UDP-Lite MIBs are the same */ 379 /* the UDP and UDP-Lite MIBs are the same */
@@ -382,7 +384,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
382 seq_puts(seq, "\nUdpLite:"); 384 seq_puts(seq, "\nUdpLite:");
383 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 385 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
384 seq_printf(seq, " %lu", 386 seq_printf(seq, " %lu",
385 snmp_fold_field((void **)net->mib.udplite_statistics, 387 snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
386 snmp4_udp_list[i].entry)); 388 snmp4_udp_list[i].entry));
387 389
388 seq_putc(seq, '\n'); 390 seq_putc(seq, '\n');
@@ -419,7 +421,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
419 seq_puts(seq, "\nTcpExt:"); 421 seq_puts(seq, "\nTcpExt:");
420 for (i = 0; snmp4_net_list[i].name != NULL; i++) 422 for (i = 0; snmp4_net_list[i].name != NULL; i++)
421 seq_printf(seq, " %lu", 423 seq_printf(seq, " %lu",
422 snmp_fold_field((void **)net->mib.net_statistics, 424 snmp_fold_field((void __percpu **)net->mib.net_statistics,
423 snmp4_net_list[i].entry)); 425 snmp4_net_list[i].entry));
424 426
425 seq_puts(seq, "\nIpExt:"); 427 seq_puts(seq, "\nIpExt:");
@@ -429,7 +431,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
429 seq_puts(seq, "\nIpExt:"); 431 seq_puts(seq, "\nIpExt:");
430 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) 432 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
431 seq_printf(seq, " %lu", 433 seq_printf(seq, " %lu",
432 snmp_fold_field((void **)net->mib.ip_statistics, 434 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
433 snmp4_ipextstats_list[i].entry)); 435 snmp4_ipextstats_list[i].entry));
434 436
435 seq_putc(seq, '\n'); 437 seq_putc(seq, '\n');
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ce154b47f1da..cc6f097fbd5f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -60,7 +60,6 @@
60#include <net/net_namespace.h> 60#include <net/net_namespace.h>
61#include <net/dst.h> 61#include <net/dst.h>
62#include <net/sock.h> 62#include <net/sock.h>
63#include <linux/gfp.h>
64#include <linux/ip.h> 63#include <linux/ip.h>
65#include <linux/net.h> 64#include <linux/net.h>
66#include <net/ip.h> 65#include <net/ip.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d33384..cb562fdd9b9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -90,6 +90,7 @@
90#include <linux/jhash.h> 90#include <linux/jhash.h>
91#include <linux/rcupdate.h> 91#include <linux/rcupdate.h>
92#include <linux/times.h> 92#include <linux/times.h>
93#include <linux/slab.h>
93#include <net/dst.h> 94#include <net/dst.h>
94#include <net/net_namespace.h> 95#include <net/net_namespace.h>
95#include <net/protocol.h> 96#include <net/protocol.h>
@@ -146,7 +147,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146static void ipv4_link_failure(struct sk_buff *skb); 147static void ipv4_link_failure(struct sk_buff *skb);
147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 148static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148static int rt_garbage_collect(struct dst_ops *ops); 149static int rt_garbage_collect(struct dst_ops *ops);
149static void rt_emergency_hash_rebuild(struct net *net);
150 150
151 151
152static struct dst_ops ipv4_dst_ops = { 152static struct dst_ops ipv4_dst_ops = {
@@ -287,12 +287,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
287 if (!rt_hash_table[st->bucket].chain) 287 if (!rt_hash_table[st->bucket].chain)
288 continue; 288 continue;
289 rcu_read_lock_bh(); 289 rcu_read_lock_bh();
290 r = rcu_dereference(rt_hash_table[st->bucket].chain); 290 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
291 while (r) { 291 while (r) {
292 if (dev_net(r->u.dst.dev) == seq_file_net(seq) && 292 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
293 r->rt_genid == st->genid) 293 r->rt_genid == st->genid)
294 return r; 294 return r;
295 r = rcu_dereference(r->u.dst.rt_next); 295 r = rcu_dereference_bh(r->u.dst.rt_next);
296 } 296 }
297 rcu_read_unlock_bh(); 297 rcu_read_unlock_bh();
298 } 298 }
@@ -314,7 +314,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
314 rcu_read_lock_bh(); 314 rcu_read_lock_bh();
315 r = rt_hash_table[st->bucket].chain; 315 r = rt_hash_table[st->bucket].chain;
316 } 316 }
317 return rcu_dereference(r); 317 return rcu_dereference_bh(r);
318} 318}
319 319
320static struct rtable *rt_cache_get_next(struct seq_file *seq, 320static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -780,11 +780,30 @@ static void rt_do_flush(int process_context)
780#define FRACT_BITS 3 780#define FRACT_BITS 3
781#define ONE (1UL << FRACT_BITS) 781#define ONE (1UL << FRACT_BITS)
782 782
783/*
784 * Given a hash chain and an item in this hash chain,
785 * find if a previous entry has the same hash_inputs
786 * (but differs on tos, mark or oif)
787 * Returns 0 if an alias is found.
788 * Returns ONE if rth has no alias before itself.
789 */
790static int has_noalias(const struct rtable *head, const struct rtable *rth)
791{
792 const struct rtable *aux = head;
793
794 while (aux != rth) {
795 if (compare_hash_inputs(&aux->fl, &rth->fl))
796 return 0;
797 aux = aux->u.dst.rt_next;
798 }
799 return ONE;
800}
801
783static void rt_check_expire(void) 802static void rt_check_expire(void)
784{ 803{
785 static unsigned int rover; 804 static unsigned int rover;
786 unsigned int i = rover, goal; 805 unsigned int i = rover, goal;
787 struct rtable *rth, *aux, **rthp; 806 struct rtable *rth, **rthp;
788 unsigned long samples = 0; 807 unsigned long samples = 0;
789 unsigned long sum = 0, sum2 = 0; 808 unsigned long sum = 0, sum2 = 0;
790 unsigned long delta; 809 unsigned long delta;
@@ -835,15 +854,7 @@ nofree:
835 * attributes don't unfairly skew 854 * attributes don't unfairly skew
836 * the length computation 855 * the length computation
837 */ 856 */
838 for (aux = rt_hash_table[i].chain;;) { 857 length += has_noalias(rt_hash_table[i].chain, rth);
839 if (aux == rth) {
840 length += ONE;
841 break;
842 }
843 if (compare_hash_inputs(&aux->fl, &rth->fl))
844 break;
845 aux = aux->u.dst.rt_next;
846 }
847 continue; 858 continue;
848 } 859 }
849 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) 860 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -922,10 +933,8 @@ static void rt_secret_rebuild_oneshot(struct net *net)
922{ 933{
923 del_timer_sync(&net->ipv4.rt_secret_timer); 934 del_timer_sync(&net->ipv4.rt_secret_timer);
924 rt_cache_invalidate(net); 935 rt_cache_invalidate(net);
925 if (ip_rt_secret_interval) { 936 if (ip_rt_secret_interval)
926 net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; 937 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
927 add_timer(&net->ipv4.rt_secret_timer);
928 }
929} 938}
930 939
931static void rt_emergency_hash_rebuild(struct net *net) 940static void rt_emergency_hash_rebuild(struct net *net)
@@ -1073,8 +1082,23 @@ work_done:
1073out: return 0; 1082out: return 0;
1074} 1083}
1075 1084
1085/*
1086 * Returns number of entries in a hash chain that have different hash_inputs
1087 */
1088static int slow_chain_length(const struct rtable *head)
1089{
1090 int length = 0;
1091 const struct rtable *rth = head;
1092
1093 while (rth) {
1094 length += has_noalias(head, rth);
1095 rth = rth->u.dst.rt_next;
1096 }
1097 return length >> FRACT_BITS;
1098}
1099
1076static int rt_intern_hash(unsigned hash, struct rtable *rt, 1100static int rt_intern_hash(unsigned hash, struct rtable *rt,
1077 struct rtable **rp, struct sk_buff *skb) 1101 struct rtable **rp, struct sk_buff *skb, int ifindex)
1078{ 1102{
1079 struct rtable *rth, **rthp; 1103 struct rtable *rth, **rthp;
1080 unsigned long now; 1104 unsigned long now;
@@ -1185,14 +1209,20 @@ restart:
1185 rt_free(cand); 1209 rt_free(cand);
1186 } 1210 }
1187 } else { 1211 } else {
1188 if (chain_length > rt_chain_length_max) { 1212 if (chain_length > rt_chain_length_max &&
1213 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1189 struct net *net = dev_net(rt->u.dst.dev); 1214 struct net *net = dev_net(rt->u.dst.dev);
1190 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1215 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1191 if (!rt_caching(dev_net(rt->u.dst.dev))) { 1216 if (!rt_caching(net)) {
1192 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1217 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1193 rt->u.dst.dev->name, num); 1218 rt->u.dst.dev->name, num);
1194 } 1219 }
1195 rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); 1220 rt_emergency_hash_rebuild(net);
1221 spin_unlock_bh(rt_hash_lock_addr(hash));
1222
1223 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1224 ifindex, rt_genid(net));
1225 goto restart;
1196 } 1226 }
1197 } 1227 }
1198 1228
@@ -1417,7 +1447,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1417 dev_hold(rt->u.dst.dev); 1447 dev_hold(rt->u.dst.dev);
1418 if (rt->idev) 1448 if (rt->idev)
1419 in_dev_hold(rt->idev); 1449 in_dev_hold(rt->idev);
1420 rt->u.dst.obsolete = 0; 1450 rt->u.dst.obsolete = -1;
1421 rt->u.dst.lastuse = jiffies; 1451 rt->u.dst.lastuse = jiffies;
1422 rt->u.dst.path = &rt->u.dst; 1452 rt->u.dst.path = &rt->u.dst;
1423 rt->u.dst.neighbour = NULL; 1453 rt->u.dst.neighbour = NULL;
@@ -1453,7 +1483,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1453 &netevent); 1483 &netevent);
1454 1484
1455 rt_del(hash, rth); 1485 rt_del(hash, rth);
1456 if (!rt_intern_hash(hash, rt, &rt, NULL)) 1486 if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
1457 ip_rt_put(rt); 1487 ip_rt_put(rt);
1458 goto do_next; 1488 goto do_next;
1459 } 1489 }
@@ -1482,11 +1512,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1482 struct dst_entry *ret = dst; 1512 struct dst_entry *ret = dst;
1483 1513
1484 if (rt) { 1514 if (rt) {
1485 if (dst->obsolete) { 1515 if (dst->obsolete > 0) {
1486 ip_rt_put(rt); 1516 ip_rt_put(rt);
1487 ret = NULL; 1517 ret = NULL;
1488 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1518 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1489 rt->u.dst.expires) { 1519 (rt->u.dst.expires &&
1520 time_after_eq(jiffies, rt->u.dst.expires))) {
1490 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1521 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1491 rt->fl.oif, 1522 rt->fl.oif,
1492 rt_genid(dev_net(dst->dev))); 1523 rt_genid(dev_net(dst->dev)));
@@ -1702,7 +1733,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1702 1733
1703static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1734static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1704{ 1735{
1705 return NULL; 1736 if (rt_is_expired((struct rtable *)dst))
1737 return NULL;
1738 return dst;
1706} 1739}
1707 1740
1708static void ipv4_dst_destroy(struct dst_entry *dst) 1741static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1864,7 +1897,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1864 if (!rth) 1897 if (!rth)
1865 goto e_nobufs; 1898 goto e_nobufs;
1866 1899
1867 rth->u.dst.output= ip_rt_bug; 1900 rth->u.dst.output = ip_rt_bug;
1901 rth->u.dst.obsolete = -1;
1868 1902
1869 atomic_set(&rth->u.dst.__refcnt, 1); 1903 atomic_set(&rth->u.dst.__refcnt, 1);
1870 rth->u.dst.flags= DST_HOST; 1904 rth->u.dst.flags= DST_HOST;
@@ -1903,7 +1937,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1903 1937
1904 in_dev_put(in_dev); 1938 in_dev_put(in_dev);
1905 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1939 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1906 return rt_intern_hash(hash, rth, NULL, skb); 1940 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1907 1941
1908e_nobufs: 1942e_nobufs:
1909 in_dev_put(in_dev); 1943 in_dev_put(in_dev);
@@ -1990,8 +2024,13 @@ static int __mkroute_input(struct sk_buff *skb,
1990 if (skb->protocol != htons(ETH_P_IP)) { 2024 if (skb->protocol != htons(ETH_P_IP)) {
1991 /* Not IP (i.e. ARP). Do not create route, if it is 2025 /* Not IP (i.e. ARP). Do not create route, if it is
1992 * invalid for proxy arp. DNAT routes are always valid. 2026 * invalid for proxy arp. DNAT routes are always valid.
2027 *
 2028 * Proxy arp feature has been extended to allow ARP
2029 * replies back to the same interface, to support
2030 * Private VLAN switch technologies. See arp.c.
1993 */ 2031 */
1994 if (out_dev == in_dev) { 2032 if (out_dev == in_dev &&
2033 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1995 err = -EINVAL; 2034 err = -EINVAL;
1996 goto cleanup; 2035 goto cleanup;
1997 } 2036 }
@@ -2025,6 +2064,7 @@ static int __mkroute_input(struct sk_buff *skb,
2025 rth->fl.oif = 0; 2064 rth->fl.oif = 0;
2026 rth->rt_spec_dst= spec_dst; 2065 rth->rt_spec_dst= spec_dst;
2027 2066
2067 rth->u.dst.obsolete = -1;
2028 rth->u.dst.input = ip_forward; 2068 rth->u.dst.input = ip_forward;
2029 rth->u.dst.output = ip_output; 2069 rth->u.dst.output = ip_output;
2030 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); 2070 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
@@ -2064,7 +2104,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2064 /* put it into the cache */ 2104 /* put it into the cache */
2065 hash = rt_hash(daddr, saddr, fl->iif, 2105 hash = rt_hash(daddr, saddr, fl->iif,
2066 rt_genid(dev_net(rth->u.dst.dev))); 2106 rt_genid(dev_net(rth->u.dst.dev)));
2067 return rt_intern_hash(hash, rth, NULL, skb); 2107 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2068} 2108}
2069 2109
2070/* 2110/*
@@ -2189,6 +2229,7 @@ local_input:
2189 goto e_nobufs; 2229 goto e_nobufs;
2190 2230
2191 rth->u.dst.output= ip_rt_bug; 2231 rth->u.dst.output= ip_rt_bug;
2232 rth->u.dst.obsolete = -1;
2192 rth->rt_genid = rt_genid(net); 2233 rth->rt_genid = rt_genid(net);
2193 2234
2194 atomic_set(&rth->u.dst.__refcnt, 1); 2235 atomic_set(&rth->u.dst.__refcnt, 1);
@@ -2220,7 +2261,7 @@ local_input:
2220 } 2261 }
2221 rth->rt_type = res.type; 2262 rth->rt_type = res.type;
2222 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2263 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2223 err = rt_intern_hash(hash, rth, NULL, skb); 2264 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
2224 goto done; 2265 goto done;
2225 2266
2226no_route: 2267no_route:
@@ -2415,6 +2456,7 @@ static int __mkroute_output(struct rtable **result,
2415 rth->rt_spec_dst= fl->fl4_src; 2456 rth->rt_spec_dst= fl->fl4_src;
2416 2457
2417 rth->u.dst.output=ip_output; 2458 rth->u.dst.output=ip_output;
2459 rth->u.dst.obsolete = -1;
2418 rth->rt_genid = rt_genid(dev_net(dev_out)); 2460 rth->rt_genid = rt_genid(dev_net(dev_out));
2419 2461
2420 RT_CACHE_STAT_INC(out_slow_tot); 2462 RT_CACHE_STAT_INC(out_slow_tot);
@@ -2466,7 +2508,7 @@ static int ip_mkroute_output(struct rtable **rp,
2466 if (err == 0) { 2508 if (err == 0) {
2467 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2509 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2468 rt_genid(dev_net(dev_out))); 2510 rt_genid(dev_net(dev_out)));
2469 err = rt_intern_hash(hash, rth, rp, NULL); 2511 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2470 } 2512 }
2471 2513
2472 return err; 2514 return err;
@@ -2689,8 +2731,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2689 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); 2731 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2690 2732
2691 rcu_read_lock_bh(); 2733 rcu_read_lock_bh();
2692 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2734 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2693 rth = rcu_dereference(rth->u.dst.rt_next)) { 2735 rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
2694 if (rth->fl.fl4_dst == flp->fl4_dst && 2736 if (rth->fl.fl4_dst == flp->fl4_dst &&
2695 rth->fl.fl4_src == flp->fl4_src && 2737 rth->fl.fl4_src == flp->fl4_src &&
2696 rth->fl.iif == 0 && 2738 rth->fl.iif == 0 &&
@@ -3008,8 +3050,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3008 if (!rt_hash_table[h].chain) 3050 if (!rt_hash_table[h].chain)
3009 continue; 3051 continue;
3010 rcu_read_lock_bh(); 3052 rcu_read_lock_bh();
3011 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 3053 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3012 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 3054 rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
3013 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 3055 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
3014 continue; 3056 continue;
3015 if (rt_is_expired(rt)) 3057 if (rt_is_expired(rt))
@@ -3072,22 +3114,20 @@ static void rt_secret_reschedule(int old)
3072 rtnl_lock(); 3114 rtnl_lock();
3073 for_each_net(net) { 3115 for_each_net(net) {
3074 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); 3116 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
3117 long time;
3075 3118
3076 if (!new) 3119 if (!new)
3077 continue; 3120 continue;
3078 3121
3079 if (deleted) { 3122 if (deleted) {
3080 long time = net->ipv4.rt_secret_timer.expires - jiffies; 3123 time = net->ipv4.rt_secret_timer.expires - jiffies;
3081 3124
3082 if (time <= 0 || (time += diff) <= 0) 3125 if (time <= 0 || (time += diff) <= 0)
3083 time = 0; 3126 time = 0;
3084
3085 net->ipv4.rt_secret_timer.expires = time;
3086 } else 3127 } else
3087 net->ipv4.rt_secret_timer.expires = new; 3128 time = new;
3088 3129
3089 net->ipv4.rt_secret_timer.expires += jiffies; 3130 mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
3090 add_timer(&net->ipv4.rt_secret_timer);
3091 } 3131 }
3092 rtnl_unlock(); 3132 rtnl_unlock();
3093} 3133}
@@ -3329,7 +3369,7 @@ static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3329 3369
3330 3370
3331#ifdef CONFIG_NET_CLS_ROUTE 3371#ifdef CONFIG_NET_CLS_ROUTE
3332struct ip_rt_acct *ip_rt_acct __read_mostly; 3372struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3333#endif /* CONFIG_NET_CLS_ROUTE */ 3373#endif /* CONFIG_NET_CLS_ROUTE */
3334 3374
3335static __initdata unsigned long rhash_entries; 3375static __initdata unsigned long rhash_entries;
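The rt_secret_reschedule() hunk above replaces the open-coded ->expires assignment plus add_timer() with a single mod_timer() call. A minimal sketch of that pattern, with an invented timer helper that is not part of this commit:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Re-arm (or update) a timer to fire 'delay' jiffies from now.  mod_timer()
 * copes with the timer being pending or not, so the caller no longer needs
 * to hand-write ->expires and call add_timer() separately.
 */
static void rearm_after(struct timer_list *timer, long delay)
{
	if (delay < 0)
		delay = 0;
	mod_timer(timer, jiffies + delay);
}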
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef2473..5c24db4a3c91 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
358 358
359 tcp_select_initial_window(tcp_full_space(sk), req->mss, 359 tcp_select_initial_window(tcp_full_space(sk), req->mss,
360 &req->rcv_wnd, &req->window_clamp, 360 &req->rcv_wnd, &req->window_clamp,
361 ireq->wscale_ok, &rcv_wscale); 361 ireq->wscale_ok, &rcv_wscale,
362 dst_metric(&rt->u.dst, RTAX_INITRWND));
362 363
363 ireq->rcv_wscale = rcv_wscale; 364 ireq->rcv_wscale = rcv_wscale;
364 365
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce3994..1cd5c15174b8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -12,6 +12,7 @@
12#include <linux/inetdevice.h> 12#include <linux/inetdevice.h>
13#include <linux/seqlock.h> 13#include <linux/seqlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
15#include <net/snmp.h> 16#include <net/snmp.h>
16#include <net/icmp.h> 17#include <net/icmp.h>
17#include <net/ip.h> 18#include <net/ip.h>
@@ -576,6 +577,20 @@ static struct ctl_table ipv4_table[] = {
576 .proc_handler = proc_dointvec 577 .proc_handler = proc_dointvec
577 }, 578 },
578 { 579 {
580 .procname = "tcp_thin_linear_timeouts",
581 .data = &sysctl_tcp_thin_linear_timeouts,
582 .maxlen = sizeof(int),
583 .mode = 0644,
584 .proc_handler = proc_dointvec
585 },
586 {
587 .procname = "tcp_thin_dupack",
588 .data = &sysctl_tcp_thin_dupack,
589 .maxlen = sizeof(int),
590 .mode = 0644,
591 .proc_handler = proc_dointvec
592 },
593 {
579 .procname = "udp_mem", 594 .procname = "udp_mem",
580 .data = &sysctl_udp_mem, 595 .data = &sysctl_udp_mem,
581 .maxlen = sizeof(sysctl_udp_mem), 596 .maxlen = sizeof(sysctl_udp_mem),
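For reference, the two entries registered above appear under /proc/sys/net/ipv4/ as tcp_thin_linear_timeouts and tcp_thin_dupack (per the .procname fields). A hypothetical userspace sketch for toggling them; the helper below is illustrative and not part of this commit:

#include <stdio.h>

/* Write a string value to a sysctl file and report success/failure. */
static int write_sysctl(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(value, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

/* Example:
 *   write_sysctl("/proc/sys/net/ipv4/tcp_thin_linear_timeouts", "1");
 *   write_sysctl("/proc/sys/net/ipv4/tcp_thin_dupack", "1");
 */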
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2e..0f8caf64caa3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -265,6 +265,7 @@
265#include <linux/err.h> 265#include <linux/err.h>
266#include <linux/crypto.h> 266#include <linux/crypto.h>
267#include <linux/time.h> 267#include <linux/time.h>
268#include <linux/slab.h>
268 269
269#include <net/icmp.h> 270#include <net/icmp.h>
270#include <net/tcp.h> 271#include <net/tcp.h>
@@ -429,7 +430,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
429 if (tp->urg_seq == tp->copied_seq && 430 if (tp->urg_seq == tp->copied_seq &&
430 !sock_flag(sk, SOCK_URGINLINE) && 431 !sock_flag(sk, SOCK_URGINLINE) &&
431 tp->urg_data) 432 tp->urg_data)
432 target--; 433 target++;
433 434
434 /* Potential race condition. If read of tp below will 435 /* Potential race condition. If read of tp below will
435 * escape above sk->sk_state, we can be illegally awaken 436 * escape above sk->sk_state, we can be illegally awaken
@@ -536,8 +537,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
536 tp->nonagle &= ~TCP_NAGLE_PUSH; 537 tp->nonagle &= ~TCP_NAGLE_PUSH;
537} 538}
538 539
539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 540static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
540 struct sk_buff *skb)
541{ 541{
542 if (flags & MSG_OOB) 542 if (flags & MSG_OOB)
543 tp->snd_up = tp->write_seq; 543 tp->snd_up = tp->write_seq;
@@ -546,13 +546,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
546static inline void tcp_push(struct sock *sk, int flags, int mss_now, 546static inline void tcp_push(struct sock *sk, int flags, int mss_now,
547 int nonagle) 547 int nonagle)
548{ 548{
549 struct tcp_sock *tp = tcp_sk(sk);
550
551 if (tcp_send_head(sk)) { 549 if (tcp_send_head(sk)) {
552 struct sk_buff *skb = tcp_write_queue_tail(sk); 550 struct tcp_sock *tp = tcp_sk(sk);
551
553 if (!(flags & MSG_MORE) || forced_push(tp)) 552 if (!(flags & MSG_MORE) || forced_push(tp))
554 tcp_mark_push(tp, skb); 553 tcp_mark_push(tp, tcp_write_queue_tail(sk));
555 tcp_mark_urg(tp, flags, skb); 554
555 tcp_mark_urg(tp, flags);
556 __tcp_push_pending_frames(sk, mss_now, 556 __tcp_push_pending_frames(sk, mss_now,
557 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 557 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
558 } 558 }
@@ -877,12 +877,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
877#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 877#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
878#define TCP_OFF(sk) (sk->sk_sndmsg_off) 878#define TCP_OFF(sk) (sk->sk_sndmsg_off)
879 879
880static inline int select_size(struct sock *sk) 880static inline int select_size(struct sock *sk, int sg)
881{ 881{
882 struct tcp_sock *tp = tcp_sk(sk); 882 struct tcp_sock *tp = tcp_sk(sk);
883 int tmp = tp->mss_cache; 883 int tmp = tp->mss_cache;
884 884
885 if (sk->sk_route_caps & NETIF_F_SG) { 885 if (sg) {
886 if (sk_can_gso(sk)) 886 if (sk_can_gso(sk))
887 tmp = 0; 887 tmp = 0;
888 else { 888 else {
@@ -906,7 +906,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
906 struct sk_buff *skb; 906 struct sk_buff *skb;
907 int iovlen, flags; 907 int iovlen, flags;
908 int mss_now, size_goal; 908 int mss_now, size_goal;
909 int err, copied; 909 int sg, err, copied;
910 long timeo; 910 long timeo;
911 911
912 lock_sock(sk); 912 lock_sock(sk);
@@ -934,6 +934,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
934 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 934 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
935 goto out_err; 935 goto out_err;
936 936
937 sg = sk->sk_route_caps & NETIF_F_SG;
938
937 while (--iovlen >= 0) { 939 while (--iovlen >= 0) {
938 int seglen = iov->iov_len; 940 int seglen = iov->iov_len;
939 unsigned char __user *from = iov->iov_base; 941 unsigned char __user *from = iov->iov_base;
@@ -959,8 +961,9 @@ new_segment:
959 if (!sk_stream_memory_free(sk)) 961 if (!sk_stream_memory_free(sk))
960 goto wait_for_sndbuf; 962 goto wait_for_sndbuf;
961 963
962 skb = sk_stream_alloc_skb(sk, select_size(sk), 964 skb = sk_stream_alloc_skb(sk,
963 sk->sk_allocation); 965 select_size(sk, sg),
966 sk->sk_allocation);
964 if (!skb) 967 if (!skb)
965 goto wait_for_memory; 968 goto wait_for_memory;
966 969
@@ -997,9 +1000,7 @@ new_segment:
997 /* We can extend the last page 1000 /* We can extend the last page
998 * fragment. */ 1001 * fragment. */
999 merge = 1; 1002 merge = 1;
1000 } else if (i == MAX_SKB_FRAGS || 1003 } else if (i == MAX_SKB_FRAGS || !sg) {
1001 (!i &&
1002 !(sk->sk_route_caps & NETIF_F_SG))) {
1003 /* Need to add new fragment and cannot 1004 /* Need to add new fragment and cannot
1004 * do this because interface is non-SG, 1005 * do this because interface is non-SG,
1005 * or because all the page slots are 1006 * or because all the page slots are
@@ -1254,6 +1255,39 @@ static void tcp_prequeue_process(struct sock *sk)
1254 tp->ucopy.memory = 0; 1255 tp->ucopy.memory = 0;
1255} 1256}
1256 1257
1258#ifdef CONFIG_NET_DMA
1259static void tcp_service_net_dma(struct sock *sk, bool wait)
1260{
1261 dma_cookie_t done, used;
1262 dma_cookie_t last_issued;
1263 struct tcp_sock *tp = tcp_sk(sk);
1264
1265 if (!tp->ucopy.dma_chan)
1266 return;
1267
1268 last_issued = tp->ucopy.dma_cookie;
1269 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1270
1271 do {
1272 if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1273 last_issued, &done,
1274 &used) == DMA_SUCCESS) {
1275 /* Safe to free early-copied skbs now */
1276 __skb_queue_purge(&sk->sk_async_wait_queue);
1277 break;
1278 } else {
1279 struct sk_buff *skb;
1280 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1281 (dma_async_is_complete(skb->dma_cookie, done,
1282 used) == DMA_SUCCESS)) {
1283 __skb_dequeue(&sk->sk_async_wait_queue);
1284 kfree_skb(skb);
1285 }
1286 }
1287 } while (wait);
1288}
1289#endif
1290
1257static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1291static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1258{ 1292{
1259 struct sk_buff *skb; 1293 struct sk_buff *skb;
@@ -1335,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1335 sk_eat_skb(sk, skb, 0); 1369 sk_eat_skb(sk, skb, 0);
1336 if (!desc->count) 1370 if (!desc->count)
1337 break; 1371 break;
1372 tp->copied_seq = seq;
1338 } 1373 }
1339 tp->copied_seq = seq; 1374 tp->copied_seq = seq;
1340 1375
@@ -1546,6 +1581,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1546 /* __ Set realtime policy in scheduler __ */ 1581 /* __ Set realtime policy in scheduler __ */
1547 } 1582 }
1548 1583
1584#ifdef CONFIG_NET_DMA
1585 if (tp->ucopy.dma_chan)
1586 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1587#endif
1549 if (copied >= target) { 1588 if (copied >= target) {
1550 /* Do not sleep, just process backlog. */ 1589 /* Do not sleep, just process backlog. */
1551 release_sock(sk); 1590 release_sock(sk);
@@ -1554,6 +1593,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1554 sk_wait_data(sk, &timeo); 1593 sk_wait_data(sk, &timeo);
1555 1594
1556#ifdef CONFIG_NET_DMA 1595#ifdef CONFIG_NET_DMA
1596 tcp_service_net_dma(sk, false); /* Don't block */
1557 tp->ucopy.wakeup = 0; 1597 tp->ucopy.wakeup = 0;
1558#endif 1598#endif
1559 1599
@@ -1633,6 +1673,9 @@ do_prequeue:
1633 copied = -EFAULT; 1673 copied = -EFAULT;
1634 break; 1674 break;
1635 } 1675 }
1676
1677 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1678
1636 if ((offset + used) == skb->len) 1679 if ((offset + used) == skb->len)
1637 copied_early = 1; 1680 copied_early = 1;
1638 1681
@@ -1702,27 +1745,9 @@ skip_copy:
1702 } 1745 }
1703 1746
1704#ifdef CONFIG_NET_DMA 1747#ifdef CONFIG_NET_DMA
1705 if (tp->ucopy.dma_chan) { 1748 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1706 dma_cookie_t done, used; 1749 tp->ucopy.dma_chan = NULL;
1707
1708 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1709
1710 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1711 tp->ucopy.dma_cookie, &done,
1712 &used) == DMA_IN_PROGRESS) {
1713 /* do partial cleanup of sk_async_wait_queue */
1714 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1715 (dma_async_is_complete(skb->dma_cookie, done,
1716 used) == DMA_SUCCESS)) {
1717 __skb_dequeue(&sk->sk_async_wait_queue);
1718 kfree_skb(skb);
1719 }
1720 }
1721 1750
1722 /* Safe to free early-copied skbs now */
1723 __skb_queue_purge(&sk->sk_async_wait_queue);
1724 tp->ucopy.dma_chan = NULL;
1725 }
1726 if (tp->ucopy.pinned_list) { 1751 if (tp->ucopy.pinned_list) {
1727 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1752 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1728 tp->ucopy.pinned_list = NULL; 1753 tp->ucopy.pinned_list = NULL;
@@ -2229,6 +2254,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2229 } 2254 }
2230 break; 2255 break;
2231 2256
2257 case TCP_THIN_LINEAR_TIMEOUTS:
2258 if (val < 0 || val > 1)
2259 err = -EINVAL;
2260 else
2261 tp->thin_lto = val;
2262 break;
2263
2264 case TCP_THIN_DUPACK:
2265 if (val < 0 || val > 1)
2266 err = -EINVAL;
2267 else
2268 tp->thin_dupack = val;
2269 break;
2270
2232 case TCP_CORK: 2271 case TCP_CORK:
2233 /* When set indicates to always queue non-full frames. 2272 /* When set indicates to always queue non-full frames.
2234 * Later the user clears this option and we transmit 2273 * Later the user clears this option and we transmit
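The same two heuristics handled in do_tcp_setsockopt() above can also be enabled per connection from userspace. A hedged sketch, assuming a header set that already exports the TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK constants; both options take a boolean int, matching the 0/1 range check above:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Turn on both thin-stream mechanisms for one TCP socket. */
static int enable_thin_stream(int fd)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
			  &one, sizeof(one));
}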
@@ -2788,10 +2827,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
2788 2827
2789#ifdef CONFIG_TCP_MD5SIG 2828#ifdef CONFIG_TCP_MD5SIG
2790static unsigned long tcp_md5sig_users; 2829static unsigned long tcp_md5sig_users;
2791static struct tcp_md5sig_pool **tcp_md5sig_pool; 2830static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2792static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2831static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2793 2832
2794static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2833static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2795{ 2834{
2796 int cpu; 2835 int cpu;
2797 for_each_possible_cpu(cpu) { 2836 for_each_possible_cpu(cpu) {
@@ -2808,7 +2847,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2808 2847
2809void tcp_free_md5sig_pool(void) 2848void tcp_free_md5sig_pool(void)
2810{ 2849{
2811 struct tcp_md5sig_pool **pool = NULL; 2850 struct tcp_md5sig_pool * __percpu *pool = NULL;
2812 2851
2813 spin_lock_bh(&tcp_md5sig_pool_lock); 2852 spin_lock_bh(&tcp_md5sig_pool_lock);
2814 if (--tcp_md5sig_users == 0) { 2853 if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2861,11 @@ void tcp_free_md5sig_pool(void)
2822 2861
2823EXPORT_SYMBOL(tcp_free_md5sig_pool); 2862EXPORT_SYMBOL(tcp_free_md5sig_pool);
2824 2863
2825static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk) 2864static struct tcp_md5sig_pool * __percpu *
2865__tcp_alloc_md5sig_pool(struct sock *sk)
2826{ 2866{
2827 int cpu; 2867 int cpu;
2828 struct tcp_md5sig_pool **pool; 2868 struct tcp_md5sig_pool * __percpu *pool;
2829 2869
2830 pool = alloc_percpu(struct tcp_md5sig_pool *); 2870 pool = alloc_percpu(struct tcp_md5sig_pool *);
2831 if (!pool) 2871 if (!pool)
@@ -2852,9 +2892,9 @@ out_free:
2852 return NULL; 2892 return NULL;
2853} 2893}
2854 2894
2855struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk) 2895struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2856{ 2896{
2857 struct tcp_md5sig_pool **pool; 2897 struct tcp_md5sig_pool * __percpu *pool;
2858 int alloc = 0; 2898 int alloc = 0;
2859 2899
2860retry: 2900retry:
@@ -2873,7 +2913,9 @@ retry:
2873 2913
2874 if (alloc) { 2914 if (alloc) {
2875 /* we cannot hold spinlock here because this may sleep. */ 2915 /* we cannot hold spinlock here because this may sleep. */
2876 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk); 2916 struct tcp_md5sig_pool * __percpu *p;
2917
2918 p = __tcp_alloc_md5sig_pool(sk);
2877 spin_lock_bh(&tcp_md5sig_pool_lock); 2919 spin_lock_bh(&tcp_md5sig_pool_lock);
2878 if (!p) { 2920 if (!p) {
2879 tcp_md5sig_users--; 2921 tcp_md5sig_users--;
@@ -2897,7 +2939,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2897 2939
2898struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2940struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2899{ 2941{
2900 struct tcp_md5sig_pool **p; 2942 struct tcp_md5sig_pool * __percpu *p;
2901 spin_lock_bh(&tcp_md5sig_pool_lock); 2943 spin_lock_bh(&tcp_md5sig_pool_lock);
2902 p = tcp_md5sig_pool; 2944 p = tcp_md5sig_pool;
2903 if (p) 2945 if (p)
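The tcp_md5sig_pool hunks above change only the type annotation: pointers returned by alloc_percpu() are marked __percpu so sparse can check per_cpu_ptr()/free_percpu() usage. A minimal sketch of the same idiom with an invented structure, not taken from this commit:

#include <linux/errno.h>
#include <linux/percpu.h>

struct example_stats {			/* hypothetical per-CPU payload */
	unsigned long packets;
};

static struct example_stats __percpu *example_stats;

static int example_stats_init(void)
{
	example_stats = alloc_percpu(struct example_stats);
	return example_stats ? 0 : -ENOMEM;
}

static void example_stats_exit(void)
{
	free_percpu(example_stats);
}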
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6428b342b164..0ec9bd0ae94f 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -10,6 +10,7 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/gfp.h>
13#include <net/tcp.h> 14#include <net/tcp.h>
14 15
15int sysctl_tcp_max_ssthresh = 0; 16int sysctl_tcp_max_ssthresh = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3fddc69ccccc..f240f57b2199 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -62,6 +62,7 @@
62 */ 62 */
63 63
64#include <linux/mm.h> 64#include <linux/mm.h>
65#include <linux/slab.h>
65#include <linux/module.h> 66#include <linux/module.h>
66#include <linux/sysctl.h> 67#include <linux/sysctl.h>
67#include <linux/kernel.h> 68#include <linux/kernel.h>
@@ -89,6 +90,8 @@ int sysctl_tcp_frto __read_mostly = 2;
89int sysctl_tcp_frto_response __read_mostly; 90int sysctl_tcp_frto_response __read_mostly;
90int sysctl_tcp_nometrics_save __read_mostly; 91int sysctl_tcp_nometrics_save __read_mostly;
91 92
93int sysctl_tcp_thin_dupack __read_mostly;
94
92int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 95int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
93int sysctl_tcp_abc __read_mostly; 96int sysctl_tcp_abc __read_mostly;
94 97
@@ -2447,6 +2450,16 @@ static int tcp_time_to_recover(struct sock *sk)
2447 return 1; 2450 return 1;
2448 } 2451 }
2449 2452
2453 /* If a thin stream is detected, retransmit after first
2454 * received dupack. Employ only if SACK is supported in order
2455 * to avoid possible corner-case series of spurious retransmissions
2456 * Use only if there are no unsent data.
2457 */
2458 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
2459 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
2460 tcp_is_sack(tp) && !tcp_send_head(sk))
2461 return 1;
2462
2450 return 0; 2463 return 0;
2451} 2464}
2452 2465
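The early-retransmit branch added above depends on tcp_stream_is_thin() and tcp_dupack_heuristics(), which are introduced elsewhere in this merge (include/net/tcp.h). As a rough, hedged sketch of the thin-stream test only; the real helper should be consulted, and the threshold below is reproduced for illustration:

#include <net/tcp.h>

/* A flow is treated as "thin" when only a handful of segments are in flight
 * and it has left initial slow start; such flows never build up enough
 * dupACKs to trigger ordinary fast retransmit.
 */
static inline int stream_looks_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}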
@@ -2499,6 +2512,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2499 int err; 2512 int err;
2500 unsigned int mss; 2513 unsigned int mss;
2501 2514
2515 if (packets == 0)
2516 return;
2517
2502 WARN_ON(packets > tp->packets_out); 2518 WARN_ON(packets > tp->packets_out);
2503 if (tp->lost_skb_hint) { 2519 if (tp->lost_skb_hint) {
2504 skb = tp->lost_skb_hint; 2520 skb = tp->lost_skb_hint;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078a..3c23e70885f4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -60,6 +60,7 @@
60#include <linux/jhash.h> 60#include <linux/jhash.h>
61#include <linux/init.h> 61#include <linux/init.h>
62#include <linux/times.h> 62#include <linux/times.h>
63#include <linux/slab.h>
63 64
64#include <net/net_namespace.h> 65#include <net/net_namespace.h>
65#include <net/icmp.h> 66#include <net/icmp.h>
@@ -370,6 +371,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
370 if (sk->sk_state == TCP_CLOSE) 371 if (sk->sk_state == TCP_CLOSE)
371 goto out; 372 goto out;
372 373
374 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
375 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
376 goto out;
377 }
378
373 icsk = inet_csk(sk); 379 icsk = inet_csk(sk);
374 tp = tcp_sk(sk); 380 tp = tcp_sk(sk);
375 seq = ntohl(th->seq); 381 seq = ntohl(th->seq);
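The min_ttl comparison above (repeated in the receive path later in this file) is the enforcement side of the IP_MINTTL socket option: segments arriving with a TTL below the configured floor are counted under LINUX_MIB_TCPMINTTLDROP and dropped. A hypothetical userspace sketch of setting that floor, GTSM-style, assuming IP_MINTTL (value 21 in <linux/in.h>) is available:

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_MINTTL
#define IP_MINTTL	21	/* from <linux/in.h>; older libc headers lack it */
#endif

/* Only accept packets that arrive with TTL >= 255, i.e. from directly
 * connected peers that sent them with the maximum initial TTL.
 */
static int require_max_ttl(int fd)
{
	int min_ttl = 255;

	return setsockopt(fd, IPPROTO_IP, IP_MINTTL, &min_ttl, sizeof(min_ttl));
}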
@@ -742,9 +748,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 * This still operates on a request_sock only, not on a big 748 * This still operates on a request_sock only, not on a big
743 * socket. 749 * socket.
744 */ 750 */
745static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 751static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
746 struct request_sock *req, 752 struct request_sock *req,
747 struct request_values *rvp) 753 struct request_values *rvp)
748{ 754{
749 const struct inet_request_sock *ireq = inet_rsk(req); 755 const struct inet_request_sock *ireq = inet_rsk(req);
750 int err = -1; 756 int err = -1;
@@ -775,10 +781,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
775 return err; 781 return err;
776} 782}
777 783
778static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 784static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
779 struct request_values *rvp) 785 struct request_values *rvp)
780{ 786{
781 return __tcp_v4_send_synack(sk, NULL, req, rvp); 787 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
788 return tcp_v4_send_synack(sk, NULL, req, rvp);
782} 789}
783 790
784/* 791/*
@@ -1192,10 +1199,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1192struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1199struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1193 .family = PF_INET, 1200 .family = PF_INET,
1194 .obj_size = sizeof(struct tcp_request_sock), 1201 .obj_size = sizeof(struct tcp_request_sock),
1195 .rtx_syn_ack = tcp_v4_send_synack, 1202 .rtx_syn_ack = tcp_v4_rtx_synack,
1196 .send_ack = tcp_v4_reqsk_send_ack, 1203 .send_ack = tcp_v4_reqsk_send_ack,
1197 .destructor = tcp_v4_reqsk_destructor, 1204 .destructor = tcp_v4_reqsk_destructor,
1198 .send_reset = tcp_v4_send_reset, 1205 .send_reset = tcp_v4_send_reset,
1206 .syn_ack_timeout = tcp_syn_ack_timeout,
1199}; 1207};
1200 1208
1201#ifdef CONFIG_TCP_MD5SIG 1209#ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1381,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1373 } 1381 }
1374 tcp_rsk(req)->snt_isn = isn; 1382 tcp_rsk(req)->snt_isn = isn;
1375 1383
1376 if (__tcp_v4_send_synack(sk, dst, req, 1384 if (tcp_v4_send_synack(sk, dst, req,
1377 (struct request_values *)&tmp_ext) || 1385 (struct request_values *)&tmp_ext) ||
1378 want_cookie) 1386 want_cookie)
1379 goto drop_and_free; 1387 goto drop_and_free;
1380 1388
@@ -1653,6 +1661,11 @@ process:
1653 if (sk->sk_state == TCP_TIME_WAIT) 1661 if (sk->sk_state == TCP_TIME_WAIT)
1654 goto do_time_wait; 1662 goto do_time_wait;
1655 1663
1664 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1665 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1666 goto discard_and_relse;
1667 }
1668
1656 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 1669 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1657 goto discard_and_relse; 1670 goto discard_and_relse;
1658 nf_reset(skb); 1671 nf_reset(skb);
@@ -1677,8 +1690,11 @@ process:
1677 if (!tcp_prequeue(sk, skb)) 1690 if (!tcp_prequeue(sk, skb))
1678 ret = tcp_v4_do_rcv(sk, skb); 1691 ret = tcp_v4_do_rcv(sk, skb);
1679 } 1692 }
1680 } else 1693 } else if (unlikely(sk_add_backlog(sk, skb))) {
1681 sk_add_backlog(sk, skb); 1694 bh_unlock_sock(sk);
1695 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1696 goto discard_and_relse;
1697 }
1682 bh_unlock_sock(sk); 1698 bh_unlock_sock(sk);
1683 1699
1684 sock_put(sk); 1700 sock_put(sk);
@@ -2425,12 +2441,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2425 }, 2441 },
2426}; 2442};
2427 2443
2428static int tcp4_proc_init_net(struct net *net) 2444static int __net_init tcp4_proc_init_net(struct net *net)
2429{ 2445{
2430 return tcp_proc_register(net, &tcp4_seq_afinfo); 2446 return tcp_proc_register(net, &tcp4_seq_afinfo);
2431} 2447}
2432 2448
2433static void tcp4_proc_exit_net(struct net *net) 2449static void __net_exit tcp4_proc_exit_net(struct net *net)
2434{ 2450{
2435 tcp_proc_unregister(net, &tcp4_seq_afinfo); 2451 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2436} 2452}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..5fabff9ac6d6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/slab.h>
23#include <linux/sysctl.h> 24#include <linux/sysctl.h>
24#include <linux/workqueue.h> 25#include <linux/workqueue.h>
25#include <net/tcp.h> 26#include <net/tcp.h>
@@ -728,7 +729,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
728 * in main socket hash table and lock on listening 729 * in main socket hash table and lock on listening
729 * socket does not protect us more. 730 * socket does not protect us more.
730 */ 731 */
731 sk_add_backlog(child, skb); 732 __sk_add_backlog(child, skb);
732 } 733 }
733 734
734 bh_unlock_sock(child); 735 bh_unlock_sock(child);
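This one-line change is the counterpart of the backlog hunks in tcp_ipv4.c and udp.c above: sk_add_backlog() can now fail when queueing would overrun the receive buffer, so a path that has already accounted the skb (tcp_child_process() here) switches to the unconditional __sk_add_backlog(). A hedged sketch of the new caller convention, with an invented handler name:

#include <linux/errno.h>
#include <net/sock.h>

static int example_do_rcv(struct sock *sk, struct sk_buff *skb);	/* hypothetical */

/* Process directly when the socket is not owned by user context; otherwise
 * try to queue to the backlog and be prepared for the bounded
 * sk_add_backlog() to refuse the skb.
 */
static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = example_do_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb))
		rc = -ENOBUFS;	/* caller should kfree_skb(skb) and count a drop */
	bh_unlock_sock(sk);
	return rc;
}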
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640f..0dda86e72ad8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -37,6 +37,7 @@
37#include <net/tcp.h> 37#include <net/tcp.h>
38 38
39#include <linux/compiler.h> 39#include <linux/compiler.h>
40#include <linux/gfp.h>
40#include <linux/module.h> 41#include <linux/module.h>
41 42
42/* People can turn this off for buggy TCP's found in printers etc. */ 43/* People can turn this off for buggy TCP's found in printers etc. */
@@ -183,7 +184,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
183 */ 184 */
184void tcp_select_initial_window(int __space, __u32 mss, 185void tcp_select_initial_window(int __space, __u32 mss,
185 __u32 *rcv_wnd, __u32 *window_clamp, 186 __u32 *rcv_wnd, __u32 *window_clamp,
186 int wscale_ok, __u8 *rcv_wscale) 187 int wscale_ok, __u8 *rcv_wscale,
188 __u32 init_rcv_wnd)
187{ 189{
188 unsigned int space = (__space < 0 ? 0 : __space); 190 unsigned int space = (__space < 0 ? 0 : __space);
189 191
@@ -232,7 +234,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
232 init_cwnd = 2; 234 init_cwnd = 2;
233 else if (mss > 1460) 235 else if (mss > 1460)
234 init_cwnd = 3; 236 init_cwnd = 3;
235 if (*rcv_wnd > init_cwnd * mss) 237 /* when initializing use the value from init_rcv_wnd
238 * rather than the default from above
239 */
240 if (init_rcv_wnd &&
241 (*rcv_wnd > init_rcv_wnd * mss))
242 *rcv_wnd = init_rcv_wnd * mss;
243 else if (*rcv_wnd > init_cwnd * mss)
236 *rcv_wnd = init_cwnd * mss; 244 *rcv_wnd = init_cwnd * mss;
237 } 245 }
238 246
@@ -1794,11 +1802,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1794void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1802void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1795 int nonagle) 1803 int nonagle)
1796{ 1804{
1797 struct sk_buff *skb = tcp_send_head(sk);
1798
1799 if (!skb)
1800 return;
1801
1802 /* If we are closed, the bytes will have to remain here. 1805 /* If we are closed, the bytes will have to remain here.
1803 * In time closedown will finish, we empty the write queue and 1806 * In time closedown will finish, we empty the write queue and
1804 * all will be happy. 1807 * all will be happy.
@@ -2393,13 +2396,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2393 struct tcp_extend_values *xvp = tcp_xv(rvp); 2396 struct tcp_extend_values *xvp = tcp_xv(rvp);
2394 struct inet_request_sock *ireq = inet_rsk(req); 2397 struct inet_request_sock *ireq = inet_rsk(req);
2395 struct tcp_sock *tp = tcp_sk(sk); 2398 struct tcp_sock *tp = tcp_sk(sk);
2399 const struct tcp_cookie_values *cvp = tp->cookie_values;
2396 struct tcphdr *th; 2400 struct tcphdr *th;
2397 struct sk_buff *skb; 2401 struct sk_buff *skb;
2398 struct tcp_md5sig_key *md5; 2402 struct tcp_md5sig_key *md5;
2399 int tcp_header_size; 2403 int tcp_header_size;
2400 int mss; 2404 int mss;
2405 int s_data_desired = 0;
2401 2406
2402 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2407 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2408 s_data_desired = cvp->s_data_desired;
2409 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
2403 if (skb == NULL) 2410 if (skb == NULL)
2404 return NULL; 2411 return NULL;
2405 2412
@@ -2422,7 +2429,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2422 &req->rcv_wnd, 2429 &req->rcv_wnd,
2423 &req->window_clamp, 2430 &req->window_clamp,
2424 ireq->wscale_ok, 2431 ireq->wscale_ok,
2425 &rcv_wscale); 2432 &rcv_wscale,
2433 dst_metric(dst, RTAX_INITRWND));
2426 ireq->rcv_wscale = rcv_wscale; 2434 ireq->rcv_wscale = rcv_wscale;
2427 } 2435 }
2428 2436
@@ -2454,16 +2462,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2454 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2462 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
2455 2463
2456 if (OPTION_COOKIE_EXTENSION & opts.options) { 2464 if (OPTION_COOKIE_EXTENSION & opts.options) {
2457 const struct tcp_cookie_values *cvp = tp->cookie_values; 2465 if (s_data_desired) {
2458 2466 u8 *buf = skb_put(skb, s_data_desired);
2459 if (cvp != NULL &&
2460 cvp->s_data_constant &&
2461 cvp->s_data_desired > 0) {
2462 u8 *buf = skb_put(skb, cvp->s_data_desired);
2463 2467
2464 /* copy data directly from the listening socket. */ 2468 /* copy data directly from the listening socket. */
2465 memcpy(buf, cvp->s_data_payload, cvp->s_data_desired); 2469 memcpy(buf, cvp->s_data_payload, s_data_desired);
2466 TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired; 2470 TCP_SKB_CB(skb)->end_seq += s_data_desired;
2467 } 2471 }
2468 2472
2469 if (opts.hash_size > 0) { 2473 if (opts.hash_size > 0) {
@@ -2549,7 +2553,8 @@ static void tcp_connect_init(struct sock *sk)
2549 &tp->rcv_wnd, 2553 &tp->rcv_wnd,
2550 &tp->window_clamp, 2554 &tp->window_clamp,
2551 sysctl_tcp_window_scaling, 2555 sysctl_tcp_window_scaling,
2552 &rcv_wscale); 2556 &rcv_wscale,
2557 dst_metric(dst, RTAX_INITRWND));
2553 2558
2554 tp->rx_opt.rcv_wscale = rcv_wscale; 2559 tp->rx_opt.rcv_wscale = rcv_wscale;
2555 tp->rcv_ssthresh = tp->rcv_wnd; 2560 tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 9bc805df95d2..f8efada580e8 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -22,6 +22,7 @@
22#include <linux/kprobes.h> 22#include <linux/kprobes.h>
23#include <linux/socket.h> 23#include <linux/socket.h>
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/slab.h>
25#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/ktime.h> 28#include <linux/ktime.h>
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c2597..8a0ab2977f1f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/gfp.h>
22#include <net/tcp.h> 23#include <net/tcp.h>
23 24
24int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; 25int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
@@ -29,6 +30,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
29int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; 30int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
30int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; 31int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
31int sysctl_tcp_orphan_retries __read_mostly; 32int sysctl_tcp_orphan_retries __read_mostly;
33int sysctl_tcp_thin_linear_timeouts __read_mostly;
32 34
33static void tcp_write_timer(unsigned long); 35static void tcp_write_timer(unsigned long);
34static void tcp_delack_timer(unsigned long); 36static void tcp_delack_timer(unsigned long);
@@ -133,7 +135,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
133} 135}
134 136
135/* This function calculates a "timeout" which is equivalent to the timeout of a 137/* This function calculates a "timeout" which is equivalent to the timeout of a
136 * TCP connection after "boundary" unsucessful, exponentially backed-off 138 * TCP connection after "boundary" unsuccessful, exponentially backed-off
137 * retransmissions with an initial RTO of TCP_RTO_MIN. 139 * retransmissions with an initial RTO of TCP_RTO_MIN.
138 */ 140 */
139static bool retransmits_timed_out(struct sock *sk, 141static bool retransmits_timed_out(struct sock *sk,
@@ -415,7 +417,25 @@ void tcp_retransmit_timer(struct sock *sk)
415 icsk->icsk_retransmits++; 417 icsk->icsk_retransmits++;
416 418
417out_reset_timer: 419out_reset_timer:
418 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 420 /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
421 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
422 * might be increased if the stream oscillates between thin and thick,
423 * thus the old value might already be too high compared to the value
424 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
425 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
426 * exponential backoff behaviour to avoid continue hammering
427 * linear-timeout retransmissions into a black hole
428 */
429 if (sk->sk_state == TCP_ESTABLISHED &&
430 (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
431 tcp_stream_is_thin(tp) &&
432 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
433 icsk->icsk_backoff = 0;
434 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
435 } else {
436 /* Use normal (exponential) backoff */
437 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
438 }
419 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 439 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
420 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 440 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
421 __sk_dst_reset(sk); 441 __sk_dst_reset(sk);
@@ -474,6 +494,12 @@ static void tcp_synack_timer(struct sock *sk)
474 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 494 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
475} 495}
476 496
497void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
498{
499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
500}
501EXPORT_SYMBOL(tcp_syn_ack_timeout);
502
477void tcp_set_keepalive(struct sock *sk, int val) 503void tcp_set_keepalive(struct sock *sk, int val)
478{ 504{
479 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 505 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 3959e0ca456a..3b3813cc80b9 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -8,6 +8,7 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/slab.h>
11#include <net/icmp.h> 12#include <net/icmp.h>
12#include <net/ip.h> 13#include <net/ip.h>
13#include <net/protocol.h> 14#include <net/protocol.h>
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e04..8fef859db35d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -95,6 +95,7 @@
95#include <linux/mm.h> 95#include <linux/mm.h>
96#include <linux/inet.h> 96#include <linux/inet.h>
97#include <linux/netdevice.h> 97#include <linux/netdevice.h>
98#include <linux/slab.h>
98#include <net/tcp_states.h> 99#include <net/tcp_states.h>
99#include <linux/skbuff.h> 100#include <linux/skbuff.h>
100#include <linux/proc_fs.h> 101#include <linux/proc_fs.h>
@@ -471,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
471 if (hslot->count < hslot2->count) 472 if (hslot->count < hslot2->count)
472 goto begin; 473 goto begin;
473 474
474 result = udp4_lib_lookup2(net, INADDR_ANY, sport, 475 result = udp4_lib_lookup2(net, saddr, sport,
475 daddr, hnum, dif, 476 INADDR_ANY, hnum, dif,
476 hslot2, slot2); 477 hslot2, slot2);
477 } 478 }
478 rcu_read_unlock(); 479 rcu_read_unlock();
@@ -1117,7 +1118,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1117 struct inet_sock *inet = inet_sk(sk); 1118 struct inet_sock *inet = inet_sk(sk);
1118 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1119 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1119 struct sk_buff *skb; 1120 struct sk_buff *skb;
1120 unsigned int ulen, copied; 1121 unsigned int ulen;
1121 int peeked; 1122 int peeked;
1122 int err; 1123 int err;
1123 int is_udplite = IS_UDPLITE(sk); 1124 int is_udplite = IS_UDPLITE(sk);
@@ -1138,10 +1139,9 @@ try_again:
1138 goto out; 1139 goto out;
1139 1140
1140 ulen = skb->len - sizeof(struct udphdr); 1141 ulen = skb->len - sizeof(struct udphdr);
1141 copied = len; 1142 if (len > ulen)
1142 if (copied > ulen) 1143 len = ulen;
1143 copied = ulen; 1144 else if (len < ulen)
1144 else if (copied < ulen)
1145 msg->msg_flags |= MSG_TRUNC; 1145 msg->msg_flags |= MSG_TRUNC;
1146 1146
1147 /* 1147 /*
@@ -1150,14 +1150,14 @@ try_again:
1150 * coverage checksum (UDP-Lite), do it before the copy. 1150 * coverage checksum (UDP-Lite), do it before the copy.
1151 */ 1151 */
1152 1152
1153 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 1153 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
1154 if (udp_lib_checksum_complete(skb)) 1154 if (udp_lib_checksum_complete(skb))
1155 goto csum_copy_err; 1155 goto csum_copy_err;
1156 } 1156 }
1157 1157
1158 if (skb_csum_unnecessary(skb)) 1158 if (skb_csum_unnecessary(skb))
1159 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 1159 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
1160 msg->msg_iov, copied); 1160 msg->msg_iov, len);
1161 else { 1161 else {
1162 err = skb_copy_and_csum_datagram_iovec(skb, 1162 err = skb_copy_and_csum_datagram_iovec(skb,
1163 sizeof(struct udphdr), 1163 sizeof(struct udphdr),
@@ -1186,7 +1186,7 @@ try_again:
1186 if (inet->cmsg_flags) 1186 if (inet->cmsg_flags)
1187 ip_cmsg_recv(msg, skb); 1187 ip_cmsg_recv(msg, skb);
1188 1188
1189 err = copied; 1189 err = len;
1190 if (flags & MSG_TRUNC) 1190 if (flags & MSG_TRUNC)
1191 err = ulen; 1191 err = ulen;
1192 1192
@@ -1372,8 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1372 bh_lock_sock(sk); 1372 bh_lock_sock(sk);
1373 if (!sock_owned_by_user(sk)) 1373 if (!sock_owned_by_user(sk))
1374 rc = __udp_queue_rcv_skb(sk, skb); 1374 rc = __udp_queue_rcv_skb(sk, skb);
1375 else 1375 else if (sk_add_backlog(sk, skb)) {
1376 sk_add_backlog(sk, skb); 1376 bh_unlock_sock(sk);
1377 goto drop;
1378 }
1377 bh_unlock_sock(sk); 1379 bh_unlock_sock(sk);
1378 1380
1379 return rc; 1381 return rc;
@@ -2027,12 +2029,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
2027 }, 2029 },
2028}; 2030};
2029 2031
2030static int udp4_proc_init_net(struct net *net) 2032static int __net_init udp4_proc_init_net(struct net *net)
2031{ 2033{
2032 return udp_proc_register(net, &udp4_seq_afinfo); 2034 return udp_proc_register(net, &udp4_seq_afinfo);
2033} 2035}
2034 2036
2035static void udp4_proc_exit_net(struct net *net) 2037static void __net_exit udp4_proc_exit_net(struct net *net)
2036{ 2038{
2037 udp_proc_unregister(net, &udp4_seq_afinfo); 2039 udp_proc_unregister(net, &udp4_seq_afinfo);
2038} 2040}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a5..6610bf76369f 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
81 }, 81 },
82}; 82};
83 83
84static int udplite4_proc_init_net(struct net *net) 84static int __net_init udplite4_proc_init_net(struct net *net)
85{ 85{
86 return udp_proc_register(net, &udplite4_seq_afinfo); 86 return udp_proc_register(net, &udplite4_seq_afinfo);
87} 87}
88 88
89static void udplite4_proc_exit_net(struct net *net) 89static void __net_exit udplite4_proc_exit_net(struct net *net)
90{ 90{
91 udp_proc_unregister(net, &udplite4_seq_afinfo); 91 udp_proc_unregister(net, &udplite4_seq_afinfo);
92} 92}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index f9f922a0ba88..c791bb63203f 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -9,6 +9,7 @@
9 * 9 *
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/string.h> 14#include <linux/string.h>
14#include <linux/netfilter.h> 15#include <linux/netfilter.h>
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 3444f3b34eca..6f368413eb0e 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
5 */ 5 */
6 6
7#include <linux/gfp.h>
7#include <linux/init.h> 8#include <linux/init.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/module.h> 10#include <linux/module.h>
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d63c1cd..e4a1483fba77 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
91 return 0; 91 return 0;
92} 92}
93 93
94static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 94static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
95 struct flowi *fl)
95{ 96{
96 struct rtable *rt = (struct rtable *)xdst->route; 97 struct rtable *rt = (struct rtable *)xdst->route;
97 98
98 xdst->u.rt.fl = rt->fl; 99 xdst->u.rt.fl = *fl;
99 100
100 xdst->u.dst.dev = dev; 101 xdst->u.dst.dev = dev;
101 dev_hold(dev); 102 dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 143791da062c..413054f02aab 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -53,6 +53,7 @@
53#include <linux/route.h> 53#include <linux/route.h>
54#include <linux/inetdevice.h> 54#include <linux/inetdevice.h>
55#include <linux/init.h> 55#include <linux/init.h>
56#include <linux/slab.h>
56#ifdef CONFIG_SYSCTL 57#ifdef CONFIG_SYSCTL
57#include <linux/sysctl.h> 58#include <linux/sysctl.h>
58#endif 59#endif
@@ -278,31 +279,31 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
278 279
279static int snmp6_alloc_dev(struct inet6_dev *idev) 280static int snmp6_alloc_dev(struct inet6_dev *idev)
280{ 281{
281 if (snmp_mib_init((void **)idev->stats.ipv6, 282 if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
282 sizeof(struct ipstats_mib)) < 0) 283 sizeof(struct ipstats_mib)) < 0)
283 goto err_ip; 284 goto err_ip;
284 if (snmp_mib_init((void **)idev->stats.icmpv6, 285 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
285 sizeof(struct icmpv6_mib)) < 0) 286 sizeof(struct icmpv6_mib)) < 0)
286 goto err_icmp; 287 goto err_icmp;
287 if (snmp_mib_init((void **)idev->stats.icmpv6msg, 288 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
288 sizeof(struct icmpv6msg_mib)) < 0) 289 sizeof(struct icmpv6msg_mib)) < 0)
289 goto err_icmpmsg; 290 goto err_icmpmsg;
290 291
291 return 0; 292 return 0;
292 293
293err_icmpmsg: 294err_icmpmsg:
294 snmp_mib_free((void **)idev->stats.icmpv6); 295 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
295err_icmp: 296err_icmp:
296 snmp_mib_free((void **)idev->stats.ipv6); 297 snmp_mib_free((void __percpu **)idev->stats.ipv6);
297err_ip: 298err_ip:
298 return -ENOMEM; 299 return -ENOMEM;
299} 300}
300 301
301static void snmp6_free_dev(struct inet6_dev *idev) 302static void snmp6_free_dev(struct inet6_dev *idev)
302{ 303{
303 snmp_mib_free((void **)idev->stats.icmpv6msg); 304 snmp_mib_free((void __percpu **)idev->stats.icmpv6msg);
304 snmp_mib_free((void **)idev->stats.icmpv6); 305 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
305 snmp_mib_free((void **)idev->stats.ipv6); 306 snmp_mib_free((void __percpu **)idev->stats.ipv6);
306} 307}
307 308
308/* Nobody refers to this device, we may destroy it. */ 309/* Nobody refers to this device, we may destroy it. */
@@ -992,8 +993,7 @@ struct ipv6_saddr_dst {
992 993
993static inline int ipv6_saddr_preferred(int type) 994static inline int ipv6_saddr_preferred(int type)
994{ 995{
995 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4| 996 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
996 IPV6_ADDR_LOOPBACK|IPV6_ADDR_RESERVED))
997 return 1; 997 return 1;
998 return 0; 998 return 0;
999} 999}
@@ -1381,6 +1381,8 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1381 if (dad_failed) 1381 if (dad_failed)
1382 ifp->flags |= IFA_F_DADFAILED; 1382 ifp->flags |= IFA_F_DADFAILED;
1383 spin_unlock_bh(&ifp->lock); 1383 spin_unlock_bh(&ifp->lock);
1384 if (dad_failed)
1385 ipv6_ifa_notify(0, ifp);
1384 in6_ifa_put(ifp); 1386 in6_ifa_put(ifp);
1385#ifdef CONFIG_IPV6_PRIVACY 1387#ifdef CONFIG_IPV6_PRIVACY
1386 } else if (ifp->flags&IFA_F_TEMPORARY) { 1388 } else if (ifp->flags&IFA_F_TEMPORARY) {
@@ -2616,7 +2618,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
2616static int addrconf_ifdown(struct net_device *dev, int how) 2618static int addrconf_ifdown(struct net_device *dev, int how)
2617{ 2619{
2618 struct inet6_dev *idev; 2620 struct inet6_dev *idev;
2619 struct inet6_ifaddr *ifa, **bifa; 2621 struct inet6_ifaddr *ifa, *keep_list, **bifa;
2620 struct net *net = dev_net(dev); 2622 struct net *net = dev_net(dev);
2621 int i; 2623 int i;
2622 2624
@@ -2649,11 +2651,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2649 2651
2650 write_lock_bh(&addrconf_hash_lock); 2652 write_lock_bh(&addrconf_hash_lock);
2651 while ((ifa = *bifa) != NULL) { 2653 while ((ifa = *bifa) != NULL) {
2652 if (ifa->idev == idev) { 2654 if (ifa->idev == idev &&
2655 (how || !(ifa->flags&IFA_F_PERMANENT) ||
2656 ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2653 *bifa = ifa->lst_next; 2657 *bifa = ifa->lst_next;
2654 ifa->lst_next = NULL; 2658 ifa->lst_next = NULL;
2655 addrconf_del_timer(ifa); 2659 __in6_ifa_put(ifa);
2656 in6_ifa_put(ifa);
2657 continue; 2660 continue;
2658 } 2661 }
2659 bifa = &ifa->lst_next; 2662 bifa = &ifa->lst_next;
@@ -2689,11 +2692,40 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2689 write_lock_bh(&idev->lock); 2692 write_lock_bh(&idev->lock);
2690 } 2693 }
2691#endif 2694#endif
2695 keep_list = NULL;
2696 bifa = &keep_list;
2692 while ((ifa = idev->addr_list) != NULL) { 2697 while ((ifa = idev->addr_list) != NULL) {
2693 idev->addr_list = ifa->if_next; 2698 idev->addr_list = ifa->if_next;
2694 ifa->if_next = NULL; 2699 ifa->if_next = NULL;
2695 ifa->dead = 1; 2700
2696 addrconf_del_timer(ifa); 2701 addrconf_del_timer(ifa);
2702
2703 /* If just doing link down, and address is permanent
2704 and not link-local, then retain it. */
2705 if (how == 0 &&
2706 (ifa->flags&IFA_F_PERMANENT) &&
2707 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2708
2709 /* Move to holding list */
2710 *bifa = ifa;
2711 bifa = &ifa->if_next;
2712
2713 /* If not doing DAD on this address, just keep it. */
2714 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
2715 idev->cnf.accept_dad <= 0 ||
2716 (ifa->flags & IFA_F_NODAD))
2717 continue;
2718
2719 /* If it was tentative already, no need to notify */
2720 if (ifa->flags & IFA_F_TENTATIVE)
2721 continue;
2722
2723 /* Flag it for later restoration when link comes up */
2724 ifa->flags |= IFA_F_TENTATIVE;
2725 in6_ifa_hold(ifa);
2726 } else {
2727 ifa->dead = 1;
2728 }
2697 write_unlock_bh(&idev->lock); 2729 write_unlock_bh(&idev->lock);
2698 2730
2699 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2731 __ipv6_ifa_notify(RTM_DELADDR, ifa);
@@ -2702,6 +2734,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2702 2734
2703 write_lock_bh(&idev->lock); 2735 write_lock_bh(&idev->lock);
2704 } 2736 }
2737
2738 idev->addr_list = keep_list;
2739
2705 write_unlock_bh(&idev->lock); 2740 write_unlock_bh(&idev->lock);
2706 2741
2707 /* Step 5: Discard multicast list */ 2742 /* Step 5: Discard multicast list */
@@ -2727,28 +2762,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2727static void addrconf_rs_timer(unsigned long data) 2762static void addrconf_rs_timer(unsigned long data)
2728{ 2763{
2729 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; 2764 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
2765 struct inet6_dev *idev = ifp->idev;
2730 2766
2731 if (ifp->idev->cnf.forwarding) 2767 read_lock(&idev->lock);
2768 if (idev->dead || !(idev->if_flags & IF_READY))
2732 goto out; 2769 goto out;
2733 2770
2734 if (ifp->idev->if_flags & IF_RA_RCVD) { 2771 if (idev->cnf.forwarding)
2735 /* 2772 goto out;
2736 * Announcement received after solicitation 2773
2737 * was sent 2774 /* Announcement received after solicitation was sent */
2738 */ 2775 if (idev->if_flags & IF_RA_RCVD)
2739 goto out; 2776 goto out;
2740 }
2741 2777
2742 spin_lock(&ifp->lock); 2778 spin_lock(&ifp->lock);
2743 if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) { 2779 if (ifp->probes++ < idev->cnf.rtr_solicits) {
2744 /* The wait after the last probe can be shorter */ 2780 /* The wait after the last probe can be shorter */
2745 addrconf_mod_timer(ifp, AC_RS, 2781 addrconf_mod_timer(ifp, AC_RS,
2746 (ifp->probes == ifp->idev->cnf.rtr_solicits) ? 2782 (ifp->probes == idev->cnf.rtr_solicits) ?
2747 ifp->idev->cnf.rtr_solicit_delay : 2783 idev->cnf.rtr_solicit_delay :
2748 ifp->idev->cnf.rtr_solicit_interval); 2784 idev->cnf.rtr_solicit_interval);
2749 spin_unlock(&ifp->lock); 2785 spin_unlock(&ifp->lock);
2750 2786
2751 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); 2787 ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
2752 } else { 2788 } else {
2753 spin_unlock(&ifp->lock); 2789 spin_unlock(&ifp->lock);
2754 /* 2790 /*
@@ -2756,10 +2792,11 @@ static void addrconf_rs_timer(unsigned long data)
2756 * assumption any longer. 2792 * assumption any longer.
2757 */ 2793 */
2758 printk(KERN_DEBUG "%s: no IPv6 routers present\n", 2794 printk(KERN_DEBUG "%s: no IPv6 routers present\n",
2759 ifp->idev->dev->name); 2795 idev->dev->name);
2760 } 2796 }
2761 2797
2762out: 2798out:
2799 read_unlock(&idev->lock);
2763 in6_ifa_put(ifp); 2800 in6_ifa_put(ifp);
2764} 2801}
2765 2802
@@ -2792,14 +2829,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2792 read_lock_bh(&idev->lock); 2829 read_lock_bh(&idev->lock);
2793 if (ifp->dead) 2830 if (ifp->dead)
2794 goto out; 2831 goto out;
2795 spin_lock_bh(&ifp->lock);
2796 2832
2833 spin_lock(&ifp->lock);
2797 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 2834 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
2798 idev->cnf.accept_dad < 1 || 2835 idev->cnf.accept_dad < 1 ||
2799 !(ifp->flags&IFA_F_TENTATIVE) || 2836 !(ifp->flags&IFA_F_TENTATIVE) ||
2800 ifp->flags & IFA_F_NODAD) { 2837 ifp->flags & IFA_F_NODAD) {
2801 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2838 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2802 spin_unlock_bh(&ifp->lock); 2839 spin_unlock(&ifp->lock);
2803 read_unlock_bh(&idev->lock); 2840 read_unlock_bh(&idev->lock);
2804 2841
2805 addrconf_dad_completed(ifp); 2842 addrconf_dad_completed(ifp);
@@ -2807,7 +2844,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2807 } 2844 }
2808 2845
2809 if (!(idev->if_flags & IF_READY)) { 2846 if (!(idev->if_flags & IF_READY)) {
2810 spin_unlock_bh(&ifp->lock); 2847 spin_unlock(&ifp->lock);
2811 read_unlock_bh(&idev->lock); 2848 read_unlock_bh(&idev->lock);
2812 /* 2849 /*
2813 * If the device is not ready: 2850 * If the device is not ready:
@@ -2827,7 +2864,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2827 ip6_ins_rt(ifp->rt); 2864 ip6_ins_rt(ifp->rt);
2828 2865
2829 addrconf_dad_kick(ifp); 2866 addrconf_dad_kick(ifp);
2830 spin_unlock_bh(&ifp->lock); 2867 spin_unlock(&ifp->lock);
2831out: 2868out:
2832 read_unlock_bh(&idev->lock); 2869 read_unlock_bh(&idev->lock);
2833} 2870}
@@ -2838,20 +2875,21 @@ static void addrconf_dad_timer(unsigned long data)
2838 struct inet6_dev *idev = ifp->idev; 2875 struct inet6_dev *idev = ifp->idev;
2839 struct in6_addr mcaddr; 2876 struct in6_addr mcaddr;
2840 2877
2841 read_lock_bh(&idev->lock); 2878 read_lock(&idev->lock);
2842 if (idev->dead) { 2879 if (idev->dead || !(idev->if_flags & IF_READY)) {
2843 read_unlock_bh(&idev->lock); 2880 read_unlock(&idev->lock);
2844 goto out; 2881 goto out;
2845 } 2882 }
2846 spin_lock_bh(&ifp->lock); 2883
2884 spin_lock(&ifp->lock);
2847 if (ifp->probes == 0) { 2885 if (ifp->probes == 0) {
2848 /* 2886 /*
2849 * DAD was successful 2887 * DAD was successful
2850 */ 2888 */
2851 2889
2852 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2890 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2853 spin_unlock_bh(&ifp->lock); 2891 spin_unlock(&ifp->lock);
2854 read_unlock_bh(&idev->lock); 2892 read_unlock(&idev->lock);
2855 2893
2856 addrconf_dad_completed(ifp); 2894 addrconf_dad_completed(ifp);
2857 2895
@@ -2860,8 +2898,8 @@ static void addrconf_dad_timer(unsigned long data)
2860 2898
2861 ifp->probes--; 2899 ifp->probes--;
2862 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); 2900 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
2863 spin_unlock_bh(&ifp->lock); 2901 spin_unlock(&ifp->lock);
2864 read_unlock_bh(&idev->lock); 2902 read_unlock(&idev->lock);
2865 2903
2866 /* send a neighbour solicitation for our addr */ 2904 /* send a neighbour solicitation for our addr */
2867 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 2905 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
@@ -2908,12 +2946,12 @@ static void addrconf_dad_run(struct inet6_dev *idev) {
2908 2946
2909 read_lock_bh(&idev->lock); 2947 read_lock_bh(&idev->lock);
2910 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { 2948 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
2911 spin_lock_bh(&ifp->lock); 2949 spin_lock(&ifp->lock);
2912 if (!(ifp->flags & IFA_F_TENTATIVE)) { 2950 if (!(ifp->flags & IFA_F_TENTATIVE)) {
2913 spin_unlock_bh(&ifp->lock); 2951 spin_unlock(&ifp->lock);
2914 continue; 2952 continue;
2915 } 2953 }
2916 spin_unlock_bh(&ifp->lock); 2954 spin_unlock(&ifp->lock);
2917 addrconf_dad_kick(ifp); 2955 addrconf_dad_kick(ifp);
2918 } 2956 }
2919 read_unlock_bh(&idev->lock); 2957 read_unlock_bh(&idev->lock);
@@ -3030,14 +3068,14 @@ static const struct file_operations if6_fops = {
3030 .release = seq_release_net, 3068 .release = seq_release_net,
3031}; 3069};
3032 3070
3033static int if6_proc_net_init(struct net *net) 3071static int __net_init if6_proc_net_init(struct net *net)
3034{ 3072{
3035 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) 3073 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
3036 return -ENOMEM; 3074 return -ENOMEM;
3037 return 0; 3075 return 0;
3038} 3076}
3039 3077
3040static void if6_proc_net_exit(struct net *net) 3078static void __net_exit if6_proc_net_exit(struct net *net)
3041{ 3079{
3042 proc_net_remove(net, "if_inet6"); 3080 proc_net_remove(net, "if_inet6");
3043} 3081}
@@ -3573,7 +3611,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3573 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 3611 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3574 if (idx < s_idx) 3612 if (idx < s_idx)
3575 goto cont; 3613 goto cont;
3576 if (idx > s_idx) 3614 if (h > s_h || idx > s_idx)
3577 s_ip_idx = 0; 3615 s_ip_idx = 0;
3578 ip_idx = 0; 3616 ip_idx = 0;
3579 if ((idev = __in6_dev_get(dev)) == NULL) 3617 if ((idev = __in6_dev_get(dev)) == NULL)
@@ -3755,8 +3793,8 @@ static inline size_t inet6_if_nlmsg_size(void)
3755 ); 3793 );
3756} 3794}
3757 3795
3758static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items, 3796static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3759 int bytes) 3797 int items, int bytes)
3760{ 3798{
3761 int i; 3799 int i;
3762 int pad = bytes - sizeof(u64) * items; 3800 int pad = bytes - sizeof(u64) * items;
@@ -3775,10 +3813,10 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3775{ 3813{
3776 switch(attrtype) { 3814 switch(attrtype) {
3777 case IFLA_INET6_STATS: 3815 case IFLA_INET6_STATS:
3778 __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3816 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
3779 break; 3817 break;
3780 case IFLA_INET6_ICMP6STATS: 3818 case IFLA_INET6_ICMP6STATS:
3781 __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3819 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
3782 break; 3820 break;
3783 } 3821 }
3784} 3822}
@@ -4414,8 +4452,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4414 4452
4415static void addrconf_sysctl_register(struct inet6_dev *idev) 4453static void addrconf_sysctl_register(struct inet6_dev *idev)
4416{ 4454{
4417 neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6, 4455 neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
4418 NET_IPV6_NEIGH, "ipv6",
4419 &ndisc_ifinfo_sysctl_change); 4456 &ndisc_ifinfo_sysctl_change);
4420 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 4457 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4421 idev, &idev->cnf); 4458 idev, &idev->cnf);
@@ -4430,7 +4467,7 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4430 4467
4431#endif 4468#endif
4432 4469
4433static int addrconf_init_net(struct net *net) 4470static int __net_init addrconf_init_net(struct net *net)
4434{ 4471{
4435 int err; 4472 int err;
4436 struct ipv6_devconf *all, *dflt; 4473 struct ipv6_devconf *all, *dflt;
@@ -4479,7 +4516,7 @@ err_alloc_all:
4479 return err; 4516 return err;
4480} 4517}
4481 4518
4482static void addrconf_exit_net(struct net *net) 4519static void __net_exit addrconf_exit_net(struct net *net)
4483{ 4520{
4484#ifdef CONFIG_SYSCTL 4521#ifdef CONFIG_SYSCTL
4485 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4522 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 3f82e9542eda..6b03826552e1 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -72,7 +72,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
72 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ 72 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
73 } 73 }
74 74
75 return (IPV6_ADDR_RESERVED | 75 return (IPV6_ADDR_UNICAST |
76 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */ 76 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
77} 77}
78EXPORT_SYMBOL(__ipv6_addr_type); 78EXPORT_SYMBOL(__ipv6_addr_type);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 6ff73c4c126a..ae404c9a746c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/in6.h> 15#include <linux/in6.h>
16#include <linux/slab.h>
16#include <net/addrconf.h> 17#include <net/addrconf.h>
17#include <linux/if_addrlabel.h> 18#include <linux/if_addrlabel.h>
18#include <linux/netlink.h> 19#include <linux/netlink.h>
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 12e69d364dd5..3f9e86b15e0d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -36,6 +36,7 @@
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/stat.h> 37#include <linux/stat.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/slab.h>
39 40
40#include <linux/inet.h> 41#include <linux/inet.h>
41#include <linux/netdevice.h> 42#include <linux/netdevice.h>
@@ -199,7 +200,7 @@ lookup_protocol:
199 200
200 inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); 201 inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
201 np->hop_limit = -1; 202 np->hop_limit = -1;
202 np->mcast_hops = -1; 203 np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
203 np->mc_loop = 1; 204 np->mc_loop = 1;
204 np->pmtudisc = IPV6_PMTUDISC_WANT; 205 np->pmtudisc = IPV6_PMTUDISC_WANT;
205 np->ipv6only = net->ipv6.sysctl.bindv6only; 206 np->ipv6only = net->ipv6.sysctl.bindv6only;
@@ -971,41 +972,41 @@ static void ipv6_packet_cleanup(void)
971 972
972static int __net_init ipv6_init_mibs(struct net *net) 973static int __net_init ipv6_init_mibs(struct net *net)
973{ 974{
974 if (snmp_mib_init((void **)net->mib.udp_stats_in6, 975 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
975 sizeof (struct udp_mib)) < 0) 976 sizeof (struct udp_mib)) < 0)
976 return -ENOMEM; 977 return -ENOMEM;
977 if (snmp_mib_init((void **)net->mib.udplite_stats_in6, 978 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
978 sizeof (struct udp_mib)) < 0) 979 sizeof (struct udp_mib)) < 0)
979 goto err_udplite_mib; 980 goto err_udplite_mib;
980 if (snmp_mib_init((void **)net->mib.ipv6_statistics, 981 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
981 sizeof(struct ipstats_mib)) < 0) 982 sizeof(struct ipstats_mib)) < 0)
982 goto err_ip_mib; 983 goto err_ip_mib;
983 if (snmp_mib_init((void **)net->mib.icmpv6_statistics, 984 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
984 sizeof(struct icmpv6_mib)) < 0) 985 sizeof(struct icmpv6_mib)) < 0)
985 goto err_icmp_mib; 986 goto err_icmp_mib;
986 if (snmp_mib_init((void **)net->mib.icmpv6msg_statistics, 987 if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
987 sizeof(struct icmpv6msg_mib)) < 0) 988 sizeof(struct icmpv6msg_mib)) < 0)
988 goto err_icmpmsg_mib; 989 goto err_icmpmsg_mib;
989 return 0; 990 return 0;
990 991
991err_icmpmsg_mib: 992err_icmpmsg_mib:
992 snmp_mib_free((void **)net->mib.icmpv6_statistics); 993 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
993err_icmp_mib: 994err_icmp_mib:
994 snmp_mib_free((void **)net->mib.ipv6_statistics); 995 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
995err_ip_mib: 996err_ip_mib:
996 snmp_mib_free((void **)net->mib.udplite_stats_in6); 997 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
997err_udplite_mib: 998err_udplite_mib:
998 snmp_mib_free((void **)net->mib.udp_stats_in6); 999 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
999 return -ENOMEM; 1000 return -ENOMEM;
1000} 1001}
1001 1002
1002static void __net_exit ipv6_cleanup_mibs(struct net *net) 1003static void ipv6_cleanup_mibs(struct net *net)
1003{ 1004{
1004 snmp_mib_free((void **)net->mib.udp_stats_in6); 1005 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
1005 snmp_mib_free((void **)net->mib.udplite_stats_in6); 1006 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
1006 snmp_mib_free((void **)net->mib.ipv6_statistics); 1007 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
1007 snmp_mib_free((void **)net->mib.icmpv6_statistics); 1008 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
1008 snmp_mib_free((void **)net->mib.icmpv6msg_statistics); 1009 snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
1009} 1010}
1010 1011
1011static int __net_init inet6_net_init(struct net *net) 1012static int __net_init inet6_net_init(struct net *net)
@@ -1042,7 +1043,7 @@ out:
1042#endif 1043#endif
1043} 1044}
1044 1045
1045static void inet6_net_exit(struct net *net) 1046static void __net_exit inet6_net_exit(struct net *net)
1046{ 1047{
1047#ifdef CONFIG_PROC_FS 1048#ifdef CONFIG_PROC_FS
1048 udp6_proc_exit(net); 1049 udp6_proc_exit(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c2f300c314be..ee82d4ef26ce 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -26,6 +26,7 @@
26 26
27#include <crypto/hash.h> 27#include <crypto/hash.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h>
29#include <net/ip.h> 30#include <net/ip.h>
30#include <net/ah.h> 31#include <net/ah.h>
31#include <linux/crypto.h> 32#include <linux/crypto.h>
@@ -614,7 +615,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
614 type != ICMPV6_PKT_TOOBIG) 615 type != ICMPV6_PKT_TOOBIG)
615 return; 616 return;
616 617
617 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6); 618 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
618 if (!x) 619 if (!x)
619 return; 620 return;
620 621
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f1c74c8ef9de..b5b07054508a 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/proc_fs.h> 30#include <linux/proc_fs.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/slab.h>
32 33
33#include <net/net_namespace.h> 34#include <net/net_namespace.h>
34#include <net/sock.h> 35#include <net/sock.h>
@@ -538,7 +539,7 @@ static const struct file_operations ac6_seq_fops = {
538 .release = seq_release_net, 539 .release = seq_release_net,
539}; 540};
540 541
541int ac6_proc_init(struct net *net) 542int __net_init ac6_proc_init(struct net *net)
542{ 543{
543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) 544 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
544 return -ENOMEM; 545 return -ENOMEM;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e6f9cdf780fe..622dc7939a1b 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -21,6 +21,7 @@
21#include <linux/in6.h> 21#include <linux/in6.h>
22#include <linux/ipv6.h> 22#include <linux/ipv6.h>
23#include <linux/route.h> 23#include <linux/route.h>
24#include <linux/slab.h>
24 25
25#include <net/ipv6.h> 26#include <net/ipv6.h>
26#include <net/ndisc.h> 27#include <net/ndisc.h>
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 668a46b655e6..ee9b93bdd6a2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -365,7 +365,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
365 type != ICMPV6_PKT_TOOBIG) 365 type != ICMPV6_PKT_TOOBIG)
366 return; 366 return;
367 367
368 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 368 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
369 if (!x) 369 if (!x)
370 return; 370 return;
371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4bac362b1335..8a659f92d17a 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/in6.h> 30#include <linux/in6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/slab.h>
32 33
33#include <net/dst.h> 34#include <net/dst.h>
34#include <net/sock.h> 35#include <net/sock.h>
@@ -481,7 +482,7 @@ looped_back:
481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), 482 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
482 IPSTATS_MIB_INHDRERRORS); 483 IPSTATS_MIB_INHDRERRORS);
483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 484 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
484 0, skb->dev); 485 0);
485 kfree_skb(skb); 486 kfree_skb(skb);
486 return -1; 487 return -1;
487 } 488 }
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b7aa7c64cc4a..5e463c43fcc2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
84 if ((rule->flags & FIB_RULE_FIND_SADDR) && 84 if ((rule->flags & FIB_RULE_FIND_SADDR) &&
85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { 85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
86 struct in6_addr saddr; 86 struct in6_addr saddr;
87 unsigned int srcprefs = 0;
88
89 if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
90 srcprefs |= IPV6_PREFER_SRC_TMP;
91 if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
92 srcprefs |= IPV6_PREFER_SRC_PUBLIC;
93 if (flags & RT6_LOOKUP_F_SRCPREF_COA)
94 srcprefs |= IPV6_PREFER_SRC_COA;
95 87
96 if (ipv6_dev_get_saddr(net, 88 if (ipv6_dev_get_saddr(net,
97 ip6_dst_idev(&rt->u.dst)->dev, 89 ip6_dst_idev(&rt->u.dst)->dev,
98 &flp->fl6_dst, srcprefs, 90 &flp->fl6_dst,
91 rt6_flags2srcprefs(flags),
99 &saddr)) 92 &saddr))
100 goto again; 93 goto again;
101 if (!ipv6_prefix_equal(&saddr, &r->src.addr, 94 if (!ipv6_prefix_equal(&saddr, &r->src.addr,
@@ -262,7 +255,7 @@ static struct fib_rules_ops fib6_rules_ops_template = {
262 .fro_net = &init_net, 255 .fro_net = &init_net,
263}; 256};
264 257
265static int fib6_rules_net_init(struct net *net) 258static int __net_init fib6_rules_net_init(struct net *net)
266{ 259{
267 struct fib_rules_ops *ops; 260 struct fib_rules_ops *ops;
268 int err = -ENOMEM; 261 int err = -ENOMEM;
@@ -291,7 +284,7 @@ out_fib6_rules_ops:
291 goto out; 284 goto out;
292} 285}
293 286
294static void fib6_rules_net_exit(struct net *net) 287static void __net_exit fib6_rules_net_exit(struct net *net)
295{ 288{
296 fib_rules_unregister(net->ipv6.fib6_rules_ops); 289 fib_rules_unregister(net->ipv6.fib6_rules_ops);
297} 290}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ae661bc3677..3330a4bd6157 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -40,6 +40,7 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/netfilter.h> 42#include <linux/netfilter.h>
43#include <linux/slab.h>
43 44
44#ifdef CONFIG_SYSCTL 45#ifdef CONFIG_SYSCTL
45#include <linux/sysctl.h> 46#include <linux/sysctl.h>
@@ -67,11 +68,6 @@
67#include <asm/uaccess.h> 68#include <asm/uaccess.h>
68#include <asm/system.h> 69#include <asm/system.h>
69 70
70DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71EXPORT_SYMBOL(icmpv6_statistics);
72DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
73EXPORT_SYMBOL(icmpv6msg_statistics);
74
75/* 71/*
76 * The ICMP socket(s). This is the most convenient way to flow control 72 * The ICMP socket(s). This is the most convenient way to flow control
77 * our ICMP output as well as maintain a clean interface throughout 73 * our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +115,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
119 */ 115 */
120void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) 116void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
121{ 117{
122 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 118 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
123 kfree_skb(skb); 119 kfree_skb(skb);
124} 120}
125 121
@@ -305,8 +301,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
305/* 301/*
306 * Send an ICMP message in response to a packet in error 302 * Send an ICMP message in response to a packet in error
307 */ 303 */
308void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 304void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
309 struct net_device *dev)
310{ 305{
311 struct net *net = dev_net(skb->dev); 306 struct net *net = dev_net(skb->dev);
312 struct inet6_dev *idev = NULL; 307 struct inet6_dev *idev = NULL;
@@ -951,7 +946,7 @@ ctl_table ipv6_icmp_table_template[] = {
951 { }, 946 { },
952}; 947};
953 948
954struct ctl_table *ipv6_icmp_sysctl_init(struct net *net) 949struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
955{ 950{
956 struct ctl_table *table; 951 struct ctl_table *table;
957 952
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 3516e6fe2e56..628db24bcf22 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -17,6 +17,7 @@
17#include <linux/in6.h> 17#include <linux/in6.h>
18#include <linux/ipv6.h> 18#include <linux/ipv6.h>
19#include <linux/jhash.h> 19#include <linux/jhash.h>
20#include <linux/slab.h>
20 21
21#include <net/addrconf.h> 22#include <net/addrconf.h>
22#include <net/inet_connection_sock.h> 23#include <net/inet_connection_sock.h>
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0e93ca56eb69..6b82e02158c6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -26,6 +26,7 @@
26#include <linux/in6.h> 26#include <linux/in6.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/slab.h>
29 30
30#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
31#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
@@ -93,29 +94,20 @@ static __u32 rt_sernum;
93 94
94static void fib6_gc_timer_cb(unsigned long arg); 95static void fib6_gc_timer_cb(unsigned long arg);
95 96
96static struct fib6_walker_t fib6_walker_list = { 97static LIST_HEAD(fib6_walkers);
97 .prev = &fib6_walker_list, 98#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
98 .next = &fib6_walker_list,
99};
100
101#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
102 99
103static inline void fib6_walker_link(struct fib6_walker_t *w) 100static inline void fib6_walker_link(struct fib6_walker_t *w)
104{ 101{
105 write_lock_bh(&fib6_walker_lock); 102 write_lock_bh(&fib6_walker_lock);
106 w->next = fib6_walker_list.next; 103 list_add(&w->lh, &fib6_walkers);
107 w->prev = &fib6_walker_list;
108 w->next->prev = w;
109 w->prev->next = w;
110 write_unlock_bh(&fib6_walker_lock); 104 write_unlock_bh(&fib6_walker_lock);
111} 105}
112 106
113static inline void fib6_walker_unlink(struct fib6_walker_t *w) 107static inline void fib6_walker_unlink(struct fib6_walker_t *w)
114{ 108{
115 write_lock_bh(&fib6_walker_lock); 109 write_lock_bh(&fib6_walker_lock);
116 w->next->prev = w->prev; 110 list_del(&w->lh);
117 w->prev->next = w->next;
118 w->prev = w->next = w;
119 write_unlock_bh(&fib6_walker_lock); 111 write_unlock_bh(&fib6_walker_lock);
120} 112}
121static __inline__ u32 fib6_new_sernum(void) 113static __inline__ u32 fib6_new_sernum(void)
@@ -239,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
239 return NULL; 231 return NULL;
240} 232}
241 233
242static void fib6_tables_init(struct net *net) 234static void __net_init fib6_tables_init(struct net *net)
243{ 235{
244 fib6_link_table(net, net->ipv6.fib6_main_tbl); 236 fib6_link_table(net, net->ipv6.fib6_main_tbl);
245 fib6_link_table(net, net->ipv6.fib6_local_tbl); 237 fib6_link_table(net, net->ipv6.fib6_local_tbl);
@@ -262,7 +254,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
262 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags); 254 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
263} 255}
264 256
265static void fib6_tables_init(struct net *net) 257static void __net_init fib6_tables_init(struct net *net)
266{ 258{
267 fib6_link_table(net, net->ipv6.fib6_main_tbl); 259 fib6_link_table(net, net->ipv6.fib6_main_tbl);
268} 260}
@@ -319,12 +311,26 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
319 w->root = &table->tb6_root; 311 w->root = &table->tb6_root;
320 312
321 if (cb->args[4] == 0) { 313 if (cb->args[4] == 0) {
314 w->count = 0;
315 w->skip = 0;
316
322 read_lock_bh(&table->tb6_lock); 317 read_lock_bh(&table->tb6_lock);
323 res = fib6_walk(w); 318 res = fib6_walk(w);
324 read_unlock_bh(&table->tb6_lock); 319 read_unlock_bh(&table->tb6_lock);
325 if (res > 0) 320 if (res > 0) {
326 cb->args[4] = 1; 321 cb->args[4] = 1;
322 cb->args[5] = w->root->fn_sernum;
323 }
327 } else { 324 } else {
325 if (cb->args[5] != w->root->fn_sernum) {
326 /* Begin at the root if the tree changed */
327 cb->args[5] = w->root->fn_sernum;
328 w->state = FWS_INIT;
329 w->node = w->root;
330 w->skip = w->count;
331 } else
332 w->skip = 0;
333
328 read_lock_bh(&table->tb6_lock); 334 read_lock_bh(&table->tb6_lock);
329 res = fib6_walk_continue(w); 335 res = fib6_walk_continue(w);
330 read_unlock_bh(&table->tb6_lock); 336 read_unlock_bh(&table->tb6_lock);
@@ -1250,9 +1256,18 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1250 w->leaf = fn->leaf; 1256 w->leaf = fn->leaf;
1251 case FWS_C: 1257 case FWS_C:
1252 if (w->leaf && fn->fn_flags&RTN_RTINFO) { 1258 if (w->leaf && fn->fn_flags&RTN_RTINFO) {
1253 int err = w->func(w); 1259 int err;
1260
1261 if (w->count < w->skip) {
1262 w->count++;
1263 continue;
1264 }
1265
1266 err = w->func(w);
1254 if (err) 1267 if (err)
1255 return err; 1268 return err;
1269
1270 w->count++;
1256 continue; 1271 continue;
1257 } 1272 }
1258 w->state = FWS_U; 1273 w->state = FWS_U;
@@ -1346,6 +1361,8 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1346 c.w.root = root; 1361 c.w.root = root;
1347 c.w.func = fib6_clean_node; 1362 c.w.func = fib6_clean_node;
1348 c.w.prune = prune; 1363 c.w.prune = prune;
1364 c.w.count = 0;
1365 c.w.skip = 0;
1349 c.func = func; 1366 c.func = func;
1350 c.arg = arg; 1367 c.arg = arg;
1351 c.net = net; 1368 c.net = net;
@@ -1469,7 +1486,7 @@ static void fib6_gc_timer_cb(unsigned long arg)
1469 fib6_run_gc(0, (struct net *)arg); 1486 fib6_run_gc(0, (struct net *)arg);
1470} 1487}
1471 1488
1472static int fib6_net_init(struct net *net) 1489static int __net_init fib6_net_init(struct net *net)
1473{ 1490{
1474 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); 1491 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1475 1492
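Aside: the walker hunks above drop the open-coded prev/next pointer surgery in favour of the generic list helpers (LIST_HEAD, list_add, list_del) guarded by fib6_walker_lock. The standalone C sketch below mirrors that pattern outside the kernel; the list helpers, walker struct and pthread mutex here are illustrative stand-ins, not the kernel's list.h API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's intrusive list helpers. */
struct list_node {
	struct list_node *prev, *next;
};

#define LIST_INIT(name) { &(name), &(name) }

static void list_add_head(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* A walker embeds the list node instead of carrying raw prev/next fields. */
struct walker {
	struct list_node lh;	/* first member, so the cast below is valid */
	const char *name;
};

static struct list_node walkers = LIST_INIT(walkers);
static pthread_mutex_t walker_lock = PTHREAD_MUTEX_INITIALIZER;

static void walker_link(struct walker *w)
{
	pthread_mutex_lock(&walker_lock);
	list_add_head(&w->lh, &walkers);
	pthread_mutex_unlock(&walker_lock);
}

static void walker_unlink(struct walker *w)
{
	pthread_mutex_lock(&walker_lock);
	list_del_node(&w->lh);
	pthread_mutex_unlock(&walker_lock);
}

int main(void)
{
	struct walker a = { .name = "a" }, b = { .name = "b" };
	struct list_node *pos;

	walker_link(&a);
	walker_link(&b);
	for (pos = walkers.next; pos != &walkers; pos = pos->next)
		printf("walker %s\n", ((struct walker *)pos)->name);
	walker_unlink(&a);
	walker_unlink(&b);
	return 0;
}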
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6e7bffa2205e..14e23216eb28 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -20,6 +20,7 @@
20#include <linux/route.h> 20#include <linux/route.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/slab.h>
23 24
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/sock.h> 26#include <net/sock.h>
@@ -154,7 +155,7 @@ static void ip6_fl_gc(unsigned long dummy)
154 write_unlock(&ip6_fl_lock); 155 write_unlock(&ip6_fl_lock);
155} 156}
156 157
157static void ip6_fl_purge(struct net *net) 158static void __net_exit ip6_fl_purge(struct net *net)
158{ 159{
159 int i; 160 int i;
160 161
@@ -735,7 +736,7 @@ static const struct file_operations ip6fl_seq_fops = {
735 .release = seq_release_net, 736 .release = seq_release_net,
736}; 737};
737 738
738static int ip6_flowlabel_proc_init(struct net *net) 739static int __net_init ip6_flowlabel_proc_init(struct net *net)
739{ 740{
740 if (!proc_net_fops_create(net, "ip6_flowlabel", 741 if (!proc_net_fops_create(net, "ip6_flowlabel",
741 S_IRUGO, &ip6fl_seq_fops)) 742 S_IRUGO, &ip6fl_seq_fops))
@@ -743,7 +744,7 @@ static int ip6_flowlabel_proc_init(struct net *net)
743 return 0; 744 return 0;
744} 745}
745 746
746static void ip6_flowlabel_proc_fini(struct net *net) 747static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
747{ 748{
748 proc_net_remove(net, "ip6_flowlabel"); 749 proc_net_remove(net, "ip6_flowlabel");
749} 750}
@@ -754,11 +755,10 @@ static inline int ip6_flowlabel_proc_init(struct net *net)
754} 755}
755static inline void ip6_flowlabel_proc_fini(struct net *net) 756static inline void ip6_flowlabel_proc_fini(struct net *net)
756{ 757{
757 return ;
758} 758}
759#endif 759#endif
760 760
761static inline void ip6_flowlabel_net_exit(struct net *net) 761static void __net_exit ip6_flowlabel_net_exit(struct net *net)
762{ 762{
763 ip6_fl_purge(net); 763 ip6_fl_purge(net);
764 ip6_flowlabel_proc_fini(net); 764 ip6_flowlabel_proc_fini(net);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e94..6aa7ee1295c2 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -28,6 +28,7 @@
28#include <linux/in6.h> 28#include <linux/in6.h>
29#include <linux/icmpv6.h> 29#include <linux/icmpv6.h>
30#include <linux/mroute6.h> 30#include <linux/mroute6.h>
31#include <linux/slab.h>
31 32
32#include <linux/netfilter.h> 33#include <linux/netfilter.h>
33#include <linux/netfilter_ipv6.h> 34#include <linux/netfilter_ipv6.h>
@@ -216,8 +217,7 @@ resubmit:
216 IP6_INC_STATS_BH(net, idev, 217 IP6_INC_STATS_BH(net, idev,
217 IPSTATS_MIB_INUNKNOWNPROTOS); 218 IPSTATS_MIB_INUNKNOWNPROTOS);
218 icmpv6_send(skb, ICMPV6_PARAMPROB, 219 icmpv6_send(skb, ICMPV6_PARAMPROB,
219 ICMPV6_UNK_NEXTHDR, nhoff, 220 ICMPV6_UNK_NEXTHDR, nhoff);
220 skb->dev);
221 } 221 }
222 } else 222 } else
223 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 223 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb6d09728633..75d5ef830097 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -37,6 +37,7 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/route.h> 38#include <linux/route.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/slab.h>
40 41
41#include <linux/netfilter.h> 42#include <linux/netfilter.h>
42#include <linux/netfilter_ipv6.h> 43#include <linux/netfilter_ipv6.h>
@@ -107,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
107 newskb->ip_summed = CHECKSUM_UNNECESSARY; 108 newskb->ip_summed = CHECKSUM_UNNECESSARY;
108 WARN_ON(!skb_dst(newskb)); 109 WARN_ON(!skb_dst(newskb));
109 110
110 netif_rx(newskb); 111 netif_rx_ni(newskb);
111 return 0; 112 return 0;
112} 113}
113 114
@@ -267,7 +268,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
267 if (net_ratelimit()) 268 if (net_ratelimit())
268 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); 269 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
269 skb->dev = dst->dev; 270 skb->dev = dst->dev;
270 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 271 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
271 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 272 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
272 kfree_skb(skb); 273 kfree_skb(skb);
273 return -EMSGSIZE; 274 return -EMSGSIZE;
@@ -402,6 +403,7 @@ int ip6_forward(struct sk_buff *skb)
402 struct ipv6hdr *hdr = ipv6_hdr(skb); 403 struct ipv6hdr *hdr = ipv6_hdr(skb);
403 struct inet6_skb_parm *opt = IP6CB(skb); 404 struct inet6_skb_parm *opt = IP6CB(skb);
404 struct net *net = dev_net(dst->dev); 405 struct net *net = dev_net(dst->dev);
406 u32 mtu;
405 407
406 if (net->ipv6.devconf_all->forwarding == 0) 408 if (net->ipv6.devconf_all->forwarding == 0)
407 goto error; 409 goto error;
@@ -441,8 +443,7 @@ int ip6_forward(struct sk_buff *skb)
441 if (hdr->hop_limit <= 1) { 443 if (hdr->hop_limit <= 1) {
442 /* Force OUTPUT device used as source address */ 444 /* Force OUTPUT device used as source address */
443 skb->dev = dst->dev; 445 skb->dev = dst->dev;
444 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 446 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
445 0, skb->dev);
446 IP6_INC_STATS_BH(net, 447 IP6_INC_STATS_BH(net,
447 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); 448 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
448 449
@@ -504,15 +505,19 @@ int ip6_forward(struct sk_buff *skb)
504 goto error; 505 goto error;
505 if (addrtype & IPV6_ADDR_LINKLOCAL) { 506 if (addrtype & IPV6_ADDR_LINKLOCAL) {
506 icmpv6_send(skb, ICMPV6_DEST_UNREACH, 507 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
507 ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); 508 ICMPV6_NOT_NEIGHBOUR, 0);
508 goto error; 509 goto error;
509 } 510 }
510 } 511 }
511 512
512 if (skb->len > dst_mtu(dst)) { 513 mtu = dst_mtu(dst);
514 if (mtu < IPV6_MIN_MTU)
515 mtu = IPV6_MIN_MTU;
516
517 if (skb->len > mtu) {
513 /* Again, force OUTPUT device used as source address */ 518 /* Again, force OUTPUT device used as source address */
514 skb->dev = dst->dev; 519 skb->dev = dst->dev;
515 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); 520 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
516 IP6_INC_STATS_BH(net, 521 IP6_INC_STATS_BH(net,
517 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); 522 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
518 IP6_INC_STATS_BH(net, 523 IP6_INC_STATS_BH(net,
@@ -622,12 +627,11 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
622 mtu = ip6_skb_dst_mtu(skb); 627 mtu = ip6_skb_dst_mtu(skb);
623 628
624 /* We must not fragment if the socket is set to force MTU discovery 629 /* We must not fragment if the socket is set to force MTU discovery
625 * or if the skb it not generated by a local socket. (This last 630 * or if the skb it not generated by a local socket.
626 * check should be redundant, but it's free.)
627 */ 631 */
628 if (!skb->local_df) { 632 if (!skb->local_df && skb->len > mtu) {
629 skb->dev = skb_dst(skb)->dev; 633 skb->dev = skb_dst(skb)->dev;
630 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 634 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
631 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 635 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
632 IPSTATS_MIB_FRAGFAILS); 636 IPSTATS_MIB_FRAGFAILS);
633 kfree_skb(skb); 637 kfree_skb(skb);
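Aside: ip6_forward() above now clamps the route MTU to IPV6_MIN_MTU (1280) before the packet-too-big comparison, so a bogus sub-minimum route MTU can no longer produce ICMPV6_PKT_TOOBIG reports below the protocol minimum. A minimal userspace sketch of that clamp, with made-up packet and route values:

#include <stdio.h>

#define IPV6_MIN_MTU 1280u

/* IPv6 forwarding never reports an MTU below the protocol minimum. */
static unsigned int effective_mtu(unsigned int path_mtu)
{
	return path_mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : path_mtu;
}

int main(void)
{
	unsigned int pkt_len = 1400, path_mtu = 1006; /* e.g. a bogus route MTU */
	unsigned int mtu = effective_mtu(path_mtu);

	if (pkt_len > mtu)
		printf("would send ICMPV6_PKT_TOOBIG with mtu=%u\n", mtu);
	return 0;
}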
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d453d07b0dfe..2599870747ec 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -37,6 +37,7 @@
37#include <linux/route.h> 37#include <linux/route.h>
38#include <linux/rtnetlink.h> 38#include <linux/rtnetlink.h>
39#include <linux/netfilter_ipv6.h> 39#include <linux/netfilter_ipv6.h>
40#include <linux/slab.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/atomic.h> 43#include <asm/atomic.h>
@@ -74,7 +75,6 @@ MODULE_LICENSE("GPL");
74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 75 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
75 (HASH_SIZE - 1)) 76 (HASH_SIZE - 1))
76 77
77static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 78static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 79static void ip6_tnl_dev_setup(struct net_device *dev);
80 80
@@ -623,7 +623,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
623 if (rt && rt->rt6i_dev) 623 if (rt && rt->rt6i_dev)
624 skb2->dev = rt->rt6i_dev; 624 skb2->dev = rt->rt6i_dev;
625 625
626 icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); 626 icmpv6_send(skb2, rel_type, rel_code, rel_info);
627 627
628 if (rt) 628 if (rt)
629 dst_release(&rt->u.dst); 629 dst_release(&rt->u.dst);
@@ -1015,7 +1015,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1015 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1015 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1016 if (tel->encap_limit == 0) { 1016 if (tel->encap_limit == 0) {
1017 icmpv6_send(skb, ICMPV6_PARAMPROB, 1017 icmpv6_send(skb, ICMPV6_PARAMPROB,
1018 ICMPV6_HDR_FIELD, offset + 2, skb->dev); 1018 ICMPV6_HDR_FIELD, offset + 2);
1019 return -1; 1019 return -1;
1020 } 1020 }
1021 encap_limit = tel->encap_limit - 1; 1021 encap_limit = tel->encap_limit - 1;
@@ -1034,7 +1034,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1034 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); 1034 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
1035 if (err != 0) { 1035 if (err != 0) {
1036 if (err == -EMSGSIZE) 1036 if (err == -EMSGSIZE)
1037 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 1037 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1038 return -1; 1038 return -1;
1039 } 1039 }
1040 1040
@@ -1364,7 +1364,7 @@ static void ip6_tnl_dev_init(struct net_device *dev)
1364 * Return: 0 1364 * Return: 0
1365 **/ 1365 **/
1366 1366
1367static void ip6_fb_tnl_dev_init(struct net_device *dev) 1367static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1368{ 1368{
1369 struct ip6_tnl *t = netdev_priv(dev); 1369 struct ip6_tnl *t = netdev_priv(dev);
1370 struct net *net = dev_net(dev); 1370 struct net *net = dev_net(dev);
@@ -1388,7 +1388,7 @@ static struct xfrm6_tunnel ip6ip6_handler = {
1388 .priority = 1, 1388 .priority = 1,
1389}; 1389};
1390 1390
1391static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) 1391static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1392{ 1392{
1393 int h; 1393 int h;
1394 struct ip6_tnl *t; 1394 struct ip6_tnl *t;
@@ -1407,7 +1407,7 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1407 unregister_netdevice_many(&list); 1407 unregister_netdevice_many(&list);
1408} 1408}
1409 1409
1410static int ip6_tnl_init_net(struct net *net) 1410static int __net_init ip6_tnl_init_net(struct net *net)
1411{ 1411{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1413 int err; 1413 int err;
@@ -1436,7 +1436,7 @@ err_alloc_dev:
1436 return err; 1436 return err;
1437} 1437}
1438 1438
1439static void ip6_tnl_exit_net(struct net *net) 1439static void __net_exit ip6_tnl_exit_net(struct net *net)
1440{ 1440{
1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1442 1442
@@ -1462,27 +1462,29 @@ static int __init ip6_tunnel_init(void)
1462{ 1462{
1463 int err; 1463 int err;
1464 1464
1465 if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) { 1465 err = register_pernet_device(&ip6_tnl_net_ops);
1466 if (err < 0)
1467 goto out_pernet;
1468
1469 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1470 if (err < 0) {
1466 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); 1471 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
1467 err = -EAGAIN; 1472 goto out_ip4ip6;
1468 goto out;
1469 } 1473 }
1470 1474
1471 if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) { 1475 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1476 if (err < 0) {
1472 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); 1477 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
1473 err = -EAGAIN; 1478 goto out_ip6ip6;
1474 goto unreg_ip4ip6;
1475 } 1479 }
1476 1480
1477 err = register_pernet_device(&ip6_tnl_net_ops);
1478 if (err < 0)
1479 goto err_pernet;
1480 return 0; 1481 return 0;
1481err_pernet: 1482
1482 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); 1483out_ip6ip6:
1483unreg_ip4ip6:
1484 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); 1484 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1485out: 1485out_ip4ip6:
1486 unregister_pernet_device(&ip6_tnl_net_ops);
1487out_pernet:
1486 return err; 1488 return err;
1487} 1489}
1488 1490
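Aside: ip6_tunnel_init() above now registers the pernet device first and, when a later step fails, unwinds the registrations already completed in reverse order. A standalone sketch of that goto-based unwinding style, with hypothetical register_*/unregister_* helpers standing in for the real pernet/xfrm calls:

#include <stdio.h>

/* Hypothetical subsystems standing in for pernet/xfrm registration. */
static int register_a(void)  { return 0; }
static int register_b(void)  { return 0; }
static int register_c(void)  { return -1; /* simulate a failure */ }
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

/* Register in order; on failure, undo everything already done, in reverse. */
static int demo_init(void)
{
	int err;

	err = register_a();
	if (err < 0)
		goto out;

	err = register_b();
	if (err < 0)
		goto out_a;

	err = register_c();
	if (err < 0)
		goto out_b;

	return 0;

out_b:
	unregister_b();
out_a:
	unregister_a();
out:
	return err;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}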
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 52e0f74fdfe0..3e333268db89 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -33,6 +33,7 @@
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
36#include <net/protocol.h> 37#include <net/protocol.h>
37#include <linux/skbuff.h> 38#include <linux/skbuff.h>
38#include <net/sock.h> 39#include <net/sock.h>
@@ -1113,6 +1114,9 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
1113 unsigned char ttls[MAXMIFS]; 1114 unsigned char ttls[MAXMIFS];
1114 int i; 1115 int i;
1115 1116
1117 if (mfc->mf6cc_parent >= MAXMIFS)
1118 return -ENFILE;
1119
1116 memset(ttls, 255, MAXMIFS); 1120 memset(ttls, 255, MAXMIFS);
1117 for (i = 0; i < MAXMIFS; i++) { 1121 for (i = 0; i < MAXMIFS; i++) {
1118 if (IF_ISSET(i, &mfc->mf6cc_ifset)) 1122 if (IF_ISSET(i, &mfc->mf6cc_ifset))
@@ -1692,17 +1696,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1692 int ct; 1696 int ct;
1693 struct rtnexthop *nhp; 1697 struct rtnexthop *nhp;
1694 struct net *net = mfc6_net(c); 1698 struct net *net = mfc6_net(c);
1695 struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
1696 u8 *b = skb_tail_pointer(skb); 1699 u8 *b = skb_tail_pointer(skb);
1697 struct rtattr *mp_head; 1700 struct rtattr *mp_head;
1698 1701
1699 if (dev) 1702 /* If cache is unresolved, don't try to parse IIF and OIF */
1700 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1703 if (c->mf6c_parent > MAXMIFS)
1704 return -ENOENT;
1705
1706 if (MIF_EXISTS(net, c->mf6c_parent))
1707 RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
1701 1708
1702 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1709 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1703 1710
1704 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1711 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1705 if (c->mfc_un.res.ttls[ct] < 255) { 1712 if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1706 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1713 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1707 goto rtattr_failure; 1714 goto rtattr_failure;
1708 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1715 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 002e6eef9120..85cccd6ed0b7 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,6 +53,7 @@
53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
54 u8 type, u8 code, int offset, __be32 info) 54 u8 type, u8 code, int offset, __be32 info)
55{ 55{
56 struct net *net = dev_net(skb->dev);
56 __be32 spi; 57 __be32 spi;
57 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
58 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
@@ -63,7 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
63 return; 64 return;
64 65
65 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
66 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
67 if (!x) 68 if (!x)
68 return; 69 return;
69 70
@@ -74,14 +75,15 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
74 75
75static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 76static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
76{ 77{
78 struct net *net = xs_net(x);
77 struct xfrm_state *t = NULL; 79 struct xfrm_state *t = NULL;
78 80
79 t = xfrm_state_alloc(&init_net); 81 t = xfrm_state_alloc(net);
80 if (!t) 82 if (!t)
81 goto out; 83 goto out;
82 84
83 t->id.proto = IPPROTO_IPV6; 85 t->id.proto = IPPROTO_IPV6;
84 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr); 86 t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
85 if (!t->id.spi) 87 if (!t->id.spi)
86 goto error; 88 goto error;
87 89
@@ -90,6 +92,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
90 t->props.family = AF_INET6; 92 t->props.family = AF_INET6;
91 t->props.mode = x->props.mode; 93 t->props.mode = x->props.mode;
92 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); 94 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
95 memcpy(&t->mark, &x->mark, sizeof(t->mark));
93 96
94 if (xfrm_init_state(t)) 97 if (xfrm_init_state(t))
95 goto error; 98 goto error;
@@ -108,13 +111,15 @@ error:
108 111
109static int ipcomp6_tunnel_attach(struct xfrm_state *x) 112static int ipcomp6_tunnel_attach(struct xfrm_state *x)
110{ 113{
114 struct net *net = xs_net(x);
111 int err = 0; 115 int err = 0;
112 struct xfrm_state *t = NULL; 116 struct xfrm_state *t = NULL;
113 __be32 spi; 117 __be32 spi;
118 u32 mark = x->mark.m & x->mark.v;
114 119
115 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr); 120 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
116 if (spi) 121 if (spi)
117 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr, 122 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr,
118 spi, IPPROTO_IPV6, AF_INET6); 123 spi, IPPROTO_IPV6, AF_INET6);
119 if (!t) { 124 if (!t) {
120 t = ipcomp6_tunnel_create(x); 125 t = ipcomp6_tunnel_create(x);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 430454ee5ead..33f60fca7aa7 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/sysctl.h> 37#include <linux/sysctl.h>
38#include <linux/netfilter.h> 38#include <linux/netfilter.h>
39#include <linux/slab.h>
39 40
40#include <net/sock.h> 41#include <net/sock.h>
41#include <net/snmp.h> 42#include <net/snmp.h>
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1f9c44442e65..c483ab9fd67b 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -43,6 +43,7 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
45#include <linux/seq_file.h> 45#include <linux/seq_file.h>
46#include <linux/slab.h>
46 47
47#include <linux/netfilter.h> 48#include <linux/netfilter.h>
48#include <linux/netfilter_ipv6.h> 49#include <linux/netfilter_ipv6.h>
@@ -793,10 +794,10 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
793 } 794 }
794 spin_unlock_bh(&im->mca_lock); 795 spin_unlock_bh(&im->mca_lock);
795 796
796 write_lock_bh(&idev->mc_lock); 797 spin_lock_bh(&idev->mc_lock);
797 pmc->next = idev->mc_tomb; 798 pmc->next = idev->mc_tomb;
798 idev->mc_tomb = pmc; 799 idev->mc_tomb = pmc;
799 write_unlock_bh(&idev->mc_lock); 800 spin_unlock_bh(&idev->mc_lock);
800} 801}
801 802
802static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 803static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
@@ -804,7 +805,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
804 struct ifmcaddr6 *pmc, *pmc_prev; 805 struct ifmcaddr6 *pmc, *pmc_prev;
805 struct ip6_sf_list *psf, *psf_next; 806 struct ip6_sf_list *psf, *psf_next;
806 807
807 write_lock_bh(&idev->mc_lock); 808 spin_lock_bh(&idev->mc_lock);
808 pmc_prev = NULL; 809 pmc_prev = NULL;
809 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { 810 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
810 if (ipv6_addr_equal(&pmc->mca_addr, pmca)) 811 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
@@ -817,7 +818,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
817 else 818 else
818 idev->mc_tomb = pmc->next; 819 idev->mc_tomb = pmc->next;
819 } 820 }
820 write_unlock_bh(&idev->mc_lock); 821 spin_unlock_bh(&idev->mc_lock);
822
821 if (pmc) { 823 if (pmc) {
822 for (psf=pmc->mca_tomb; psf; psf=psf_next) { 824 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
823 psf_next = psf->sf_next; 825 psf_next = psf->sf_next;
@@ -832,10 +834,10 @@ static void mld_clear_delrec(struct inet6_dev *idev)
832{ 834{
833 struct ifmcaddr6 *pmc, *nextpmc; 835 struct ifmcaddr6 *pmc, *nextpmc;
834 836
835 write_lock_bh(&idev->mc_lock); 837 spin_lock_bh(&idev->mc_lock);
836 pmc = idev->mc_tomb; 838 pmc = idev->mc_tomb;
837 idev->mc_tomb = NULL; 839 idev->mc_tomb = NULL;
838 write_unlock_bh(&idev->mc_lock); 840 spin_unlock_bh(&idev->mc_lock);
839 841
840 for (; pmc; pmc = nextpmc) { 842 for (; pmc; pmc = nextpmc) {
841 nextpmc = pmc->next; 843 nextpmc = pmc->next;
@@ -1696,7 +1698,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1696 int type, dtype; 1698 int type, dtype;
1697 1699
1698 read_lock_bh(&idev->lock); 1700 read_lock_bh(&idev->lock);
1699 write_lock_bh(&idev->mc_lock); 1701 spin_lock(&idev->mc_lock);
1700 1702
1701 /* deleted MCA's */ 1703 /* deleted MCA's */
1702 pmc_prev = NULL; 1704 pmc_prev = NULL;
@@ -1730,7 +1732,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1730 } else 1732 } else
1731 pmc_prev = pmc; 1733 pmc_prev = pmc;
1732 } 1734 }
1733 write_unlock_bh(&idev->mc_lock); 1735 spin_unlock(&idev->mc_lock);
1734 1736
1735 /* change recs */ 1737 /* change recs */
1736 for (pmc=idev->mc_list; pmc; pmc=pmc->next) { 1738 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
@@ -2311,7 +2313,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
2311void ipv6_mc_init_dev(struct inet6_dev *idev) 2313void ipv6_mc_init_dev(struct inet6_dev *idev)
2312{ 2314{
2313 write_lock_bh(&idev->lock); 2315 write_lock_bh(&idev->lock);
2314 rwlock_init(&idev->mc_lock); 2316 spin_lock_init(&idev->mc_lock);
2315 idev->mc_gq_running = 0; 2317 idev->mc_gq_running = 0;
2316 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire, 2318 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2317 (unsigned long)idev); 2319 (unsigned long)idev);
@@ -2646,7 +2648,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2646 .release = seq_release_net, 2648 .release = seq_release_net,
2647}; 2649};
2648 2650
2649static int igmp6_proc_init(struct net *net) 2651static int __net_init igmp6_proc_init(struct net *net)
2650{ 2652{
2651 int err; 2653 int err;
2652 2654
@@ -2666,23 +2668,22 @@ out_proc_net_igmp6:
2666 goto out; 2668 goto out;
2667} 2669}
2668 2670
2669static void igmp6_proc_exit(struct net *net) 2671static void __net_exit igmp6_proc_exit(struct net *net)
2670{ 2672{
2671 proc_net_remove(net, "mcfilter6"); 2673 proc_net_remove(net, "mcfilter6");
2672 proc_net_remove(net, "igmp6"); 2674 proc_net_remove(net, "igmp6");
2673} 2675}
2674#else 2676#else
2675static int igmp6_proc_init(struct net *net) 2677static inline int igmp6_proc_init(struct net *net)
2676{ 2678{
2677 return 0; 2679 return 0;
2678} 2680}
2679static void igmp6_proc_exit(struct net *net) 2681static inline void igmp6_proc_exit(struct net *net)
2680{ 2682{
2681 ;
2682} 2683}
2683#endif 2684#endif
2684 2685
2685static int igmp6_net_init(struct net *net) 2686static int __net_init igmp6_net_init(struct net *net)
2686{ 2687{
2687 int err; 2688 int err;
2688 2689
@@ -2708,7 +2709,7 @@ out_sock_create:
2708 goto out; 2709 goto out;
2709} 2710}
2710 2711
2711static void igmp6_net_exit(struct net *net) 2712static void __net_exit igmp6_net_exit(struct net *net)
2712{ 2713{
2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk); 2714 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2714 igmp6_proc_exit(net); 2715 igmp6_proc_exit(net);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b3..2794b6002836 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
56 56
57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos) 57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
58{ 58{
59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
60} 60}
61 61
62static int mip6_mh_len(int type) 62static int mip6_mh_len(int type)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c45852798092..da0a4d2adc69 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -59,6 +59,7 @@
59#include <linux/route.h> 59#include <linux/route.h>
60#include <linux/init.h> 60#include <linux/init.h>
61#include <linux/rcupdate.h> 61#include <linux/rcupdate.h>
62#include <linux/slab.h>
62#ifdef CONFIG_SYSCTL 63#ifdef CONFIG_SYSCTL
63#include <linux/sysctl.h> 64#include <linux/sysctl.h>
64#endif 65#endif
@@ -1772,7 +1773,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
1772 1773
1773#endif 1774#endif
1774 1775
1775static int ndisc_net_init(struct net *net) 1776static int __net_init ndisc_net_init(struct net *net)
1776{ 1777{
1777 struct ipv6_pinfo *np; 1778 struct ipv6_pinfo *np;
1778 struct sock *sk; 1779 struct sock *sk;
@@ -1797,7 +1798,7 @@ static int ndisc_net_init(struct net *net)
1797 return 0; 1798 return 0;
1798} 1799}
1799 1800
1800static void ndisc_net_exit(struct net *net) 1801static void __net_exit ndisc_net_exit(struct net *net)
1801{ 1802{
1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk); 1803 inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
1803} 1804}
@@ -1820,8 +1821,7 @@ int __init ndisc_init(void)
1820 neigh_table_init(&nd_tbl); 1821 neigh_table_init(&nd_tbl);
1821 1822
1822#ifdef CONFIG_SYSCTL 1823#ifdef CONFIG_SYSCTL
1823 err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, 1824 err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6",
1824 NET_IPV6_NEIGH, "ipv6",
1825 &ndisc_ifinfo_sysctl_change); 1825 &ndisc_ifinfo_sysctl_change);
1826 if (err) 1826 if (err)
1827 goto out_unregister_pernet; 1827 goto out_unregister_pernet;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 7854052be60b..6a68a74d14a3 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -25,6 +25,7 @@
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/slab.h>
28#include <net/net_namespace.h> 29#include <net/net_namespace.h>
29#include <net/sock.h> 30#include <net/sock.h>
30#include <net/ipv6.h> 31#include <net/ipv6.h>
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 8a7e0f52e177..9210e312edf1 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -29,6 +29,7 @@
29#include <linux/netfilter_ipv6/ip6_tables.h> 29#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter/x_tables.h> 30#include <linux/netfilter/x_tables.h>
31#include <net/netfilter/nf_log.h> 31#include <net/netfilter/nf_log.h>
32#include "../../netfilter/xt_repldata.h"
32 33
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -67,6 +68,12 @@ do { \
67#define inline 68#define inline
68#endif 69#endif
69 70
71void *ip6t_alloc_initial_table(const struct xt_table *info)
72{
73 return xt_alloc_initial_table(ip6t, IP6T);
74}
75EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
70/* 77/*
71 We keep a set of rules for each CPU, so we can avoid write-locking 78 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore 79 them in the softirq when updating the counters and therefore
@@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
201 208
202/* Performance critical - called for every packet */ 209/* Performance critical - called for every packet */
203static inline bool 210static inline bool
204do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, 211do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par) 212 struct xt_match_param *par)
206{ 213{
207 par->match = m->u.kernel.match; 214 par->match = m->u.kernel.match;
@@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215} 222}
216 223
217static inline struct ip6t_entry * 224static inline struct ip6t_entry *
218get_entry(void *base, unsigned int offset) 225get_entry(const void *base, unsigned int offset)
219{ 226{
220 return (struct ip6t_entry *)(base + offset); 227 return (struct ip6t_entry *)(base + offset);
221} 228}
@@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6)
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; 236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
230} 237}
231 238
239static inline const struct ip6t_entry_target *
240ip6t_get_target_c(const struct ip6t_entry *e)
241{
242 return ip6t_get_target((struct ip6t_entry *)e);
243}
244
232#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 245#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234/* This cries for unification! */ 247/* This cries for unification! */
@@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = {
264 277
265/* Mildly perf critical (only if packet tracing is on) */ 278/* Mildly perf critical (only if packet tracing is on) */
266static inline int 279static inline int
267get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, 280get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
268 const char *hookname, const char **chainname, 281 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum) 282 const char **comment, unsigned int *rulenum)
270{ 283{
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s); 284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
272 285
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) { 286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */ 287 /* Head of user chain: ERROR target with chainname */
@@ -294,17 +307,18 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
294 return 0; 307 return 0;
295} 308}
296 309
297static void trace_packet(struct sk_buff *skb, 310static void trace_packet(const struct sk_buff *skb,
298 unsigned int hook, 311 unsigned int hook,
299 const struct net_device *in, 312 const struct net_device *in,
300 const struct net_device *out, 313 const struct net_device *out,
301 const char *tablename, 314 const char *tablename,
302 struct xt_table_info *private, 315 const struct xt_table_info *private,
303 struct ip6t_entry *e) 316 const struct ip6t_entry *e)
304{ 317{
305 void *table_base; 318 const void *table_base;
306 const struct ip6t_entry *root; 319 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment; 320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
308 unsigned int rulenum = 0; 322 unsigned int rulenum = 0;
309 323
310 table_base = private->entries[smp_processor_id()]; 324 table_base = private->entries[smp_processor_id()];
@@ -313,10 +327,10 @@ static void trace_packet(struct sk_buff *skb,
313 hookname = chainname = hooknames[hook]; 327 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE]; 328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
315 329
316 IP6T_ENTRY_ITERATE(root, 330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
317 private->size - private->hook_entry[hook], 331 if (get_chainname_rulenum(iter, e, hookname,
318 get_chainname_rulenum, 332 &chainname, &comment, &rulenum) != 0)
319 e, hookname, &chainname, &comment, &rulenum); 333 break;
320 334
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, 335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ", 336 "TRACE: %s:%s:%s:%u ",
@@ -345,9 +359,9 @@ ip6t_do_table(struct sk_buff *skb,
345 /* Initializing verdict to NF_DROP keeps gcc happy. */ 359 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP; 360 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev; 361 const char *indev, *outdev;
348 void *table_base; 362 const void *table_base;
349 struct ip6t_entry *e, *back; 363 struct ip6t_entry *e, *back;
350 struct xt_table_info *private; 364 const struct xt_table_info *private;
351 struct xt_match_param mtpar; 365 struct xt_match_param mtpar;
352 struct xt_target_param tgpar; 366 struct xt_target_param tgpar;
353 367
@@ -378,22 +392,27 @@ ip6t_do_table(struct sk_buff *skb,
378 back = get_entry(table_base, private->underflow[hook]); 392 back = get_entry(table_base, private->underflow[hook]);
379 393
380 do { 394 do {
381 struct ip6t_entry_target *t; 395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
382 397
383 IP_NF_ASSERT(e); 398 IP_NF_ASSERT(e);
384 IP_NF_ASSERT(back); 399 IP_NF_ASSERT(back);
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, 400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) || 401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 402 no_match:
388 e = ip6t_next_entry(e); 403 e = ip6t_next_entry(e);
389 continue; 404 continue;
390 } 405 }
391 406
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
409 goto no_match;
410
392 ADD_COUNTER(e->counters, 411 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) + 412 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1); 413 sizeof(struct ipv6hdr), 1);
395 414
396 t = ip6t_get_target(e); 415 t = ip6t_get_target_c(e);
397 IP_NF_ASSERT(t->u.kernel.target); 416 IP_NF_ASSERT(t->u.kernel.target);
398 417
399#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 418#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
@@ -475,7 +494,7 @@ ip6t_do_table(struct sk_buff *skb,
475/* Figures out from what hook each rule can be called: returns 0 if 494/* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */ 495 there are loops. Puts hook bitmask in comefrom. */
477static int 496static int
478mark_source_chains(struct xt_table_info *newinfo, 497mark_source_chains(const struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0) 498 unsigned int valid_hooks, void *entry0)
480{ 499{
481 unsigned int hook; 500 unsigned int hook;
@@ -493,8 +512,8 @@ mark_source_chains(struct xt_table_info *newinfo,
493 e->counters.pcnt = pos; 512 e->counters.pcnt = pos;
494 513
495 for (;;) { 514 for (;;) {
496 struct ip6t_standard_target *t 515 const struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e); 516 = (void *)ip6t_get_target_c(e);
498 int visited = e->comefrom & (1 << hook); 517 int visited = e->comefrom & (1 << hook);
499 518
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -584,27 +603,23 @@ mark_source_chains(struct xt_table_info *newinfo,
584 return 1; 603 return 1;
585} 604}
586 605
587static int 606static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
588cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
589{ 607{
590 struct xt_mtdtor_param par; 608 struct xt_mtdtor_param par;
591 609
592 if (i && (*i)-- == 0) 610 par.net = net;
593 return 1;
594
595 par.match = m->u.kernel.match; 611 par.match = m->u.kernel.match;
596 par.matchinfo = m->data; 612 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6; 613 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL) 614 if (par.match->destroy != NULL)
599 par.match->destroy(&par); 615 par.match->destroy(&par);
600 module_put(par.match->me); 616 module_put(par.match->me);
601 return 0;
602} 617}
603 618
604static int 619static int
605check_entry(struct ip6t_entry *e, const char *name) 620check_entry(const struct ip6t_entry *e, const char *name)
606{ 621{
607 struct ip6t_entry_target *t; 622 const struct ip6t_entry_target *t;
608 623
609 if (!ip6_checkentry(&e->ipv6)) { 624 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -615,15 +630,14 @@ check_entry(struct ip6t_entry *e, const char *name)
615 e->next_offset) 630 e->next_offset)
616 return -EINVAL; 631 return -EINVAL;
617 632
618 t = ip6t_get_target(e); 633 t = ip6t_get_target_c(e);
619 if (e->target_offset + t->u.target_size > e->next_offset) 634 if (e->target_offset + t->u.target_size > e->next_offset)
620 return -EINVAL; 635 return -EINVAL;
621 636
622 return 0; 637 return 0;
623} 638}
624 639
625static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 640static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
626 unsigned int *i)
627{ 641{
628 const struct ip6t_ip6 *ipv6 = par->entryinfo; 642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
629 int ret; 643 int ret;
@@ -638,13 +652,11 @@ static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
638 par.match->name); 652 par.match->name);
639 return ret; 653 return ret;
640 } 654 }
641 ++*i;
642 return 0; 655 return 0;
643} 656}
644 657
645static int 658static int
646find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 659find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
647 unsigned int *i)
648{ 660{
649 struct xt_match *match; 661 struct xt_match *match;
650 int ret; 662 int ret;
@@ -658,7 +670,7 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 } 670 }
659 m->u.kernel.match = match; 671 m->u.kernel.match = match;
660 672
661 ret = check_match(m, par, i); 673 ret = check_match(m, par);
662 if (ret) 674 if (ret)
663 goto err; 675 goto err;
664 676
@@ -668,10 +680,11 @@ err:
668 return ret; 680 return ret;
669} 681}
670 682
671static int check_target(struct ip6t_entry *e, const char *name) 683static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
672{ 684{
673 struct ip6t_entry_target *t = ip6t_get_target(e); 685 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = { 686 struct xt_tgchk_param par = {
687 .net = net,
675 .table = name, 688 .table = name,
676 .entryinfo = e, 689 .entryinfo = e,
677 .target = t->u.kernel.target, 690 .target = t->u.kernel.target,
@@ -693,27 +706,32 @@ static int check_target(struct ip6t_entry *e, const char *name)
693} 706}
694 707
695static int 708static int
696find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, 709find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
697 unsigned int *i) 710 unsigned int size)
698{ 711{
699 struct ip6t_entry_target *t; 712 struct ip6t_entry_target *t;
700 struct xt_target *target; 713 struct xt_target *target;
701 int ret; 714 int ret;
702 unsigned int j; 715 unsigned int j;
703 struct xt_mtchk_param mtpar; 716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
704 718
705 ret = check_entry(e, name); 719 ret = check_entry(e, name);
706 if (ret) 720 if (ret)
707 return ret; 721 return ret;
708 722
709 j = 0; 723 j = 0;
724 mtpar.net = net;
710 mtpar.table = name; 725 mtpar.table = name;
711 mtpar.entryinfo = &e->ipv6; 726 mtpar.entryinfo = &e->ipv6;
712 mtpar.hook_mask = e->comefrom; 727 mtpar.hook_mask = e->comefrom;
713 mtpar.family = NFPROTO_IPV6; 728 mtpar.family = NFPROTO_IPV6;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 729 xt_ematch_foreach(ematch, e) {
715 if (ret != 0) 730 ret = find_check_match(ematch, &mtpar);
716 goto cleanup_matches; 731 if (ret != 0)
732 goto cleanup_matches;
733 ++j;
734 }
717 735
718 t = ip6t_get_target(e); 736 t = ip6t_get_target(e);
719 target = try_then_request_module(xt_find_target(AF_INET6, 737 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -727,27 +745,29 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
727 } 745 }
728 t->u.kernel.target = target; 746 t->u.kernel.target = target;
729 747
730 ret = check_target(e, name); 748 ret = check_target(e, net, name);
731 if (ret) 749 if (ret)
732 goto err; 750 goto err;
733
734 (*i)++;
735 return 0; 751 return 0;
736 err: 752 err:
737 module_put(t->u.kernel.target->me); 753 module_put(t->u.kernel.target->me);
738 cleanup_matches: 754 cleanup_matches:
739 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 755 xt_ematch_foreach(ematch, e) {
756 if (j-- == 0)
757 break;
758 cleanup_match(ematch, net);
759 }
740 return ret; 760 return ret;
741} 761}
742 762
743static bool check_underflow(struct ip6t_entry *e) 763static bool check_underflow(const struct ip6t_entry *e)
744{ 764{
745 const struct ip6t_entry_target *t; 765 const struct ip6t_entry_target *t;
746 unsigned int verdict; 766 unsigned int verdict;
747 767
748 if (!unconditional(&e->ipv6)) 768 if (!unconditional(&e->ipv6))
749 return false; 769 return false;
750 t = ip6t_get_target(e); 770 t = ip6t_get_target_c(e);
751 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
752 return false; 772 return false;
753 verdict = ((struct ip6t_standard_target *)t)->verdict; 773 verdict = ((struct ip6t_standard_target *)t)->verdict;
@@ -758,12 +778,11 @@ static bool check_underflow(struct ip6t_entry *e)
758static int 778static int
759check_entry_size_and_hooks(struct ip6t_entry *e, 779check_entry_size_and_hooks(struct ip6t_entry *e,
760 struct xt_table_info *newinfo, 780 struct xt_table_info *newinfo,
761 unsigned char *base, 781 const unsigned char *base,
762 unsigned char *limit, 782 const unsigned char *limit,
763 const unsigned int *hook_entries, 783 const unsigned int *hook_entries,
764 const unsigned int *underflows, 784 const unsigned int *underflows,
765 unsigned int valid_hooks, 785 unsigned int valid_hooks)
766 unsigned int *i)
767{ 786{
768 unsigned int h; 787 unsigned int h;
769 788
@@ -800,50 +819,41 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
800 /* Clear counters and comefrom */ 819 /* Clear counters and comefrom */
801 e->counters = ((struct xt_counters) { 0, 0 }); 820 e->counters = ((struct xt_counters) { 0, 0 });
802 e->comefrom = 0; 821 e->comefrom = 0;
803
804 (*i)++;
805 return 0; 822 return 0;
806} 823}
807 824
808static int 825static void cleanup_entry(struct ip6t_entry *e, struct net *net)
809cleanup_entry(struct ip6t_entry *e, unsigned int *i)
810{ 826{
811 struct xt_tgdtor_param par; 827 struct xt_tgdtor_param par;
812 struct ip6t_entry_target *t; 828 struct ip6t_entry_target *t;
813 829 struct xt_entry_match *ematch;
814 if (i && (*i)-- == 0)
815 return 1;
816 830
817 /* Cleanup all matches */ 831 /* Cleanup all matches */
818 IP6T_MATCH_ITERATE(e, cleanup_match, NULL); 832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
819 t = ip6t_get_target(e); 834 t = ip6t_get_target(e);
820 835
836 par.net = net;
821 par.target = t->u.kernel.target; 837 par.target = t->u.kernel.target;
822 par.targinfo = t->data; 838 par.targinfo = t->data;
823 par.family = NFPROTO_IPV6; 839 par.family = NFPROTO_IPV6;
824 if (par.target->destroy != NULL) 840 if (par.target->destroy != NULL)
825 par.target->destroy(&par); 841 par.target->destroy(&par);
826 module_put(par.target->me); 842 module_put(par.target->me);
827 return 0;
828} 843}
829 844
830/* Checks and translates the user-supplied table segment (held in 845/* Checks and translates the user-supplied table segment (held in
831 newinfo) */ 846 newinfo) */
832static int 847static int
833translate_table(const char *name, 848translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
834 unsigned int valid_hooks, 849 const struct ip6t_replace *repl)
835 struct xt_table_info *newinfo,
836 void *entry0,
837 unsigned int size,
838 unsigned int number,
839 const unsigned int *hook_entries,
840 const unsigned int *underflows)
841{ 850{
851 struct ip6t_entry *iter;
842 unsigned int i; 852 unsigned int i;
843 int ret; 853 int ret = 0;
844 854
845 newinfo->size = size; 855 newinfo->size = repl->size;
846 newinfo->number = number; 856 newinfo->number = repl->num_entries;
847 857
848 /* Init all hooks to impossible value. */ 858 /* Init all hooks to impossible value. */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 859 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -854,49 +864,58 @@ translate_table(const char *name,
854 duprintf("translate_table: size %u\n", newinfo->size); 864 duprintf("translate_table: size %u\n", newinfo->size);
855 i = 0; 865 i = 0;
856 /* Walk through entries, checking offsets. */ 866 /* Walk through entries, checking offsets. */
857 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 867 xt_entry_foreach(iter, entry0, newinfo->size) {
858 check_entry_size_and_hooks, 868 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
859 newinfo, 869 entry0 + repl->size,
860 entry0, 870 repl->hook_entry,
861 entry0 + size, 871 repl->underflow,
862 hook_entries, underflows, valid_hooks, &i); 872 repl->valid_hooks);
863 if (ret != 0) 873 if (ret != 0)
864 return ret; 874 return ret;
875 ++i;
876 }
865 877
866 if (i != number) { 878 if (i != repl->num_entries) {
867 duprintf("translate_table: %u not %u entries\n", 879 duprintf("translate_table: %u not %u entries\n",
868 i, number); 880 i, repl->num_entries);
869 return -EINVAL; 881 return -EINVAL;
870 } 882 }
871 883
872 /* Check hooks all assigned */ 884 /* Check hooks all assigned */
873 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 885 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
874 /* Only hooks which are valid */ 886 /* Only hooks which are valid */
875 if (!(valid_hooks & (1 << i))) 887 if (!(repl->valid_hooks & (1 << i)))
876 continue; 888 continue;
877 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 889 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
878 duprintf("Invalid hook entry %u %u\n", 890 duprintf("Invalid hook entry %u %u\n",
879 i, hook_entries[i]); 891 i, repl->hook_entry[i]);
880 return -EINVAL; 892 return -EINVAL;
881 } 893 }
882 if (newinfo->underflow[i] == 0xFFFFFFFF) { 894 if (newinfo->underflow[i] == 0xFFFFFFFF) {
883 duprintf("Invalid underflow %u %u\n", 895 duprintf("Invalid underflow %u %u\n",
884 i, underflows[i]); 896 i, repl->underflow[i]);
885 return -EINVAL; 897 return -EINVAL;
886 } 898 }
887 } 899 }
888 900
889 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 901 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
890 return -ELOOP; 902 return -ELOOP;
891 903
892 /* Finally, each sanity check must pass */ 904 /* Finally, each sanity check must pass */
893 i = 0; 905 i = 0;
894 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 906 xt_entry_foreach(iter, entry0, newinfo->size) {
895 find_check_entry, name, size, &i); 907 ret = find_check_entry(iter, net, repl->name, repl->size);
908 if (ret != 0)
909 break;
910 ++i;
911 }
896 912
897 if (ret != 0) { 913 if (ret != 0) {
898 IP6T_ENTRY_ITERATE(entry0, newinfo->size, 914 xt_entry_foreach(iter, entry0, newinfo->size) {
899 cleanup_entry, &i); 915 if (i-- == 0)
916 break;
917 cleanup_entry(iter, net);
918 }
900 return ret; 919 return ret;
901 } 920 }
902 921
@@ -909,33 +928,11 @@ translate_table(const char *name,
909 return ret; 928 return ret;
910} 929}
911 930
912/* Gets counters. */
913static inline int
914add_entry_to_counter(const struct ip6t_entry *e,
915 struct xt_counters total[],
916 unsigned int *i)
917{
918 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
919
920 (*i)++;
921 return 0;
922}
923
924static inline int
925set_entry_to_counter(const struct ip6t_entry *e,
926 struct ip6t_counters total[],
927 unsigned int *i)
928{
929 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
930
931 (*i)++;
932 return 0;
933}
934
935static void 931static void
936get_counters(const struct xt_table_info *t, 932get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[]) 933 struct xt_counters counters[])
938{ 934{
935 struct ip6t_entry *iter;
939 unsigned int cpu; 936 unsigned int cpu;
940 unsigned int i; 937 unsigned int i;
941 unsigned int curcpu; 938 unsigned int curcpu;
@@ -951,32 +948,32 @@ get_counters(const struct xt_table_info *t,
951 curcpu = smp_processor_id(); 948 curcpu = smp_processor_id();
952 949
953 i = 0; 950 i = 0;
954 IP6T_ENTRY_ITERATE(t->entries[curcpu], 951 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
955 t->size, 952 SET_COUNTER(counters[i], iter->counters.bcnt,
956 set_entry_to_counter, 953 iter->counters.pcnt);
957 counters, 954 ++i;
958 &i); 955 }
959 956
960 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
961 if (cpu == curcpu) 958 if (cpu == curcpu)
962 continue; 959 continue;
963 i = 0; 960 i = 0;
964 xt_info_wrlock(cpu); 961 xt_info_wrlock(cpu);
965 IP6T_ENTRY_ITERATE(t->entries[cpu], 962 xt_entry_foreach(iter, t->entries[cpu], t->size) {
966 t->size, 963 ADD_COUNTER(counters[i], iter->counters.bcnt,
967 add_entry_to_counter, 964 iter->counters.pcnt);
968 counters, 965 ++i;
969 &i); 966 }
970 xt_info_wrunlock(cpu); 967 xt_info_wrunlock(cpu);
971 } 968 }
972 local_bh_enable(); 969 local_bh_enable();
973} 970}
974 971
975static struct xt_counters *alloc_counters(struct xt_table *table) 972static struct xt_counters *alloc_counters(const struct xt_table *table)
976{ 973{
977 unsigned int countersize; 974 unsigned int countersize;
978 struct xt_counters *counters; 975 struct xt_counters *counters;
979 struct xt_table_info *private = table->private; 976 const struct xt_table_info *private = table->private;
980 977
981 /* We need atomic snapshot of counters: rest doesn't change 978 /* We need atomic snapshot of counters: rest doesn't change
982 (other than comefrom, which userspace doesn't care 979 (other than comefrom, which userspace doesn't care
@@ -994,11 +991,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
994 991
995static int 992static int
996copy_entries_to_user(unsigned int total_size, 993copy_entries_to_user(unsigned int total_size,
997 struct xt_table *table, 994 const struct xt_table *table,
998 void __user *userptr) 995 void __user *userptr)
999{ 996{
1000 unsigned int off, num; 997 unsigned int off, num;
1001 struct ip6t_entry *e; 998 const struct ip6t_entry *e;
1002 struct xt_counters *counters; 999 struct xt_counters *counters;
1003 const struct xt_table_info *private = table->private; 1000 const struct xt_table_info *private = table->private;
1004 int ret = 0; 1001 int ret = 0;
@@ -1050,7 +1047,7 @@ copy_entries_to_user(unsigned int total_size,
1050 } 1047 }
1051 } 1048 }
1052 1049
1053 t = ip6t_get_target(e); 1050 t = ip6t_get_target_c(e);
1054 if (copy_to_user(userptr + off + e->target_offset 1051 if (copy_to_user(userptr + off + e->target_offset
1055 + offsetof(struct ip6t_entry_target, 1052 + offsetof(struct ip6t_entry_target,
1056 u.user.name), 1053 u.user.name),
@@ -1067,7 +1064,7 @@ copy_entries_to_user(unsigned int total_size,
1067} 1064}
1068 1065
1069#ifdef CONFIG_COMPAT 1066#ifdef CONFIG_COMPAT
1070static void compat_standard_from_user(void *dst, void *src) 1067static void compat_standard_from_user(void *dst, const void *src)
1071{ 1068{
1072 int v = *(compat_int_t *)src; 1069 int v = *(compat_int_t *)src;
1073 1070
@@ -1076,7 +1073,7 @@ static void compat_standard_from_user(void *dst, void *src)
1076 memcpy(dst, &v, sizeof(v)); 1073 memcpy(dst, &v, sizeof(v));
1077} 1074}
1078 1075
1079static int compat_standard_to_user(void __user *dst, void *src) 1076static int compat_standard_to_user(void __user *dst, const void *src)
1080{ 1077{
1081 compat_int_t cv = *(int *)src; 1078 compat_int_t cv = *(int *)src;
1082 1079
@@ -1085,25 +1082,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1085 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1082 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1086} 1083}
1087 1084
1088static inline int 1085static int compat_calc_entry(const struct ip6t_entry *e,
1089compat_calc_match(struct ip6t_entry_match *m, int *size)
1090{
1091 *size += xt_compat_match_offset(m->u.kernel.match);
1092 return 0;
1093}
1094
1095static int compat_calc_entry(struct ip6t_entry *e,
1096 const struct xt_table_info *info, 1086 const struct xt_table_info *info,
1097 void *base, struct xt_table_info *newinfo) 1087 const void *base, struct xt_table_info *newinfo)
1098{ 1088{
1099 struct ip6t_entry_target *t; 1089 const struct xt_entry_match *ematch;
1090 const struct ip6t_entry_target *t;
1100 unsigned int entry_offset; 1091 unsigned int entry_offset;
1101 int off, i, ret; 1092 int off, i, ret;
1102 1093
1103 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1094 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1104 entry_offset = (void *)e - base; 1095 entry_offset = (void *)e - base;
1105 IP6T_MATCH_ITERATE(e, compat_calc_match, &off); 1096 xt_ematch_foreach(ematch, e)
1106 t = ip6t_get_target(e); 1097 off += xt_compat_match_offset(ematch->u.kernel.match);
1098 t = ip6t_get_target_c(e);
1107 off += xt_compat_target_offset(t->u.kernel.target); 1099 off += xt_compat_target_offset(t->u.kernel.target);
1108 newinfo->size -= off; 1100 newinfo->size -= off;
1109 ret = xt_compat_add_offset(AF_INET6, entry_offset, off); 1101 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
@@ -1124,7 +1116,9 @@ static int compat_calc_entry(struct ip6t_entry *e,
1124static int compat_table_info(const struct xt_table_info *info, 1116static int compat_table_info(const struct xt_table_info *info,
1125 struct xt_table_info *newinfo) 1117 struct xt_table_info *newinfo)
1126{ 1118{
1119 struct ip6t_entry *iter;
1127 void *loc_cpu_entry; 1120 void *loc_cpu_entry;
1121 int ret;
1128 1122
1129 if (!newinfo || !info) 1123 if (!newinfo || !info)
1130 return -EINVAL; 1124 return -EINVAL;
@@ -1133,13 +1127,17 @@ static int compat_table_info(const struct xt_table_info *info,
1133 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1127 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1134 newinfo->initial_entries = 0; 1128 newinfo->initial_entries = 0;
1135 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1129 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1136 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size, 1130 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1137 compat_calc_entry, info, loc_cpu_entry, 1131 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1138 newinfo); 1132 if (ret != 0)
1133 return ret;
1134 }
1135 return 0;
1139} 1136}
1140#endif 1137#endif
1141 1138
1142static int get_info(struct net *net, void __user *user, int *len, int compat) 1139static int get_info(struct net *net, void __user *user,
1140 const int *len, int compat)
1143{ 1141{
1144 char name[IP6T_TABLE_MAXNAMELEN]; 1142 char name[IP6T_TABLE_MAXNAMELEN];
1145 struct xt_table *t; 1143 struct xt_table *t;
@@ -1199,7 +1197,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1199} 1197}
1200 1198
1201static int 1199static int
1202get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len) 1200get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1201 const int *len)
1203{ 1202{
1204 int ret; 1203 int ret;
1205 struct ip6t_get_entries get; 1204 struct ip6t_get_entries get;
@@ -1247,6 +1246,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *oldinfo; 1246 struct xt_table_info *oldinfo;
1248 struct xt_counters *counters; 1247 struct xt_counters *counters;
1249 const void *loc_cpu_old_entry; 1248 const void *loc_cpu_old_entry;
1249 struct ip6t_entry *iter;
1250 1250
1251 ret = 0; 1251 ret = 0;
1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1290,8 +1290,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1290 1290
1291 /* Decrease module usage counts and free resource */ 1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1293 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1294 NULL); 1294 cleanup_entry(iter, net);
1295
1295 xt_free_table_info(oldinfo); 1296 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters, 1297 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0) 1298 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1310,12 +1311,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1310} 1311}
1311 1312
1312static int 1313static int
1313do_replace(struct net *net, void __user *user, unsigned int len) 1314do_replace(struct net *net, const void __user *user, unsigned int len)
1314{ 1315{
1315 int ret; 1316 int ret;
1316 struct ip6t_replace tmp; 1317 struct ip6t_replace tmp;
1317 struct xt_table_info *newinfo; 1318 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry; 1319 void *loc_cpu_entry;
1320 struct ip6t_entry *iter;
1319 1321
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1322 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 return -EFAULT; 1323 return -EFAULT;
@@ -1336,9 +1338,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1336 goto free_newinfo; 1338 goto free_newinfo;
1337 } 1339 }
1338 1340
1339 ret = translate_table(tmp.name, tmp.valid_hooks, 1341 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1340 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1341 tmp.hook_entry, tmp.underflow);
1342 if (ret != 0) 1342 if (ret != 0)
1343 goto free_newinfo; 1343 goto free_newinfo;
1344 1344
@@ -1351,27 +1351,15 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1351 return 0; 1351 return 0;
1352 1352
1353 free_newinfo_untrans: 1353 free_newinfo_untrans:
1354 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1354 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1355 cleanup_entry(iter, net);
1355 free_newinfo: 1356 free_newinfo:
1356 xt_free_table_info(newinfo); 1357 xt_free_table_info(newinfo);
1357 return ret; 1358 return ret;
1358} 1359}
1359 1360
1360/* We're lazy, and add to the first CPU; overflow works its fey magic
1361 * and everything is OK. */
1362static int 1361static int
1363add_counter_to_entry(struct ip6t_entry *e, 1362do_add_counters(struct net *net, const void __user *user, unsigned int len,
1364 const struct xt_counters addme[],
1365 unsigned int *i)
1366{
1367 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1368
1369 (*i)++;
1370 return 0;
1371}
1372
1373static int
1374do_add_counters(struct net *net, void __user *user, unsigned int len,
1375 int compat) 1363 int compat)
1376{ 1364{
1377 unsigned int i, curcpu; 1365 unsigned int i, curcpu;
@@ -1385,6 +1373,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1385 const struct xt_table_info *private; 1373 const struct xt_table_info *private;
1386 int ret = 0; 1374 int ret = 0;
1387 const void *loc_cpu_entry; 1375 const void *loc_cpu_entry;
1376 struct ip6t_entry *iter;
1388#ifdef CONFIG_COMPAT 1377#ifdef CONFIG_COMPAT
1389 struct compat_xt_counters_info compat_tmp; 1378 struct compat_xt_counters_info compat_tmp;
1390 1379
@@ -1443,11 +1432,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1443 curcpu = smp_processor_id(); 1432 curcpu = smp_processor_id();
1444 xt_info_wrlock(curcpu); 1433 xt_info_wrlock(curcpu);
1445 loc_cpu_entry = private->entries[curcpu]; 1434 loc_cpu_entry = private->entries[curcpu];
1446 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1435 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1447 private->size, 1436 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1448 add_counter_to_entry, 1437 ++i;
1449 paddc, 1438 }
1450 &i);
1451 xt_info_wrunlock(curcpu); 1439 xt_info_wrunlock(curcpu);
1452 1440
1453 unlock_up_free: 1441 unlock_up_free:
@@ -1476,45 +1464,40 @@ struct compat_ip6t_replace {
1476static int 1464static int
1477compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, 1465compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1478 unsigned int *size, struct xt_counters *counters, 1466 unsigned int *size, struct xt_counters *counters,
1479 unsigned int *i) 1467 unsigned int i)
1480{ 1468{
1481 struct ip6t_entry_target *t; 1469 struct ip6t_entry_target *t;
1482 struct compat_ip6t_entry __user *ce; 1470 struct compat_ip6t_entry __user *ce;
1483 u_int16_t target_offset, next_offset; 1471 u_int16_t target_offset, next_offset;
1484 compat_uint_t origsize; 1472 compat_uint_t origsize;
1485 int ret; 1473 const struct xt_entry_match *ematch;
1474 int ret = 0;
1486 1475
1487 ret = -EFAULT;
1488 origsize = *size; 1476 origsize = *size;
1489 ce = (struct compat_ip6t_entry __user *)*dstptr; 1477 ce = (struct compat_ip6t_entry __user *)*dstptr;
1490 if (copy_to_user(ce, e, sizeof(struct ip6t_entry))) 1478 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1491 goto out; 1479 copy_to_user(&ce->counters, &counters[i],
1492 1480 sizeof(counters[i])) != 0)
1493 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1481 return -EFAULT;
1494 goto out;
1495 1482
1496 *dstptr += sizeof(struct compat_ip6t_entry); 1483 *dstptr += sizeof(struct compat_ip6t_entry);
1497 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1484 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1498 1485
1499 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1486 xt_ematch_foreach(ematch, e) {
1487 ret = xt_compat_match_to_user(ematch, dstptr, size);
1488 if (ret != 0)
1489 return ret;
1490 }
1500 target_offset = e->target_offset - (origsize - *size); 1491 target_offset = e->target_offset - (origsize - *size);
1501 if (ret)
1502 goto out;
1503 t = ip6t_get_target(e); 1492 t = ip6t_get_target(e);
1504 ret = xt_compat_target_to_user(t, dstptr, size); 1493 ret = xt_compat_target_to_user(t, dstptr, size);
1505 if (ret) 1494 if (ret)
1506 goto out; 1495 return ret;
1507 ret = -EFAULT;
1508 next_offset = e->next_offset - (origsize - *size); 1496 next_offset = e->next_offset - (origsize - *size);
1509 if (put_user(target_offset, &ce->target_offset)) 1497 if (put_user(target_offset, &ce->target_offset) != 0 ||
1510 goto out; 1498 put_user(next_offset, &ce->next_offset) != 0)
1511 if (put_user(next_offset, &ce->next_offset)) 1499 return -EFAULT;
1512 goto out;
1513
1514 (*i)++;
1515 return 0; 1500 return 0;
1516out:
1517 return ret;
1518} 1501}
1519 1502
1520static int 1503static int
@@ -1522,7 +1505,7 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1522 const char *name, 1505 const char *name,
1523 const struct ip6t_ip6 *ipv6, 1506 const struct ip6t_ip6 *ipv6,
1524 unsigned int hookmask, 1507 unsigned int hookmask,
1525 int *size, unsigned int *i) 1508 int *size)
1526{ 1509{
1527 struct xt_match *match; 1510 struct xt_match *match;
1528 1511
@@ -1536,47 +1519,32 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1536 } 1519 }
1537 m->u.kernel.match = match; 1520 m->u.kernel.match = match;
1538 *size += xt_compat_match_offset(match); 1521 *size += xt_compat_match_offset(match);
1539
1540 (*i)++;
1541 return 0;
1542}
1543
1544static int
1545compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1546{
1547 if (i && (*i)-- == 0)
1548 return 1;
1549
1550 module_put(m->u.kernel.match->me);
1551 return 0; 1522 return 0;
1552} 1523}
1553 1524
1554static int 1525static void compat_release_entry(struct compat_ip6t_entry *e)
1555compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1556{ 1526{
1557 struct ip6t_entry_target *t; 1527 struct ip6t_entry_target *t;
1558 1528 struct xt_entry_match *ematch;
1559 if (i && (*i)-- == 0)
1560 return 1;
1561 1529
1562 /* Cleanup all matches */ 1530 /* Cleanup all matches */
1563 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL); 1531 xt_ematch_foreach(ematch, e)
1532 module_put(ematch->u.kernel.match->me);
1564 t = compat_ip6t_get_target(e); 1533 t = compat_ip6t_get_target(e);
1565 module_put(t->u.kernel.target->me); 1534 module_put(t->u.kernel.target->me);
1566 return 0;
1567} 1535}
1568 1536
1569static int 1537static int
1570check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, 1538check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1571 struct xt_table_info *newinfo, 1539 struct xt_table_info *newinfo,
1572 unsigned int *size, 1540 unsigned int *size,
1573 unsigned char *base, 1541 const unsigned char *base,
1574 unsigned char *limit, 1542 const unsigned char *limit,
1575 unsigned int *hook_entries, 1543 const unsigned int *hook_entries,
1576 unsigned int *underflows, 1544 const unsigned int *underflows,
1577 unsigned int *i,
1578 const char *name) 1545 const char *name)
1579{ 1546{
1547 struct xt_entry_match *ematch;
1580 struct ip6t_entry_target *t; 1548 struct ip6t_entry_target *t;
1581 struct xt_target *target; 1549 struct xt_target *target;
1582 unsigned int entry_offset; 1550 unsigned int entry_offset;
@@ -1605,10 +1573,13 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1605 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1573 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1606 entry_offset = (void *)e - (void *)base; 1574 entry_offset = (void *)e - (void *)base;
1607 j = 0; 1575 j = 0;
1608 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name, 1576 xt_ematch_foreach(ematch, e) {
1609 &e->ipv6, e->comefrom, &off, &j); 1577 ret = compat_find_calc_match(ematch, name,
1610 if (ret != 0) 1578 &e->ipv6, e->comefrom, &off);
1611 goto release_matches; 1579 if (ret != 0)
1580 goto release_matches;
1581 ++j;
1582 }
1612 1583
1613 t = compat_ip6t_get_target(e); 1584 t = compat_ip6t_get_target(e);
1614 target = try_then_request_module(xt_find_target(AF_INET6, 1585 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -1640,14 +1611,16 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1640 /* Clear counters and comefrom */ 1611 /* Clear counters and comefrom */
1641 memset(&e->counters, 0, sizeof(e->counters)); 1612 memset(&e->counters, 0, sizeof(e->counters));
1642 e->comefrom = 0; 1613 e->comefrom = 0;
1643
1644 (*i)++;
1645 return 0; 1614 return 0;
1646 1615
1647out: 1616out:
1648 module_put(t->u.kernel.target->me); 1617 module_put(t->u.kernel.target->me);
1649release_matches: 1618release_matches:
1650 IP6T_MATCH_ITERATE(e, compat_release_match, &j); 1619 xt_ematch_foreach(ematch, e) {
1620 if (j-- == 0)
1621 break;
1622 module_put(ematch->u.kernel.match->me);
1623 }
1651 return ret; 1624 return ret;
1652} 1625}
1653 1626
@@ -1661,6 +1634,7 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1661 struct ip6t_entry *de; 1634 struct ip6t_entry *de;
1662 unsigned int origsize; 1635 unsigned int origsize;
1663 int ret, h; 1636 int ret, h;
1637 struct xt_entry_match *ematch;
1664 1638
1665 ret = 0; 1639 ret = 0;
1666 origsize = *size; 1640 origsize = *size;
@@ -1671,10 +1645,11 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1671 *dstptr += sizeof(struct ip6t_entry); 1645 *dstptr += sizeof(struct ip6t_entry);
1672 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1646 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1673 1647
1674 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user, 1648 xt_ematch_foreach(ematch, e) {
1675 dstptr, size); 1649 ret = xt_compat_match_from_user(ematch, dstptr, size);
1676 if (ret) 1650 if (ret != 0)
1677 return ret; 1651 return ret;
1652 }
1678 de->target_offset = e->target_offset - (origsize - *size); 1653 de->target_offset = e->target_offset - (origsize - *size);
1679 t = compat_ip6t_get_target(e); 1654 t = compat_ip6t_get_target(e);
1680 target = t->u.kernel.target; 1655 target = t->u.kernel.target;
@@ -1690,36 +1665,44 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1690 return ret; 1665 return ret;
1691} 1666}
1692 1667
1693static int compat_check_entry(struct ip6t_entry *e, const char *name, 1668static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1694 unsigned int *i) 1669 const char *name)
1695{ 1670{
1696 unsigned int j; 1671 unsigned int j;
1697 int ret; 1672 int ret = 0;
1698 struct xt_mtchk_param mtpar; 1673 struct xt_mtchk_param mtpar;
1674 struct xt_entry_match *ematch;
1699 1675
1700 j = 0; 1676 j = 0;
1677 mtpar.net = net;
1701 mtpar.table = name; 1678 mtpar.table = name;
1702 mtpar.entryinfo = &e->ipv6; 1679 mtpar.entryinfo = &e->ipv6;
1703 mtpar.hook_mask = e->comefrom; 1680 mtpar.hook_mask = e->comefrom;
1704 mtpar.family = NFPROTO_IPV6; 1681 mtpar.family = NFPROTO_IPV6;
1705 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j); 1682 xt_ematch_foreach(ematch, e) {
1706 if (ret) 1683 ret = check_match(ematch, &mtpar);
1707 goto cleanup_matches; 1684 if (ret != 0)
1685 goto cleanup_matches;
1686 ++j;
1687 }
1708 1688
1709 ret = check_target(e, name); 1689 ret = check_target(e, net, name);
1710 if (ret) 1690 if (ret)
1711 goto cleanup_matches; 1691 goto cleanup_matches;
1712
1713 (*i)++;
1714 return 0; 1692 return 0;
1715 1693
1716 cleanup_matches: 1694 cleanup_matches:
1717 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 1695 xt_ematch_foreach(ematch, e) {
1696 if (j-- == 0)
1697 break;
1698 cleanup_match(ematch, net);
1699 }
1718 return ret; 1700 return ret;
1719} 1701}
1720 1702
1721static int 1703static int
1722translate_compat_table(const char *name, 1704translate_compat_table(struct net *net,
1705 const char *name,
1723 unsigned int valid_hooks, 1706 unsigned int valid_hooks,
1724 struct xt_table_info **pinfo, 1707 struct xt_table_info **pinfo,
1725 void **pentry0, 1708 void **pentry0,
@@ -1731,8 +1714,10 @@ translate_compat_table(const char *name,
1731 unsigned int i, j; 1714 unsigned int i, j;
1732 struct xt_table_info *newinfo, *info; 1715 struct xt_table_info *newinfo, *info;
1733 void *pos, *entry0, *entry1; 1716 void *pos, *entry0, *entry1;
1717 struct compat_ip6t_entry *iter0;
1718 struct ip6t_entry *iter1;
1734 unsigned int size; 1719 unsigned int size;
1735 int ret; 1720 int ret = 0;
1736 1721
1737 info = *pinfo; 1722 info = *pinfo;
1738 entry0 = *pentry0; 1723 entry0 = *pentry0;
@@ -1749,13 +1734,17 @@ translate_compat_table(const char *name,
1749 j = 0; 1734 j = 0;
1750 xt_compat_lock(AF_INET6); 1735 xt_compat_lock(AF_INET6);
1751 /* Walk through entries, checking offsets. */ 1736 /* Walk through entries, checking offsets. */
1752 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1737 xt_entry_foreach(iter0, entry0, total_size) {
1753 check_compat_entry_size_and_hooks, 1738 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1754 info, &size, entry0, 1739 entry0,
1755 entry0 + total_size, 1740 entry0 + total_size,
1756 hook_entries, underflows, &j, name); 1741 hook_entries,
1757 if (ret != 0) 1742 underflows,
1758 goto out_unlock; 1743 name);
1744 if (ret != 0)
1745 goto out_unlock;
1746 ++j;
1747 }
1759 1748
1760 ret = -EINVAL; 1749 ret = -EINVAL;
1761 if (j != number) { 1750 if (j != number) {
@@ -1794,9 +1783,12 @@ translate_compat_table(const char *name,
1794 entry1 = newinfo->entries[raw_smp_processor_id()]; 1783 entry1 = newinfo->entries[raw_smp_processor_id()];
1795 pos = entry1; 1784 pos = entry1;
1796 size = total_size; 1785 size = total_size;
1797 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1786 xt_entry_foreach(iter0, entry0, total_size) {
1798 compat_copy_entry_from_user, 1787 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1799 &pos, &size, name, newinfo, entry1); 1788 name, newinfo, entry1);
1789 if (ret != 0)
1790 break;
1791 }
1800 xt_compat_flush_offsets(AF_INET6); 1792 xt_compat_flush_offsets(AF_INET6);
1801 xt_compat_unlock(AF_INET6); 1793 xt_compat_unlock(AF_INET6);
1802 if (ret) 1794 if (ret)
@@ -1807,13 +1799,32 @@ translate_compat_table(const char *name,
1807 goto free_newinfo; 1799 goto free_newinfo;
1808 1800
1809 i = 0; 1801 i = 0;
1810 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1802 xt_entry_foreach(iter1, entry1, newinfo->size) {
1811 name, &i); 1803 ret = compat_check_entry(iter1, net, name);
1804 if (ret != 0)
1805 break;
1806 ++i;
1807 }
1812 if (ret) { 1808 if (ret) {
1809 /*
1810 * The first i matches need cleanup_entry (calls ->destroy)
1811 * because they had called ->check already. The other j-i
1812 * entries need only release.
1813 */
1814 int skip = i;
1813 j -= i; 1815 j -= i;
1814 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1816 xt_entry_foreach(iter0, entry0, newinfo->size) {
1815 compat_release_entry, &j); 1817 if (skip-- > 0)
1816 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1818 continue;
1819 if (j-- == 0)
1820 break;
1821 compat_release_entry(iter0);
1822 }
1823 xt_entry_foreach(iter1, entry1, newinfo->size) {
1824 if (i-- == 0)
1825 break;
1826 cleanup_entry(iter1, net);
1827 }
1817 xt_free_table_info(newinfo); 1828 xt_free_table_info(newinfo);
1818 return ret; 1829 return ret;
1819 } 1830 }
@@ -1831,7 +1842,11 @@ translate_compat_table(const char *name,
1831free_newinfo: 1842free_newinfo:
1832 xt_free_table_info(newinfo); 1843 xt_free_table_info(newinfo);
1833out: 1844out:
1834 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1845 xt_entry_foreach(iter0, entry0, total_size) {
1846 if (j-- == 0)
1847 break;
1848 compat_release_entry(iter0);
1849 }
1835 return ret; 1850 return ret;
1836out_unlock: 1851out_unlock:
1837 xt_compat_flush_offsets(AF_INET6); 1852 xt_compat_flush_offsets(AF_INET6);
@@ -1846,6 +1861,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1846 struct compat_ip6t_replace tmp; 1861 struct compat_ip6t_replace tmp;
1847 struct xt_table_info *newinfo; 1862 struct xt_table_info *newinfo;
1848 void *loc_cpu_entry; 1863 void *loc_cpu_entry;
1864 struct ip6t_entry *iter;
1849 1865
1850 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1866 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 return -EFAULT; 1867 return -EFAULT;
@@ -1868,7 +1884,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1868 goto free_newinfo; 1884 goto free_newinfo;
1869 } 1885 }
1870 1886
1871 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1887 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1872 &newinfo, &loc_cpu_entry, tmp.size, 1888 &newinfo, &loc_cpu_entry, tmp.size,
1873 tmp.num_entries, tmp.hook_entry, 1889 tmp.num_entries, tmp.hook_entry,
1874 tmp.underflow); 1890 tmp.underflow);
@@ -1884,7 +1900,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1884 return 0; 1900 return 0;
1885 1901
1886 free_newinfo_untrans: 1902 free_newinfo_untrans:
1887 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1903 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1904 cleanup_entry(iter, net);
1888 free_newinfo: 1905 free_newinfo:
1889 xt_free_table_info(newinfo); 1906 xt_free_table_info(newinfo);
1890 return ret; 1907 return ret;
@@ -1933,6 +1950,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1933 int ret = 0; 1950 int ret = 0;
1934 const void *loc_cpu_entry; 1951 const void *loc_cpu_entry;
1935 unsigned int i = 0; 1952 unsigned int i = 0;
1953 struct ip6t_entry *iter;
1936 1954
1937 counters = alloc_counters(table); 1955 counters = alloc_counters(table);
1938 if (IS_ERR(counters)) 1956 if (IS_ERR(counters))
@@ -1945,9 +1963,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1945 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1963 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1946 pos = userptr; 1964 pos = userptr;
1947 size = total_size; 1965 size = total_size;
1948 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size, 1966 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1949 compat_copy_entry_to_user, 1967 ret = compat_copy_entry_to_user(iter, &pos,
1950 &pos, &size, counters, &i); 1968 &size, counters, i++);
1969 if (ret != 0)
1970 break;
1971 }
1951 1972
1952 vfree(counters); 1973 vfree(counters);
1953 return ret; 1974 return ret;
@@ -2121,11 +2142,7 @@ struct xt_table *ip6t_register_table(struct net *net,
2121 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2142 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2122 memcpy(loc_cpu_entry, repl->entries, repl->size); 2143 memcpy(loc_cpu_entry, repl->entries, repl->size);
2123 2144
2124 ret = translate_table(table->name, table->valid_hooks, 2145 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2125 newinfo, loc_cpu_entry, repl->size,
2126 repl->num_entries,
2127 repl->hook_entry,
2128 repl->underflow);
2129 if (ret != 0) 2146 if (ret != 0)
2130 goto out_free; 2147 goto out_free;
2131 2148
@@ -2142,17 +2159,19 @@ out:
2142 return ERR_PTR(ret); 2159 return ERR_PTR(ret);
2143} 2160}
2144 2161
2145void ip6t_unregister_table(struct xt_table *table) 2162void ip6t_unregister_table(struct net *net, struct xt_table *table)
2146{ 2163{
2147 struct xt_table_info *private; 2164 struct xt_table_info *private;
2148 void *loc_cpu_entry; 2165 void *loc_cpu_entry;
2149 struct module *table_owner = table->me; 2166 struct module *table_owner = table->me;
2167 struct ip6t_entry *iter;
2150 2168
2151 private = xt_unregister_table(table); 2169 private = xt_unregister_table(table);
2152 2170
2153 /* Decrease module usage counts and free resources */ 2171 /* Decrease module usage counts and free resources */
2154 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2172 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2155 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2173 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2174 cleanup_entry(iter, net);
2156 if (private->number > private->initial_entries) 2175 if (private->number > private->initial_entries)
2157 module_put(table_owner); 2176 module_put(table_owner);
2158 xt_free_table_info(private); 2177 xt_free_table_info(private);
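
The ip6_tables.c changes above replace the IP6T_ENTRY_ITERATE()/IP6T_MATCH_ITERATE() callback macros with open-coded xt_entry_foreach()/xt_ematch_foreach() loops and pass struct net * down through the check and cleanup paths. A minimal, self-contained sketch of the underlying iteration idea follows; all names here (fake_entry, entry_foreach) are invented for illustration and are not the kernel's definitions. The point is only that rules are variable-length records packed into one blob, and each record's next_offset field says how far to advance to reach the next one.

/*
 * Illustrative userspace sketch, not kernel code: walk variable-length
 * entries in a blob by following each entry's next_offset field.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct fake_entry {                  /* stand-in for struct ip6t_entry */
	unsigned short next_offset;  /* total size of this entry       */
	unsigned short target_offset;
	char data[];                 /* matches + target follow inline */
};

#define entry_foreach(pos, blob, size)                                      \
	for ((pos) = (struct fake_entry *)(blob);                           \
	     (char *)(pos) < (char *)(blob) + (size);                       \
	     (pos) = (struct fake_entry *)((char *)(pos) + (pos)->next_offset))

int main(void)
{
	unsigned char blob[64];
	struct fake_entry *e;
	size_t used = 0;
	int i;

	/* Build two dummy entries of different sizes in one blob. */
	for (i = 0; i < 2; i++) {
		e = (struct fake_entry *)(blob + used);
		memset(e, 0, sizeof(*e));
		e->next_offset = (unsigned short)(sizeof(*e) + (i + 1) * 8);
		used += e->next_offset;
	}

	/* Walk them the same way the converted code walks its entries. */
	entry_foreach(e, blob, used)
		printf("entry at offset %td, size %u\n",
		       (char *)e - (char *)blob, (unsigned)e->next_offset);
	return 0;
}

The open-coded loop form also keeps the error paths in the hunks above simple: on failure the same loop is rerun with a countdown (if (j-- == 0) break;) so that only the entries that were successfully checked get cleaned up.
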
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816a..39b50c3768e8 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -15,6 +15,7 @@
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17 17
18#include <linux/gfp.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/skbuff.h> 20#include <linux/skbuff.h>
20#include <linux/icmpv6.h> 21#include <linux/icmpv6.h>
@@ -169,7 +170,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
169 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) 170 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
170 skb_in->dev = net->loopback_dev; 171 skb_in->dev = net->loopback_dev;
171 172
172 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); 173 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
173} 174}
174 175
175static unsigned int 176static unsigned int
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ad378efd0eb8..d6fc9aff3163 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 14#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <linux/slab.h>
15 16
16MODULE_LICENSE("GPL"); 17MODULE_LICENSE("GPL");
17MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 18MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -21,99 +22,26 @@ MODULE_DESCRIPTION("ip6tables filter table");
21 (1 << NF_INET_FORWARD) | \ 22 (1 << NF_INET_FORWARD) | \
22 (1 << NF_INET_LOCAL_OUT)) 23 (1 << NF_INET_LOCAL_OUT))
23 24
24static struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[3];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "filter",
32 .valid_hooks = FILTER_VALID_HOOKS,
33 .num_entries = 4,
34 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_LOCAL_IN] = 0,
37 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
38 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
39 },
40 .underflow = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
44 },
45 },
46 .entries = {
47 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
48 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
49 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
50 },
51 .term = IP6T_ERROR_INIT, /* ERROR */
52};
53
54static const struct xt_table packet_filter = { 25static const struct xt_table packet_filter = {
55 .name = "filter", 26 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 27 .valid_hooks = FILTER_VALID_HOOKS,
57 .me = THIS_MODULE, 28 .me = THIS_MODULE,
58 .af = NFPROTO_IPV6, 29 .af = NFPROTO_IPV6,
30 .priority = NF_IP6_PRI_FILTER,
59}; 31};
60 32
61/* The work comes in here from netfilter.c. */ 33/* The work comes in here from netfilter.c. */
62static unsigned int 34static unsigned int
63ip6t_in_hook(unsigned int hook, 35ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
64 struct sk_buff *skb, 36 const struct net_device *in, const struct net_device *out,
65 const struct net_device *in, 37 int (*okfn)(struct sk_buff *))
66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *))
68{
69 return ip6t_do_table(skb, hook, in, out,
70 dev_net(in)->ipv6.ip6table_filter);
71}
72
73static unsigned int
74ip6t_local_out_hook(unsigned int hook,
75 struct sk_buff *skb,
76 const struct net_device *in,
77 const struct net_device *out,
78 int (*okfn)(struct sk_buff *))
79{ 38{
80#if 0 39 const struct net *net = dev_net((in != NULL) ? in : out);
81 /* root is playing with raw sockets. */
82 if (skb->len < sizeof(struct iphdr) ||
83 ip_hdrlen(skb) < sizeof(struct iphdr)) {
84 if (net_ratelimit())
85 printk("ip6t_hook: happy cracking.\n");
86 return NF_ACCEPT;
87 }
88#endif
89 40
90 return ip6t_do_table(skb, hook, in, out, 41 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
91 dev_net(out)->ipv6.ip6table_filter);
92} 42}
93 43
94static struct nf_hook_ops ip6t_ops[] __read_mostly = { 44static struct nf_hook_ops *filter_ops __read_mostly;
95 {
96 .hook = ip6t_in_hook,
97 .owner = THIS_MODULE,
98 .pf = NFPROTO_IPV6,
99 .hooknum = NF_INET_LOCAL_IN,
100 .priority = NF_IP6_PRI_FILTER,
101 },
102 {
103 .hook = ip6t_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV6,
106 .hooknum = NF_INET_FORWARD,
107 .priority = NF_IP6_PRI_FILTER,
108 },
109 {
110 .hook = ip6t_local_out_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV6,
113 .hooknum = NF_INET_LOCAL_OUT,
114 .priority = NF_IP6_PRI_FILTER,
115 },
116};
117 45
118/* Default to forward because I got too much mail already. */ 46/* Default to forward because I got too much mail already. */
119static int forward = NF_ACCEPT; 47static int forward = NF_ACCEPT;
@@ -121,9 +49,18 @@ module_param(forward, bool, 0000);
121 49
122static int __net_init ip6table_filter_net_init(struct net *net) 50static int __net_init ip6table_filter_net_init(struct net *net)
123{ 51{
124 /* Register table */ 52 struct ip6t_replace *repl;
53
54 repl = ip6t_alloc_initial_table(&packet_filter);
55 if (repl == NULL)
56 return -ENOMEM;
57 /* Entry 1 is the FORWARD hook */
58 ((struct ip6t_standard *)repl->entries)[1].target.verdict =
59 -forward - 1;
60
125 net->ipv6.ip6table_filter = 61 net->ipv6.ip6table_filter =
126 ip6t_register_table(net, &packet_filter, &initial_table.repl); 62 ip6t_register_table(net, &packet_filter, repl);
63 kfree(repl);
127 if (IS_ERR(net->ipv6.ip6table_filter)) 64 if (IS_ERR(net->ipv6.ip6table_filter))
128 return PTR_ERR(net->ipv6.ip6table_filter); 65 return PTR_ERR(net->ipv6.ip6table_filter);
129 return 0; 66 return 0;
@@ -131,7 +68,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
131 68
132static void __net_exit ip6table_filter_net_exit(struct net *net) 69static void __net_exit ip6table_filter_net_exit(struct net *net)
133{ 70{
134 ip6t_unregister_table(net->ipv6.ip6table_filter); 71 ip6t_unregister_table(net, net->ipv6.ip6table_filter);
135} 72}
136 73
137static struct pernet_operations ip6table_filter_net_ops = { 74static struct pernet_operations ip6table_filter_net_ops = {
@@ -148,17 +85,16 @@ static int __init ip6table_filter_init(void)
148 return -EINVAL; 85 return -EINVAL;
149 } 86 }
150 87
151 /* Entry 1 is the FORWARD hook */
152 initial_table.entries[1].target.verdict = -forward - 1;
153
154 ret = register_pernet_subsys(&ip6table_filter_net_ops); 88 ret = register_pernet_subsys(&ip6table_filter_net_ops);
155 if (ret < 0) 89 if (ret < 0)
156 return ret; 90 return ret;
157 91
158 /* Register hooks */ 92 /* Register hooks */
159 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 93 filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
160 if (ret < 0) 94 if (IS_ERR(filter_ops)) {
95 ret = PTR_ERR(filter_ops);
161 goto cleanup_table; 96 goto cleanup_table;
97 }
162 98
163 return ret; 99 return ret;
164 100
@@ -169,7 +105,7 @@ static int __init ip6table_filter_init(void)
169 105
170static void __exit ip6table_filter_fini(void) 106static void __exit ip6table_filter_fini(void)
171{ 107{
172 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 108 xt_hook_unlink(&packet_filter, filter_ops);
173 unregister_pernet_subsys(&ip6table_filter_net_ops); 109 unregister_pernet_subsys(&ip6table_filter_net_ops);
174} 110}
175 111
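
In ip6table_filter.c the hand-written static initial_table and the per-hook nf_hook_ops array are dropped in favour of ip6t_alloc_initial_table() plus xt_hook_link(), with the table registered per network namespace. Below is a small userspace sketch of just the "allocate a default ruleset, then patch the FORWARD policy from the module parameter" step; the names (alloc_initial_table, struct rule, NUM_HOOKS) are invented for illustration, and the real code encodes the verdict as -forward - 1 rather than storing it directly.

#include <stdio.h>
#include <stdlib.h>

#define NUM_HOOKS      3     /* LOCAL_IN, FORWARD, LOCAL_OUT */
#define VERDICT_ACCEPT 1

struct rule {
	int verdict;
};

struct initial_table {
	struct rule entries[NUM_HOOKS];   /* one policy rule per hook */
};

/* Stands in for ip6t_alloc_initial_table(): all-ACCEPT defaults. */
static struct initial_table *alloc_initial_table(void)
{
	struct initial_table *t = malloc(sizeof(*t));
	int i;

	if (t == NULL)
		return NULL;
	for (i = 0; i < NUM_HOOKS; i++)
		t->entries[i].verdict = VERDICT_ACCEPT;
	return t;
}

int main(void)
{
	int forward = VERDICT_ACCEPT;     /* module parameter analogue */
	struct initial_table *repl = alloc_initial_table();

	if (repl == NULL)
		return 1;
	/* Entry 1 is the FORWARD hook, patched before registration. */
	repl->entries[1].verdict = forward;
	printf("FORWARD policy: %d\n", repl->entries[1].verdict);
	free(repl);                       /* kfree(repl) analogue */
	return 0;
}
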
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a929c19d30e3..6a102b57f356 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/netfilter_ipv6/ip6_tables.h> 12#include <linux/netfilter_ipv6/ip6_tables.h>
13#include <linux/slab.h>
13 14
14MODULE_LICENSE("GPL"); 15MODULE_LICENSE("GPL");
15MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 16MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -21,80 +22,17 @@ MODULE_DESCRIPTION("ip6tables mangle table");
21 (1 << NF_INET_LOCAL_OUT) | \ 22 (1 << NF_INET_LOCAL_OUT) | \
22 (1 << NF_INET_POST_ROUTING)) 23 (1 << NF_INET_POST_ROUTING))
23 24
24static const struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[5];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "mangle",
32 .valid_hooks = MANGLE_VALID_HOOKS,
33 .num_entries = 6,
34 .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_PRE_ROUTING] = 0,
37 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
38 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
39 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
40 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
41 },
42 .underflow = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
45 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
48 },
49 },
50 .entries = {
51 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 },
57 .term = IP6T_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table packet_mangler = { 25static const struct xt_table packet_mangler = {
61 .name = "mangle", 26 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 27 .valid_hooks = MANGLE_VALID_HOOKS,
63 .me = THIS_MODULE, 28 .me = THIS_MODULE,
64 .af = NFPROTO_IPV6, 29 .af = NFPROTO_IPV6,
30 .priority = NF_IP6_PRI_MANGLE,
65}; 31};
66 32
67/* The work comes in here from netfilter.c. */
68static unsigned int
69ip6t_in_hook(unsigned int hook,
70 struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{
75 return ip6t_do_table(skb, hook, in, out,
76 dev_net(in)->ipv6.ip6table_mangle);
77}
78
79static unsigned int
80ip6t_post_routing_hook(unsigned int hook,
81 struct sk_buff *skb,
82 const struct net_device *in,
83 const struct net_device *out,
84 int (*okfn)(struct sk_buff *))
85{
86 return ip6t_do_table(skb, hook, in, out,
87 dev_net(out)->ipv6.ip6table_mangle);
88}
89
90static unsigned int 33static unsigned int
91ip6t_local_out_hook(unsigned int hook, 34ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
92 struct sk_buff *skb,
93 const struct net_device *in,
94 const struct net_device *out,
95 int (*okfn)(struct sk_buff *))
96{ 35{
97
98 unsigned int ret; 36 unsigned int ret;
99 struct in6_addr saddr, daddr; 37 struct in6_addr saddr, daddr;
100 u_int8_t hop_limit; 38 u_int8_t hop_limit;
@@ -119,7 +57,7 @@ ip6t_local_out_hook(unsigned int hook,
119 /* flowlabel and prio (includes version, which shouldn't change either */ 57 /* flowlabel and prio (includes version, which shouldn't change either */
120 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 58 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
121 59
122 ret = ip6t_do_table(skb, hook, in, out, 60 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
123 dev_net(out)->ipv6.ip6table_mangle); 61 dev_net(out)->ipv6.ip6table_mangle);
124 62
125 if (ret != NF_DROP && ret != NF_STOLEN && 63 if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -132,49 +70,33 @@ ip6t_local_out_hook(unsigned int hook,
132 return ret; 70 return ret;
133} 71}
134 72
135static struct nf_hook_ops ip6t_ops[] __read_mostly = { 73/* The work comes in here from netfilter.c. */
136 { 74static unsigned int
137 .hook = ip6t_in_hook, 75ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
138 .owner = THIS_MODULE, 76 const struct net_device *in, const struct net_device *out,
139 .pf = NFPROTO_IPV6, 77 int (*okfn)(struct sk_buff *))
140 .hooknum = NF_INET_PRE_ROUTING, 78{
141 .priority = NF_IP6_PRI_MANGLE, 79 if (hook == NF_INET_LOCAL_OUT)
142 }, 80 return ip6t_mangle_out(skb, out);
143 { 81 if (hook == NF_INET_POST_ROUTING)
144 .hook = ip6t_in_hook, 82 return ip6t_do_table(skb, hook, in, out,
145 .owner = THIS_MODULE, 83 dev_net(out)->ipv6.ip6table_mangle);
146 .pf = NFPROTO_IPV6, 84 /* INPUT/FORWARD */
147 .hooknum = NF_INET_LOCAL_IN, 85 return ip6t_do_table(skb, hook, in, out,
148 .priority = NF_IP6_PRI_MANGLE, 86 dev_net(in)->ipv6.ip6table_mangle);
149 }, 87}
150 {
151 .hook = ip6t_in_hook,
152 .owner = THIS_MODULE,
153 .pf = NFPROTO_IPV6,
154 .hooknum = NF_INET_FORWARD,
155 .priority = NF_IP6_PRI_MANGLE,
156 },
157 {
158 .hook = ip6t_local_out_hook,
159 .owner = THIS_MODULE,
160 .pf = NFPROTO_IPV6,
161 .hooknum = NF_INET_LOCAL_OUT,
162 .priority = NF_IP6_PRI_MANGLE,
163 },
164 {
165 .hook = ip6t_post_routing_hook,
166 .owner = THIS_MODULE,
167 .pf = NFPROTO_IPV6,
168 .hooknum = NF_INET_POST_ROUTING,
169 .priority = NF_IP6_PRI_MANGLE,
170 },
171};
172 88
89static struct nf_hook_ops *mangle_ops __read_mostly;
173static int __net_init ip6table_mangle_net_init(struct net *net) 90static int __net_init ip6table_mangle_net_init(struct net *net)
174{ 91{
175 /* Register table */ 92 struct ip6t_replace *repl;
93
94 repl = ip6t_alloc_initial_table(&packet_mangler);
95 if (repl == NULL)
96 return -ENOMEM;
176 net->ipv6.ip6table_mangle = 97 net->ipv6.ip6table_mangle =
177 ip6t_register_table(net, &packet_mangler, &initial_table.repl); 98 ip6t_register_table(net, &packet_mangler, repl);
99 kfree(repl);
178 if (IS_ERR(net->ipv6.ip6table_mangle)) 100 if (IS_ERR(net->ipv6.ip6table_mangle))
179 return PTR_ERR(net->ipv6.ip6table_mangle); 101 return PTR_ERR(net->ipv6.ip6table_mangle);
180 return 0; 102 return 0;
@@ -182,7 +104,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
182 104
183static void __net_exit ip6table_mangle_net_exit(struct net *net) 105static void __net_exit ip6table_mangle_net_exit(struct net *net)
184{ 106{
185 ip6t_unregister_table(net->ipv6.ip6table_mangle); 107 ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
186} 108}
187 109
188static struct pernet_operations ip6table_mangle_net_ops = { 110static struct pernet_operations ip6table_mangle_net_ops = {
@@ -199,9 +121,11 @@ static int __init ip6table_mangle_init(void)
199 return ret; 121 return ret;
200 122
201 /* Register hooks */ 123 /* Register hooks */
202 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 124 mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
203 if (ret < 0) 125 if (IS_ERR(mangle_ops)) {
126 ret = PTR_ERR(mangle_ops);
204 goto cleanup_table; 127 goto cleanup_table;
128 }
205 129
206 return ret; 130 return ret;
207 131
@@ -212,7 +136,7 @@ static int __init ip6table_mangle_init(void)
212 136
213static void __exit ip6table_mangle_fini(void) 137static void __exit ip6table_mangle_fini(void)
214{ 138{
215 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 139 xt_hook_unlink(&packet_mangler, mangle_ops);
216 unregister_pernet_subsys(&ip6table_mangle_net_ops); 140 unregister_pernet_subsys(&ip6table_mangle_net_ops);
217} 141}
218 142
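
The conversion applied to this table (and repeated for the raw and security tables below) folds the per-hook wrapper functions and the hand-built nf_hook_ops array into a single dispatcher passed to xt_hook_link(), which allocates and registers one hook per bit in the table's valid_hooks mask, at the priority now carried in the xt_table itself. A condensed kernel-context sketch of the result, keeping the existing pernet registration (ip6table_mangle_net_ops) as before:

static struct nf_hook_ops *mangle_ops __read_mostly;

/* One entry point for all five mangle hooks. */
static unsigned int
ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
{
        if (hook == NF_INET_LOCAL_OUT)
                return ip6t_mangle_out(skb, out);  /* re-checks rewritten header fields */
        if (hook == NF_INET_POST_ROUTING)
                return ip6t_do_table(skb, hook, in, out,
                                     dev_net(out)->ipv6.ip6table_mangle);
        /* PRE_ROUTING, LOCAL_IN, FORWARD: "in" is always valid here. */
        return ip6t_do_table(skb, hook, in, out,
                             dev_net(in)->ipv6.ip6table_mangle);
}

static int __init ip6table_mangle_init(void)
{
        int ret = register_pernet_subsys(&ip6table_mangle_net_ops);

        if (ret < 0)
                return ret;

        /* Builds one nf_hook_ops per valid_hooks bit of packet_mangler,
         * using the table's new .priority field. */
        mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
        if (IS_ERR(mangle_ops)) {
                ret = PTR_ERR(mangle_ops);
                unregister_pernet_subsys(&ip6table_mangle_net_ops);
        }
        return ret;
}

Moving the priority into struct xt_table is what lets xt_hook_link() stay generic; the same helper serves every ip6tables table module touched in this merge.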
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index ed1a1180f3b3..5b9926a011bd 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -5,88 +5,41 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/netfilter_ipv6/ip6_tables.h> 7#include <linux/netfilter_ipv6/ip6_tables.h>
8#include <linux/slab.h>
8 9
9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
10 11
11static const struct
12{
13 struct ip6t_replace repl;
14 struct ip6t_standard entries[2];
15 struct ip6t_error term;
16} initial_table __net_initdata = {
17 .repl = {
18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS,
20 .num_entries = 3,
21 .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
22 .hook_entry = {
23 [NF_INET_PRE_ROUTING] = 0,
24 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
25 },
26 .underflow = {
27 [NF_INET_PRE_ROUTING] = 0,
28 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
29 },
30 },
31 .entries = {
32 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
33 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
34 },
35 .term = IP6T_ERROR_INIT, /* ERROR */
36};
37
38static const struct xt_table packet_raw = { 12static const struct xt_table packet_raw = {
39 .name = "raw", 13 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 14 .valid_hooks = RAW_VALID_HOOKS,
41 .me = THIS_MODULE, 15 .me = THIS_MODULE,
42 .af = NFPROTO_IPV6, 16 .af = NFPROTO_IPV6,
17 .priority = NF_IP6_PRI_RAW,
43}; 18};
44 19
45/* The work comes in here from netfilter.c. */ 20/* The work comes in here from netfilter.c. */
46static unsigned int 21static unsigned int
47ip6t_pre_routing_hook(unsigned int hook, 22ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
48 struct sk_buff *skb, 23 const struct net_device *in, const struct net_device *out,
49 const struct net_device *in, 24 int (*okfn)(struct sk_buff *))
50 const struct net_device *out,
51 int (*okfn)(struct sk_buff *))
52{ 25{
53 return ip6t_do_table(skb, hook, in, out, 26 const struct net *net = dev_net((in != NULL) ? in : out);
54 dev_net(in)->ipv6.ip6table_raw);
55}
56 27
57static unsigned int 28 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
58ip6t_local_out_hook(unsigned int hook,
59 struct sk_buff *skb,
60 const struct net_device *in,
61 const struct net_device *out,
62 int (*okfn)(struct sk_buff *))
63{
64 return ip6t_do_table(skb, hook, in, out,
65 dev_net(out)->ipv6.ip6table_raw);
66} 29}
67 30
68static struct nf_hook_ops ip6t_ops[] __read_mostly = { 31static struct nf_hook_ops *rawtable_ops __read_mostly;
69 {
70 .hook = ip6t_pre_routing_hook,
71 .pf = NFPROTO_IPV6,
72 .hooknum = NF_INET_PRE_ROUTING,
73 .priority = NF_IP6_PRI_FIRST,
74 .owner = THIS_MODULE,
75 },
76 {
77 .hook = ip6t_local_out_hook,
78 .pf = NFPROTO_IPV6,
79 .hooknum = NF_INET_LOCAL_OUT,
80 .priority = NF_IP6_PRI_FIRST,
81 .owner = THIS_MODULE,
82 },
83};
84 32
85static int __net_init ip6table_raw_net_init(struct net *net) 33static int __net_init ip6table_raw_net_init(struct net *net)
86{ 34{
87 /* Register table */ 35 struct ip6t_replace *repl;
36
37 repl = ip6t_alloc_initial_table(&packet_raw);
38 if (repl == NULL)
39 return -ENOMEM;
88 net->ipv6.ip6table_raw = 40 net->ipv6.ip6table_raw =
89 ip6t_register_table(net, &packet_raw, &initial_table.repl); 41 ip6t_register_table(net, &packet_raw, repl);
42 kfree(repl);
90 if (IS_ERR(net->ipv6.ip6table_raw)) 43 if (IS_ERR(net->ipv6.ip6table_raw))
91 return PTR_ERR(net->ipv6.ip6table_raw); 44 return PTR_ERR(net->ipv6.ip6table_raw);
92 return 0; 45 return 0;
@@ -94,7 +47,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
94 47
95static void __net_exit ip6table_raw_net_exit(struct net *net) 48static void __net_exit ip6table_raw_net_exit(struct net *net)
96{ 49{
97 ip6t_unregister_table(net->ipv6.ip6table_raw); 50 ip6t_unregister_table(net, net->ipv6.ip6table_raw);
98} 51}
99 52
100static struct pernet_operations ip6table_raw_net_ops = { 53static struct pernet_operations ip6table_raw_net_ops = {
@@ -111,9 +64,11 @@ static int __init ip6table_raw_init(void)
111 return ret; 64 return ret;
112 65
113 /* Register hooks */ 66 /* Register hooks */
114 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 67 rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
115 if (ret < 0) 68 if (IS_ERR(rawtable_ops)) {
69 ret = PTR_ERR(rawtable_ops);
116 goto cleanup_table; 70 goto cleanup_table;
71 }
117 72
118 return ret; 73 return ret;
119 74
@@ -124,7 +79,7 @@ static int __init ip6table_raw_init(void)
124 79
125static void __exit ip6table_raw_fini(void) 80static void __exit ip6table_raw_fini(void)
126{ 81{
127 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 82 xt_hook_unlink(&packet_raw, rawtable_ops);
128 unregister_pernet_subsys(&ip6table_raw_net_ops); 83 unregister_pernet_subsys(&ip6table_raw_net_ops);
129} 84}
130 85
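
Two idioms from this hunk recur in the security table that follows: the compiled-in initial_table blob is replaced by a temporary copy from ip6t_alloc_initial_table() that is freed once the table is registered, and the owning namespace is taken from whichever device pointer the hook actually has. A sketch condensed from the hunks above:

static unsigned int
ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
                  const struct net_device *in, const struct net_device *out,
                  int (*okfn)(struct sk_buff *))
{
        /* PRE_ROUTING only has "in", LOCAL_OUT only has "out". */
        const struct net *net = dev_net((in != NULL) ? in : out);

        return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
}

static int __net_init ip6table_raw_net_init(struct net *net)
{
        struct ip6t_replace *repl;

        /* Generate the default ACCEPT ruleset from the table template. */
        repl = ip6t_alloc_initial_table(&packet_raw);
        if (repl == NULL)
                return -ENOMEM;
        net->ipv6.ip6table_raw = ip6t_register_table(net, &packet_raw, repl);
        kfree(repl);            /* the registered table keeps its own copy */
        if (IS_ERR(net->ipv6.ip6table_raw))
                return PTR_ERR(net->ipv6.ip6table_raw);
        return 0;
}

static void __net_exit ip6table_raw_net_exit(struct net *net)
{
        ip6t_unregister_table(net, net->ipv6.ip6table_raw);
}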
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 41b444c60934..91aa2b4d83c9 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/netfilter_ipv6/ip6_tables.h> 19#include <linux/netfilter_ipv6/ip6_tables.h>
20#include <linux/slab.h>
20 21
21MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
22MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); 23MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
@@ -26,106 +27,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
26 (1 << NF_INET_FORWARD) | \ 27 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT) 28 (1 << NF_INET_LOCAL_OUT)
28 29
29static const struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __net_initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static const struct xt_table security_table = { 30static const struct xt_table security_table = {
60 .name = "security", 31 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS, 32 .valid_hooks = SECURITY_VALID_HOOKS,
62 .me = THIS_MODULE, 33 .me = THIS_MODULE,
63 .af = NFPROTO_IPV6, 34 .af = NFPROTO_IPV6,
35 .priority = NF_IP6_PRI_SECURITY,
64}; 36};
65 37
66static unsigned int 38static unsigned int
67ip6t_local_in_hook(unsigned int hook, 39ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
68 struct sk_buff *skb, 40 const struct net_device *in,
69 const struct net_device *in, 41 const struct net_device *out,
70 const struct net_device *out, 42 int (*okfn)(struct sk_buff *))
71 int (*okfn)(struct sk_buff *))
72{
73 return ip6t_do_table(skb, hook, in, out,
74 dev_net(in)->ipv6.ip6table_security);
75}
76
77static unsigned int
78ip6t_forward_hook(unsigned int hook,
79 struct sk_buff *skb,
80 const struct net_device *in,
81 const struct net_device *out,
82 int (*okfn)(struct sk_buff *))
83{ 43{
84 return ip6t_do_table(skb, hook, in, out, 44 const struct net *net = dev_net((in != NULL) ? in : out);
85 dev_net(in)->ipv6.ip6table_security);
86}
87 45
88static unsigned int 46 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
89ip6t_local_out_hook(unsigned int hook,
90 struct sk_buff *skb,
91 const struct net_device *in,
92 const struct net_device *out,
93 int (*okfn)(struct sk_buff *))
94{
95 /* TBD: handle short packets via raw socket */
96 return ip6t_do_table(skb, hook, in, out,
97 dev_net(out)->ipv6.ip6table_security);
98} 47}
99 48
100static struct nf_hook_ops ip6t_ops[] __read_mostly = { 49static struct nf_hook_ops *sectbl_ops __read_mostly;
101 {
102 .hook = ip6t_local_in_hook,
103 .owner = THIS_MODULE,
104 .pf = NFPROTO_IPV6,
105 .hooknum = NF_INET_LOCAL_IN,
106 .priority = NF_IP6_PRI_SECURITY,
107 },
108 {
109 .hook = ip6t_forward_hook,
110 .owner = THIS_MODULE,
111 .pf = NFPROTO_IPV6,
112 .hooknum = NF_INET_FORWARD,
113 .priority = NF_IP6_PRI_SECURITY,
114 },
115 {
116 .hook = ip6t_local_out_hook,
117 .owner = THIS_MODULE,
118 .pf = NFPROTO_IPV6,
119 .hooknum = NF_INET_LOCAL_OUT,
120 .priority = NF_IP6_PRI_SECURITY,
121 },
122};
123 50
124static int __net_init ip6table_security_net_init(struct net *net) 51static int __net_init ip6table_security_net_init(struct net *net)
125{ 52{
126 net->ipv6.ip6table_security = 53 struct ip6t_replace *repl;
127 ip6t_register_table(net, &security_table, &initial_table.repl);
128 54
55 repl = ip6t_alloc_initial_table(&security_table);
56 if (repl == NULL)
57 return -ENOMEM;
58 net->ipv6.ip6table_security =
59 ip6t_register_table(net, &security_table, repl);
60 kfree(repl);
129 if (IS_ERR(net->ipv6.ip6table_security)) 61 if (IS_ERR(net->ipv6.ip6table_security))
130 return PTR_ERR(net->ipv6.ip6table_security); 62 return PTR_ERR(net->ipv6.ip6table_security);
131 63
@@ -134,7 +66,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
134 66
135static void __net_exit ip6table_security_net_exit(struct net *net) 67static void __net_exit ip6table_security_net_exit(struct net *net)
136{ 68{
137 ip6t_unregister_table(net->ipv6.ip6table_security); 69 ip6t_unregister_table(net, net->ipv6.ip6table_security);
138} 70}
139 71
140static struct pernet_operations ip6table_security_net_ops = { 72static struct pernet_operations ip6table_security_net_ops = {
@@ -150,9 +82,11 @@ static int __init ip6table_security_init(void)
150 if (ret < 0) 82 if (ret < 0)
151 return ret; 83 return ret;
152 84
153 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 85 sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
154 if (ret < 0) 86 if (IS_ERR(sectbl_ops)) {
87 ret = PTR_ERR(sectbl_ops);
155 goto cleanup_table; 88 goto cleanup_table;
89 }
156 90
157 return ret; 91 return ret;
158 92
@@ -163,7 +97,7 @@ cleanup_table:
163 97
164static void __exit ip6table_security_fini(void) 98static void __exit ip6table_security_fini(void)
165{ 99{
166 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 100 xt_hook_unlink(&security_table, sectbl_ops);
167 unregister_pernet_subsys(&ip6table_security_net_ops); 101 unregister_pernet_subsys(&ip6table_security_net_ops);
168} 102}
169 103
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 0956ebabbff2..996c3f41fecd 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -27,6 +27,7 @@
27#include <net/netfilter/nf_conntrack_l4proto.h> 27#include <net/netfilter/nf_conntrack_l4proto.h>
28#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_zones.h>
30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 31#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
31#include <net/netfilter/nf_log.h> 32#include <net/netfilter/nf_log.h>
32 33
@@ -191,15 +192,20 @@ out:
191static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, 192static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
192 struct sk_buff *skb) 193 struct sk_buff *skb)
193{ 194{
195 u16 zone = NF_CT_DEFAULT_ZONE;
196
197 if (skb->nfct)
198 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
199
194#ifdef CONFIG_BRIDGE_NETFILTER 200#ifdef CONFIG_BRIDGE_NETFILTER
195 if (skb->nf_bridge && 201 if (skb->nf_bridge &&
196 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 202 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
197 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; 203 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
198#endif 204#endif
199 if (hooknum == NF_INET_PRE_ROUTING) 205 if (hooknum == NF_INET_PRE_ROUTING)
200 return IP6_DEFRAG_CONNTRACK_IN; 206 return IP6_DEFRAG_CONNTRACK_IN + zone;
201 else 207 else
202 return IP6_DEFRAG_CONNTRACK_OUT; 208 return IP6_DEFRAG_CONNTRACK_OUT + zone;
203 209
204} 210}
205 211
@@ -212,7 +218,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
212 struct sk_buff *reasm; 218 struct sk_buff *reasm;
213 219
214 /* Previously seen (loopback)? */ 220 /* Previously seen (loopback)? */
215 if (skb->nfct) 221 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
216 return NF_ACCEPT; 222 return NF_ACCEPT;
217 223
218 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); 224 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
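
Defragmentation is now conntrack-zone aware: the defrag "user" key is offset by the zone carried on a template conntrack attached to the skb, so fragments belonging to different zones can never be reassembled into one packet, and an attached template no longer short-circuits ipv6_defrag(). The selection logic, as in the hunk above:

static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
                                                struct sk_buff *skb)
{
        u16 zone = NF_CT_DEFAULT_ZONE;

        /* A template attached by the CT target carries the zone id. */
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);

#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge &&
            skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
                return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
        if (hooknum == NF_INET_PRE_ROUTING)
                return IP6_DEFRAG_CONNTRACK_IN + zone;
        else
                return IP6_DEFRAG_CONNTRACK_OUT + zone;
}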
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c7b8bd1d7984..9be81776415e 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -23,6 +23,7 @@
23#include <net/netfilter/nf_conntrack_tuple.h> 23#include <net/netfilter/nf_conntrack_tuple.h>
24#include <net/netfilter/nf_conntrack_l4proto.h> 24#include <net/netfilter/nf_conntrack_l4proto.h>
25#include <net/netfilter/nf_conntrack_core.h> 25#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/nf_conntrack_zones.h>
26#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> 27#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
27#include <net/netfilter/nf_log.h> 28#include <net/netfilter/nf_log.h>
28 29
@@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
128} 129}
129 130
130static int 131static int
131icmpv6_error_message(struct net *net, 132icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
132 struct sk_buff *skb, 133 struct sk_buff *skb,
133 unsigned int icmp6off, 134 unsigned int icmp6off,
134 enum ip_conntrack_info *ctinfo, 135 enum ip_conntrack_info *ctinfo,
@@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net,
137 struct nf_conntrack_tuple intuple, origtuple; 138 struct nf_conntrack_tuple intuple, origtuple;
138 const struct nf_conntrack_tuple_hash *h; 139 const struct nf_conntrack_tuple_hash *h;
139 const struct nf_conntrack_l4proto *inproto; 140 const struct nf_conntrack_l4proto *inproto;
141 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
140 142
141 NF_CT_ASSERT(skb->nfct == NULL); 143 NF_CT_ASSERT(skb->nfct == NULL);
142 144
@@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net,
163 165
164 *ctinfo = IP_CT_RELATED; 166 *ctinfo = IP_CT_RELATED;
165 167
166 h = nf_conntrack_find_get(net, &intuple); 168 h = nf_conntrack_find_get(net, zone, &intuple);
167 if (!h) { 169 if (!h) {
168 pr_debug("icmpv6_error: no match\n"); 170 pr_debug("icmpv6_error: no match\n");
169 return -NF_ACCEPT; 171 return -NF_ACCEPT;
@@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net,
179} 181}
180 182
181static int 183static int
182icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 184icmpv6_error(struct net *net, struct nf_conn *tmpl,
185 struct sk_buff *skb, unsigned int dataoff,
183 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 186 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
184{ 187{
185 const struct icmp6hdr *icmp6h; 188 const struct icmp6hdr *icmp6h;
@@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
215 if (icmp6h->icmp6_type >= 128) 218 if (icmp6h->icmp6_type >= 128)
216 return NF_ACCEPT; 219 return NF_ACCEPT;
217 220
218 return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum); 221 return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
219} 222}
220 223
221#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 224#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 624a54832a7c..dd5b9bd61c62 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -27,6 +27,7 @@
27#include <linux/ipv6.h> 27#include <linux/ipv6.h>
28#include <linux/icmpv6.h> 28#include <linux/icmpv6.h>
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/slab.h>
30 31
31#include <net/sock.h> 32#include <net/sock.h>
32#include <net/snmp.h> 33#include <net/snmp.h>
@@ -45,9 +46,6 @@
45#include <linux/kernel.h> 46#include <linux/kernel.h>
46#include <linux/module.h> 47#include <linux/module.h>
47 48
48#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
49#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
50#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
51 49
52struct nf_ct_frag6_skb_cb 50struct nf_ct_frag6_skb_cb
53{ 51{
@@ -472,7 +470,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
472 470
473 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ 471 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
474 fp = skb_shinfo(head)->frag_list; 472 fp = skb_shinfo(head)->frag_list;
475 if (NFCT_FRAG6_CB(fp)->orig == NULL) 473 if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
476 /* at above code, head skb is divided into two skbs. */ 474 /* at above code, head skb is divided into two skbs. */
477 fp = fp->next; 475 fp = fp->next;
478 476
@@ -598,12 +596,6 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
598 hdr = ipv6_hdr(clone); 596 hdr = ipv6_hdr(clone);
599 fhdr = (struct frag_hdr *)skb_transport_header(clone); 597 fhdr = (struct frag_hdr *)skb_transport_header(clone);
600 598
601 if (!(fhdr->frag_off & htons(0xFFF9))) {
602 pr_debug("Invalid fragment offset\n");
603 /* It is not a fragmented frame */
604 goto ret_orig;
605 }
606
607 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 599 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
608 nf_ct_frag6_evictor(); 600 nf_ct_frag6_evictor();
609 601
@@ -670,8 +662,8 @@ int nf_ct_frag6_init(void)
670 nf_frags.frag_expire = nf_ct_frag6_expire; 662 nf_frags.frag_expire = nf_ct_frag6_expire;
671 nf_frags.secret_interval = 10 * 60 * HZ; 663 nf_frags.secret_interval = 10 * 60 * HZ;
672 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT; 664 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
673 nf_init_frags.high_thresh = 256 * 1024; 665 nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
674 nf_init_frags.low_thresh = 192 * 1024; 666 nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
675 inet_frags_init_net(&nf_init_frags); 667 inet_frags_init_net(&nf_init_frags);
676 inet_frags_init(&nf_frags); 668 inet_frags_init(&nf_frags);
677 669
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91f..58344c0fbd13 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -59,7 +59,7 @@ static const struct file_operations sockstat6_seq_fops = {
59 .release = single_release_net, 59 .release = single_release_net,
60}; 60};
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static const struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
@@ -92,7 +92,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
92 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
93}; 93};
94 94
95static struct snmp_mib snmp6_icmp6_list[] = { 95static const struct snmp_mib snmp6_icmp6_list[] = {
96/* icmpv6 mib according to RFC 2466 */ 96/* icmpv6 mib according to RFC 2466 */
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
@@ -120,7 +120,7 @@ static const char *const icmp6type2name[256] = {
120}; 120};
121 121
122 122
123static struct snmp_mib snmp6_udp6_list[] = { 123static const struct snmp_mib snmp6_udp6_list[] = {
124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), 124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), 125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), 126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
@@ -128,7 +128,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
128 SNMP_MIB_SENTINEL 128 SNMP_MIB_SENTINEL
129}; 129};
130 130
131static struct snmp_mib snmp6_udplite6_list[] = { 131static const struct snmp_mib snmp6_udplite6_list[] = {
132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
@@ -136,7 +136,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
136 SNMP_MIB_SENTINEL 136 SNMP_MIB_SENTINEL
137}; 137};
138 138
139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
140{ 140{
141 char name[32]; 141 char name[32];
142 int i; 142 int i;
@@ -170,8 +170,8 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
170 return; 170 return;
171} 171}
172 172
173static inline void 173static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
174snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) 174 const struct snmp_mib *itemlist)
175{ 175{
176 int i; 176 int i;
177 for (i=0; itemlist[i].name; i++) 177 for (i=0; itemlist[i].name; i++)
@@ -183,14 +183,15 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
183{ 183{
184 struct net *net = (struct net *)seq->private; 184 struct net *net = (struct net *)seq->private;
185 185
186 snmp6_seq_show_item(seq, (void **)net->mib.ipv6_statistics, 186 snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
187 snmp6_ipstats_list); 187 snmp6_ipstats_list);
188 snmp6_seq_show_item(seq, (void **)net->mib.icmpv6_statistics, 188 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
189 snmp6_icmp6_list); 189 snmp6_icmp6_list);
190 snmp6_seq_show_icmpv6msg(seq, (void **)net->mib.icmpv6msg_statistics); 190 snmp6_seq_show_icmpv6msg(seq,
191 snmp6_seq_show_item(seq, (void **)net->mib.udp_stats_in6, 191 (void __percpu **)net->mib.icmpv6msg_statistics);
192 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
192 snmp6_udp6_list); 193 snmp6_udp6_list);
193 snmp6_seq_show_item(seq, (void **)net->mib.udplite_stats_in6, 194 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
194 snmp6_udplite6_list); 195 snmp6_udplite6_list);
195 return 0; 196 return 0;
196} 197}
@@ -213,9 +214,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
213 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 214 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
214 215
215 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 216 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
216 snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); 217 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6,
217 snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); 218 snmp6_ipstats_list);
218 snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg); 219 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6,
220 snmp6_icmp6_list);
221 snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg);
219 return 0; 222 return 0;
220} 223}
221 224
@@ -259,7 +262,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
259 struct net *net = dev_net(idev->dev); 262 struct net *net = dev_net(idev->dev);
260 if (!net->mib.proc_net_devsnmp6) 263 if (!net->mib.proc_net_devsnmp6)
261 return -ENOENT; 264 return -ENOENT;
262 if (!idev || !idev->stats.proc_dir_entry) 265 if (!idev->stats.proc_dir_entry)
263 return -EINVAL; 266 return -EINVAL;
264 remove_proc_entry(idev->stats.proc_dir_entry->name, 267 remove_proc_entry(idev->stats.proc_dir_entry->name,
265 net->mib.proc_net_devsnmp6); 268 net->mib.proc_net_devsnmp6);
@@ -267,7 +270,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
267 return 0; 270 return 0;
268} 271}
269 272
270static int ipv6_proc_init_net(struct net *net) 273static int __net_init ipv6_proc_init_net(struct net *net)
271{ 274{
272 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 275 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
273 &sockstat6_seq_fops)) 276 &sockstat6_seq_fops))
@@ -288,7 +291,7 @@ proc_dev_snmp6_fail:
288 return -ENOMEM; 291 return -ENOMEM;
289} 292}
290 293
291static void ipv6_proc_exit_net(struct net *net) 294static void __net_exit ipv6_proc_exit_net(struct net *net)
292{ 295{
293 proc_net_remove(net, "sockstat6"); 296 proc_net_remove(net, "sockstat6");
294 proc_net_remove(net, "dev_snmp6"); 297 proc_net_remove(net, "dev_snmp6");
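
The MIB walkers in proc.c now carry the __percpu annotation and take const item lists; a minimal sketch of the item printer, assuming the usual snmp_fold_field() per-cpu accumulator:

static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
                                const struct snmp_mib *itemlist)
{
        int i;

        /* Sum the per-cpu counters for each named MIB entry. */
        for (i = 0; itemlist[i].name; i++)
                seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
                           snmp_fold_field(mib, itemlist[i].entry));
}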
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 926ce8eeffaf..8763b1a0814a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -21,6 +21,7 @@
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/socket.h> 23#include <linux/socket.h>
24#include <linux/slab.h>
24#include <linux/sockios.h> 25#include <linux/sockios.h>
25#include <linux/net.h> 26#include <linux/net.h>
26#include <linux/in6.h> 27#include <linux/in6.h>
@@ -1275,7 +1276,7 @@ static const struct file_operations raw6_seq_fops = {
1275 .release = seq_release_net, 1276 .release = seq_release_net,
1276}; 1277};
1277 1278
1278static int raw6_init_net(struct net *net) 1279static int __net_init raw6_init_net(struct net *net)
1279{ 1280{
1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1281 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
1281 return -ENOMEM; 1282 return -ENOMEM;
@@ -1283,7 +1284,7 @@ static int raw6_init_net(struct net *net)
1283 return 0; 1284 return 0;
1284} 1285}
1285 1286
1286static void raw6_exit_net(struct net *net) 1287static void __net_exit raw6_exit_net(struct net *net)
1287{ 1288{
1288 proc_net_remove(net, "raw6"); 1289 proc_net_remove(net, "raw6");
1289} 1290}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2cddea3bd6be..6d4292ff5854 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -41,6 +41,7 @@
41#include <linux/random.h> 41#include <linux/random.h>
42#include <linux/jhash.h> 42#include <linux/jhash.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/slab.h>
44 45
45#include <net/sock.h> 46#include <net/sock.h>
46#include <net/snmp.h> 47#include <net/snmp.h>
@@ -228,7 +229,7 @@ static void ip6_frag_expire(unsigned long data)
228 pointer directly, device might already disappeared. 229 pointer directly, device might already disappeared.
229 */ 230 */
230 fq->q.fragments->dev = dev; 231 fq->q.fragments->dev = dev;
231 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); 232 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
232out_rcu_unlock: 233out_rcu_unlock:
233 rcu_read_unlock(); 234 rcu_read_unlock();
234out: 235out:
@@ -237,8 +238,7 @@ out:
237} 238}
238 239
239static __inline__ struct frag_queue * 240static __inline__ struct frag_queue *
240fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, 241fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
241 struct inet6_dev *idev)
242{ 242{
243 struct inet_frag_queue *q; 243 struct inet_frag_queue *q;
244 struct ip6_create_arg arg; 244 struct ip6_create_arg arg;
@@ -254,13 +254,9 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
254 254
255 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 255 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
256 if (q == NULL) 256 if (q == NULL)
257 goto oom; 257 return NULL;
258 258
259 return container_of(q, struct frag_queue, q); 259 return container_of(q, struct frag_queue, q);
260
261oom:
262 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
263 return NULL;
264} 260}
265 261
266static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, 262static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
@@ -606,8 +602,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
606 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 602 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
607 ip6_evictor(net, ip6_dst_idev(skb_dst(skb))); 603 ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
608 604
609 if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, 605 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
610 ip6_dst_idev(skb_dst(skb)))) != NULL) { 606 if (fq != NULL) {
611 int ret; 607 int ret;
612 608
613 spin_lock(&fq->q.lock); 609 spin_lock(&fq->q.lock);
@@ -672,7 +668,7 @@ static struct ctl_table ip6_frags_ctl_table[] = {
672 { } 668 { }
673}; 669};
674 670
675static int ip6_frags_ns_sysctl_register(struct net *net) 671static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
676{ 672{
677 struct ctl_table *table; 673 struct ctl_table *table;
678 struct ctl_table_header *hdr; 674 struct ctl_table_header *hdr;
@@ -702,7 +698,7 @@ err_alloc:
702 return -ENOMEM; 698 return -ENOMEM;
703} 699}
704 700
705static void ip6_frags_ns_sysctl_unregister(struct net *net) 701static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
706{ 702{
707 struct ctl_table *table; 703 struct ctl_table *table;
708 704
@@ -745,10 +741,10 @@ static inline void ip6_frags_sysctl_unregister(void)
745} 741}
746#endif 742#endif
747 743
748static int ipv6_frags_init_net(struct net *net) 744static int __net_init ipv6_frags_init_net(struct net *net)
749{ 745{
750 net->ipv6.frags.high_thresh = 256 * 1024; 746 net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
751 net->ipv6.frags.low_thresh = 192 * 1024; 747 net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
752 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; 748 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
753 749
754 inet_frags_init_net(&net->ipv6.frags); 750 inet_frags_init_net(&net->ipv6.frags);
@@ -756,7 +752,7 @@ static int ipv6_frags_init_net(struct net *net)
756 return ip6_frags_ns_sysctl_register(net); 752 return ip6_frags_ns_sysctl_register(net);
757} 753}
758 754
759static void ipv6_frags_exit_net(struct net *net) 755static void __net_exit ipv6_frags_exit_net(struct net *net)
760{ 756{
761 ip6_frags_ns_sysctl_unregister(net); 757 ip6_frags_ns_sysctl_unregister(net);
762 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 758 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
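
fq_find() no longer takes the inet6_dev and no longer bumps IPSTATS_MIB_REASMFAILS itself; it simply returns NULL and the caller accounts for the failure. Roughly, the caller side in ipv6_frag_rcv() after the change:

        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);
                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        /* Lookup or allocation failed: account here, not inside fq_find(). */
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;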
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2bd74c5f8d9..05ebd7833043 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -40,6 +40,7 @@
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h> 41#include <linux/seq_file.h>
42#include <linux/nsproxy.h> 42#include <linux/nsproxy.h>
43#include <linux/slab.h>
43#include <net/net_namespace.h> 44#include <net/net_namespace.h>
44#include <net/snmp.h> 45#include <net/snmp.h>
45#include <net/ipv6.h> 46#include <net/ipv6.h>
@@ -814,20 +815,13 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
814{ 815{
815 int flags = 0; 816 int flags = 0;
816 817
817 if (rt6_need_strict(&fl->fl6_dst)) 818 if (fl->oif || rt6_need_strict(&fl->fl6_dst))
818 flags |= RT6_LOOKUP_F_IFACE; 819 flags |= RT6_LOOKUP_F_IFACE;
819 820
820 if (!ipv6_addr_any(&fl->fl6_src)) 821 if (!ipv6_addr_any(&fl->fl6_src))
821 flags |= RT6_LOOKUP_F_HAS_SADDR; 822 flags |= RT6_LOOKUP_F_HAS_SADDR;
822 else if (sk) { 823 else if (sk)
823 unsigned int prefs = inet6_sk(sk)->srcprefs; 824 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
824 if (prefs & IPV6_PREFER_SRC_TMP)
825 flags |= RT6_LOOKUP_F_SRCPREF_TMP;
826 if (prefs & IPV6_PREFER_SRC_PUBLIC)
827 flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
828 if (prefs & IPV6_PREFER_SRC_COA)
829 flags |= RT6_LOOKUP_F_SRCPREF_COA;
830 }
831 825
832 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output); 826 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
833} 827}
@@ -886,7 +880,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
886 880
887 rt = (struct rt6_info *) dst; 881 rt = (struct rt6_info *) dst;
888 882
889 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) 883 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
890 return dst; 884 return dst;
891 885
892 return NULL; 886 return NULL;
@@ -897,19 +891,24 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
897 struct rt6_info *rt = (struct rt6_info *) dst; 891 struct rt6_info *rt = (struct rt6_info *) dst;
898 892
899 if (rt) { 893 if (rt) {
900 if (rt->rt6i_flags & RTF_CACHE) 894 if (rt->rt6i_flags & RTF_CACHE) {
901 ip6_del_rt(rt); 895 if (rt6_check_expired(rt)) {
902 else 896 ip6_del_rt(rt);
897 dst = NULL;
898 }
899 } else {
903 dst_release(dst); 900 dst_release(dst);
901 dst = NULL;
902 }
904 } 903 }
905 return NULL; 904 return dst;
906} 905}
907 906
908static void ip6_link_failure(struct sk_buff *skb) 907static void ip6_link_failure(struct sk_buff *skb)
909{ 908{
910 struct rt6_info *rt; 909 struct rt6_info *rt;
911 910
912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); 911 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
913 912
914 rt = (struct rt6_info *) skb_dst(skb); 913 rt = (struct rt6_info *) skb_dst(skb);
915 if (rt) { 914 if (rt) {
@@ -1873,7 +1872,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1873 switch (ipstats_mib_noroutes) { 1872 switch (ipstats_mib_noroutes) {
1874 case IPSTATS_MIB_INNOROUTES: 1873 case IPSTATS_MIB_INNOROUTES:
1875 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 1874 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1876 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) { 1875 if (type == IPV6_ADDR_ANY) {
1877 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 1876 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1878 IPSTATS_MIB_INADDRERRORS); 1877 IPSTATS_MIB_INADDRERRORS);
1879 break; 1878 break;
@@ -1884,7 +1883,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1884 ipstats_mib_noroutes); 1883 ipstats_mib_noroutes);
1885 break; 1884 break;
1886 } 1885 }
1887 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev); 1886 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1888 kfree_skb(skb); 1887 kfree_skb(skb);
1889 return 0; 1888 return 0;
1890} 1889}
@@ -2612,7 +2611,7 @@ ctl_table ipv6_route_table_template[] = {
2612 { } 2611 { }
2613}; 2612};
2614 2613
2615struct ctl_table *ipv6_route_sysctl_init(struct net *net) 2614struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2616{ 2615{
2617 struct ctl_table *table; 2616 struct ctl_table *table;
2618 2617
@@ -2637,7 +2636,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2637} 2636}
2638#endif 2637#endif
2639 2638
2640static int ip6_route_net_init(struct net *net) 2639static int __net_init ip6_route_net_init(struct net *net)
2641{ 2640{
2642 int ret = -ENOMEM; 2641 int ret = -ENOMEM;
2643 2642
@@ -2702,7 +2701,7 @@ out_ip6_dst_ops:
2702 goto out; 2701 goto out;
2703} 2702}
2704 2703
2705static void ip6_route_net_exit(struct net *net) 2704static void __net_exit ip6_route_net_exit(struct net *net)
2706{ 2705{
2707#ifdef CONFIG_PROC_FS 2706#ifdef CONFIG_PROC_FS
2708 proc_net_remove(net, "ipv6_route"); 2707 proc_net_remove(net, "ipv6_route");
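
ip6_negative_advice() above now removes a cached clone only once it has actually expired and otherwise hands the dst back instead of returning NULL unconditionally; condensed from the hunk:

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *) dst;

        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE) {
                        if (rt6_check_expired(rt)) {
                                ip6_del_rt(rt);         /* stale clone: drop it */
                                dst = NULL;
                        }
                } else {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;     /* a still-valid entry is kept by the caller */
}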
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 976e68244b99..5abae10cd884 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -28,6 +28,7 @@
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/if_arp.h> 29#include <linux/if_arp.h>
30#include <linux/icmp.h> 30#include <linux/icmp.h>
31#include <linux/slab.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/netfilter_ipv4.h> 34#include <linux/netfilter_ipv4.h>
@@ -62,7 +63,6 @@
62#define HASH_SIZE 16 63#define HASH_SIZE 16
63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 64#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
64 65
65static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 66static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 67static void ipip6_tunnel_setup(struct net_device *dev);
68 68
@@ -364,7 +364,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
364 goto out; 364 goto out;
365 } 365 }
366 366
367 INIT_RCU_HEAD(&p->rcu_head);
368 p->next = t->prl; 367 p->next = t->prl;
369 p->addr = a->addr; 368 p->addr = a->addr;
370 p->flags = a->flags; 369 p->flags = a->flags;
@@ -745,7 +744,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
745 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 744 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
746 745
747 if (skb->len > mtu) { 746 if (skb->len > mtu) {
748 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 747 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
749 ip_rt_put(rt); 748 ip_rt_put(rt);
750 goto tx_error; 749 goto tx_error;
751 } 750 }
@@ -1120,7 +1119,7 @@ static void ipip6_tunnel_init(struct net_device *dev)
1120 ipip6_tunnel_bind_dev(dev); 1119 ipip6_tunnel_bind_dev(dev);
1121} 1120}
1122 1121
1123static void ipip6_fb_tunnel_init(struct net_device *dev) 1122static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1124{ 1123{
1125 struct ip_tunnel *tunnel = netdev_priv(dev); 1124 struct ip_tunnel *tunnel = netdev_priv(dev);
1126 struct iphdr *iph = &tunnel->parms.iph; 1125 struct iphdr *iph = &tunnel->parms.iph;
@@ -1145,7 +1144,7 @@ static struct xfrm_tunnel sit_handler = {
1145 .priority = 1, 1144 .priority = 1,
1146}; 1145};
1147 1146
1148static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) 1147static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1149{ 1148{
1150 int prio; 1149 int prio;
1151 1150
@@ -1162,7 +1161,7 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1162 } 1161 }
1163} 1162}
1164 1163
1165static int sit_init_net(struct net *net) 1164static int __net_init sit_init_net(struct net *net)
1166{ 1165{
1167 struct sit_net *sitn = net_generic(net, sit_net_id); 1166 struct sit_net *sitn = net_generic(net, sit_net_id);
1168 int err; 1167 int err;
@@ -1195,7 +1194,7 @@ err_alloc_dev:
1195 return err; 1194 return err;
1196} 1195}
1197 1196
1198static void sit_exit_net(struct net *net) 1197static void __net_exit sit_exit_net(struct net *net)
1199{ 1198{
1200 struct sit_net *sitn = net_generic(net, sit_net_id); 1199 struct sit_net *sitn = net_generic(net, sit_net_id);
1201 LIST_HEAD(list); 1200 LIST_HEAD(list);
@@ -1228,15 +1227,14 @@ static int __init sit_init(void)
1228 1227
1229 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); 1228 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1230 1229
1231 if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
1232 printk(KERN_INFO "sit init: Can't add protocol\n");
1233 return -EAGAIN;
1234 }
1235
1236 err = register_pernet_device(&sit_net_ops); 1230 err = register_pernet_device(&sit_net_ops);
1237 if (err < 0) 1231 if (err < 0)
1238 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1232 return err;
1239 1233 err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1234 if (err < 0) {
1235 unregister_pernet_device(&sit_net_ops);
1236 printk(KERN_INFO "sit init: Can't add protocol\n");
1237 }
1240 return err; 1238 return err;
1241} 1239}
1242 1240
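
sit_init() now brings up the pernet device first and registers the xfrm4 tunnel handler second, unwinding the pernet registration if the handler cannot be added; the ordering from the hunk above:

static int __init sit_init(void)
{
        int err;

        printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");

        err = register_pernet_device(&sit_net_ops);
        if (err < 0)
                return err;

        err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
        if (err < 0) {
                /* Undo in reverse order on failure. */
                unregister_pernet_device(&sit_net_ops);
                printk(KERN_INFO "sit init: Can't add protocol\n");
        }
        return err;
}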
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06576c6..34d1f0690d7e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -269,7 +269,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
270 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
271 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
272 ireq->wscale_ok, &rcv_wscale); 272 ireq->wscale_ok, &rcv_wscale,
273 dst_metric(dst, RTAX_INITRWND));
273 274
274 ireq->rcv_wscale = rcv_wscale; 275 ireq->rcv_wscale = rcv_wscale;
275 276
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c690736885b4..fa1d8f4e0051 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -9,6 +9,7 @@
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <linux/in6.h> 10#include <linux/in6.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <linux/slab.h>
12#include <net/ndisc.h> 13#include <net/ndisc.h>
13#include <net/ipv6.h> 14#include <net/ipv6.h>
14#include <net/addrconf.h> 15#include <net/addrconf.h>
@@ -55,7 +56,7 @@ struct ctl_path net_ipv6_ctl_path[] = {
55}; 56};
56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); 57EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
57 58
58static int ipv6_sysctl_net_init(struct net *net) 59static int __net_init ipv6_sysctl_net_init(struct net *net)
59{ 60{
60 struct ctl_table *ipv6_table; 61 struct ctl_table *ipv6_table;
61 struct ctl_table *ipv6_route_table; 62 struct ctl_table *ipv6_route_table;
@@ -98,7 +99,7 @@ out_ipv6_table:
98 goto out; 99 goto out;
99} 100}
100 101
101static void ipv6_sysctl_net_exit(struct net *net) 102static void __net_exit ipv6_sysctl_net_exit(struct net *net)
102{ 103{
103 struct ctl_table *ipv6_table; 104 struct ctl_table *ipv6_table;
104 struct ctl_table *ipv6_route_table; 105 struct ctl_table *ipv6_route_table;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd595a40d..075f540ec197 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -38,6 +38,7 @@
38#include <linux/jhash.h> 38#include <linux/jhash.h>
39#include <linux/ipsec.h> 39#include <linux/ipsec.h>
40#include <linux/times.h> 40#include <linux/times.h>
41#include <linux/slab.h>
41 42
42#include <linux/ipv6.h> 43#include <linux/ipv6.h>
43#include <linux/icmpv6.h> 44#include <linux/icmpv6.h>
@@ -520,6 +521,13 @@ done:
520 return err; 521 return err;
521} 522}
522 523
524static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
525 struct request_values *rvp)
526{
527 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
528 return tcp_v6_send_synack(sk, req, rvp);
529}
530
523static inline void syn_flood_warning(struct sk_buff *skb) 531static inline void syn_flood_warning(struct sk_buff *skb)
524{ 532{
525#ifdef CONFIG_SYN_COOKIES 533#ifdef CONFIG_SYN_COOKIES
@@ -876,7 +884,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
876 884
877 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 885 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
878 if (net_ratelimit()) { 886 if (net_ratelimit()) {
879 printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n", 887 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
880 genhash ? "failed" : "mismatch", 888 genhash ? "failed" : "mismatch",
881 &ip6h->saddr, ntohs(th->source), 889 &ip6h->saddr, ntohs(th->source),
882 &ip6h->daddr, ntohs(th->dest)); 890 &ip6h->daddr, ntohs(th->dest));
@@ -890,10 +898,11 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
890struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 898struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
891 .family = AF_INET6, 899 .family = AF_INET6,
892 .obj_size = sizeof(struct tcp6_request_sock), 900 .obj_size = sizeof(struct tcp6_request_sock),
893 .rtx_syn_ack = tcp_v6_send_synack, 901 .rtx_syn_ack = tcp_v6_rtx_synack,
894 .send_ack = tcp_v6_reqsk_send_ack, 902 .send_ack = tcp_v6_reqsk_send_ack,
895 .destructor = tcp_v6_reqsk_destructor, 903 .destructor = tcp_v6_reqsk_destructor,
896 .send_reset = tcp_v6_send_reset 904 .send_reset = tcp_v6_send_reset,
905 .syn_ack_timeout = tcp_syn_ack_timeout,
897}; 906};
898 907
899#ifdef CONFIG_TCP_MD5SIG 908#ifdef CONFIG_TCP_MD5SIG
@@ -1006,7 +1015,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1006 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1015 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1007 1016
1008 t1 = (struct tcphdr *) skb_push(buff, tot_len); 1017 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1009 skb_reset_transport_header(skb); 1018 skb_reset_transport_header(buff);
1010 1019
1011 /* Swap the send and the receive. */ 1020 /* Swap the send and the receive. */
1012 memset(t1, 0, sizeof(*t1)); 1021 memset(t1, 0, sizeof(*t1));
@@ -1732,8 +1741,11 @@ process:
1732 if (!tcp_prequeue(sk, skb)) 1741 if (!tcp_prequeue(sk, skb))
1733 ret = tcp_v6_do_rcv(sk, skb); 1742 ret = tcp_v6_do_rcv(sk, skb);
1734 } 1743 }
1735 } else 1744 } else if (unlikely(sk_add_backlog(sk, skb))) {
1736 sk_add_backlog(sk, skb); 1745 bh_unlock_sock(sk);
1746 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1747 goto discard_and_relse;
1748 }
1737 bh_unlock_sock(sk); 1749 bh_unlock_sock(sk);
1738 1750
1739 sock_put(sk); 1751 sock_put(sk);
@@ -2105,7 +2117,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2105 }, 2117 },
2106}; 2118};
2107 2119
2108int tcp6_proc_init(struct net *net) 2120int __net_init tcp6_proc_init(struct net *net)
2109{ 2121{
2110 return tcp_proc_register(net, &tcp6_seq_afinfo); 2122 return tcp_proc_register(net, &tcp6_seq_afinfo);
2111} 2123}
@@ -2174,18 +2186,18 @@ static struct inet_protosw tcpv6_protosw = {
2174 INET_PROTOSW_ICSK, 2186 INET_PROTOSW_ICSK,
2175}; 2187};
2176 2188
2177static int tcpv6_net_init(struct net *net) 2189static int __net_init tcpv6_net_init(struct net *net)
2178{ 2190{
2179 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, 2191 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2180 SOCK_RAW, IPPROTO_TCP, net); 2192 SOCK_RAW, IPPROTO_TCP, net);
2181} 2193}
2182 2194
2183static void tcpv6_net_exit(struct net *net) 2195static void __net_exit tcpv6_net_exit(struct net *net)
2184{ 2196{
2185 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2197 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2186} 2198}
2187 2199
2188static void tcpv6_net_exit_batch(struct list_head *net_exit_list) 2200static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2189{ 2201{
2190 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); 2202 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2191} 2203}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a6..fc3c86a47452 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -25,6 +25,7 @@
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/slab.h>
28#include <net/ipv6.h> 29#include <net/ipv6.h>
29#include <net/protocol.h> 30#include <net/protocol.h>
30#include <net/xfrm.h> 31#include <net/xfrm.h>
@@ -98,7 +99,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
98 if (!handler->handler(skb)) 99 if (!handler->handler(skb))
99 return 0; 100 return 0;
100 101
101 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 102 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
102 103
103drop: 104drop:
104 kfree_skb(skb); 105 kfree_skb(skb);
@@ -116,7 +117,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
116 if (!handler->handler(skb)) 117 if (!handler->handler(skb))
117 return 0; 118 return 0;
118 119
119 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 120 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
120 121
121drop: 122drop:
122 kfree_skb(skb); 123 kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c47..90824852f598 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/slab.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
39#include <net/ndisc.h> 40#include <net/ndisc.h>
@@ -258,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
258 if (hslot->count < hslot2->count) 259 if (hslot->count < hslot2->count)
259 goto begin; 260 goto begin;
260 261
261 result = udp6_lib_lookup2(net, &in6addr_any, sport, 262 result = udp6_lib_lookup2(net, saddr, sport,
262 daddr, hnum, dif, 263 &in6addr_any, hnum, dif,
263 hslot2, slot2); 264 hslot2, slot2);
264 } 265 }
265 rcu_read_unlock(); 266 rcu_read_unlock();
@@ -322,7 +323,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
322 struct ipv6_pinfo *np = inet6_sk(sk); 323 struct ipv6_pinfo *np = inet6_sk(sk);
323 struct inet_sock *inet = inet_sk(sk); 324 struct inet_sock *inet = inet_sk(sk);
324 struct sk_buff *skb; 325 struct sk_buff *skb;
325 unsigned int ulen, copied; 326 unsigned int ulen;
326 int peeked; 327 int peeked;
327 int err; 328 int err;
328 int is_udplite = IS_UDPLITE(sk); 329 int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +342,9 @@ try_again:
341 goto out; 342 goto out;
342 343
343 ulen = skb->len - sizeof(struct udphdr); 344 ulen = skb->len - sizeof(struct udphdr);
344 copied = len; 345 if (len > ulen)
345 if (copied > ulen) 346 len = ulen;
346 copied = ulen; 347 else if (len < ulen)
347 else if (copied < ulen)
348 msg->msg_flags |= MSG_TRUNC; 348 msg->msg_flags |= MSG_TRUNC;
349 349
350 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 350 is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +355,14 @@ try_again:
355 * coverage checksum (UDP-Lite), do it before the copy. 355 * coverage checksum (UDP-Lite), do it before the copy.
356 */ 356 */
357 357
358 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 358 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
359 if (udp_lib_checksum_complete(skb)) 359 if (udp_lib_checksum_complete(skb))
360 goto csum_copy_err; 360 goto csum_copy_err;
361 } 361 }
362 362
363 if (skb_csum_unnecessary(skb)) 363 if (skb_csum_unnecessary(skb))
364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
365 msg->msg_iov, copied ); 365 msg->msg_iov,len);
366 else { 366 else {
367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
368 if (err == -EINVAL) 368 if (err == -EINVAL)
@@ -411,7 +411,7 @@ try_again:
411 datagram_recv_ctl(sk, msg, skb); 411 datagram_recv_ctl(sk, msg, skb);
412 } 412 }
413 413
414 err = copied; 414 err = len;
415 if (flags & MSG_TRUNC) 415 if (flags & MSG_TRUNC)
416 err = ulen; 416 err = ulen;
417 417
@@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
584 bh_lock_sock(sk); 584 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk)) 585 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1); 586 udpv6_queue_rcv_skb(sk, skb1);
587 else 587 else if (sk_add_backlog(sk, skb1)) {
588 sk_add_backlog(sk, skb1); 588 kfree_skb(skb1);
589 bh_unlock_sock(sk);
590 goto drop;
591 }
589 bh_unlock_sock(sk); 592 bh_unlock_sock(sk);
590 } else { 593 continue;
591 atomic_inc(&sk->sk_drops);
592 UDP6_INC_STATS_BH(sock_net(sk),
593 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
594 UDP6_INC_STATS_BH(sock_net(sk),
595 UDP_MIB_INERRORS, IS_UDPLITE(sk));
596 } 594 }
595drop:
596 atomic_inc(&sk->sk_drops);
597 UDP6_INC_STATS_BH(sock_net(sk),
598 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
599 UDP6_INC_STATS_BH(sock_net(sk),
600 UDP_MIB_INERRORS, IS_UDPLITE(sk));
597 } 601 }
598} 602}
599/* 603/*
@@ -681,12 +685,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
681int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 685int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
682 int proto) 686 int proto)
683{ 687{
688 struct net *net = dev_net(skb->dev);
684 struct sock *sk; 689 struct sock *sk;
685 struct udphdr *uh; 690 struct udphdr *uh;
686 struct net_device *dev = skb->dev;
687 struct in6_addr *saddr, *daddr; 691 struct in6_addr *saddr, *daddr;
688 u32 ulen = 0; 692 u32 ulen = 0;
689 struct net *net = dev_net(skb->dev);
690 693
691 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 694 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
692 goto short_packet; 695 goto short_packet;
@@ -745,7 +748,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
745 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, 748 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
746 proto == IPPROTO_UDPLITE); 749 proto == IPPROTO_UDPLITE);
747 750
748 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 751 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
749 752
750 kfree_skb(skb); 753 kfree_skb(skb);
751 return 0; 754 return 0;
@@ -756,8 +759,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
756 bh_lock_sock(sk); 759 bh_lock_sock(sk);
757 if (!sock_owned_by_user(sk)) 760 if (!sock_owned_by_user(sk))
758 udpv6_queue_rcv_skb(sk, skb); 761 udpv6_queue_rcv_skb(sk, skb);
759 else 762 else if (sk_add_backlog(sk, skb)) {
760 sk_add_backlog(sk, skb); 763 atomic_inc(&sk->sk_drops);
764 bh_unlock_sock(sk);
765 sock_put(sk);
766 goto discard;
767 }
761 bh_unlock_sock(sk); 768 bh_unlock_sock(sk);
762 sock_put(sk); 769 sock_put(sk);
763 return 0; 770 return 0;
@@ -1396,7 +1403,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
1396 }, 1403 },
1397}; 1404};
1398 1405
1399int udp6_proc_init(struct net *net) 1406int __net_init udp6_proc_init(struct net *net)
1400{ 1407{
1401 return udp_proc_register(net, &udp6_seq_afinfo); 1408 return udp_proc_register(net, &udp6_seq_afinfo);
1402} 1409}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 6ea6938919e6..5f48fadc27f7 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -104,12 +104,12 @@ static struct udp_seq_afinfo udplite6_seq_afinfo = {
104 }, 104 },
105}; 105};
106 106
107static int udplite6_proc_init_net(struct net *net) 107static int __net_init udplite6_proc_init_net(struct net *net)
108{ 108{
109 return udp_proc_register(net, &udplite6_seq_afinfo); 109 return udp_proc_register(net, &udplite6_seq_afinfo);
110} 110}
111 111
112static void udplite6_proc_exit_net(struct net *net) 112static void __net_exit udplite6_proc_exit_net(struct net *net)
113{ 113{
114 udp_proc_unregister(net, &udplite6_seq_afinfo); 114 udp_proc_unregister(net, &udplite6_seq_afinfo);
115} 115}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 9084582d236b..2bc98ede1235 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -101,7 +101,7 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
101 break; 101 break;
102 } 102 }
103 103
104 x = xfrm_state_lookup_byaddr(net, dst, src, proto, AF_INET6); 104 x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
105 if (!x) 105 if (!x)
106 continue; 106 continue;
107 107
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 3927832227b9..b809812c8d30 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -5,6 +5,7 @@
5 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> 5 * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
6 */ 6 */
7 7
8#include <linux/gfp.h>
8#include <linux/init.h> 9#include <linux/init.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/module.h> 11#include <linux/module.h>
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a3..0c92112dcba3 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
38 38
39 if (!skb->local_df && skb->len > mtu) { 39 if (!skb->local_df && skb->len > mtu) {
40 skb->dev = dst->dev; 40 skb->dev = dst->dev;
41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
42 ret = -EMSGSIZE; 42 ret = -EMSGSIZE;
43 } 43 }
44 44
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696f5fc5..00bf7c962b7e 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,14 +116,15 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
116 return 0; 116 return 0;
117} 117}
118 118
119static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 119static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
120 struct flowi *fl)
120{ 121{
121 struct rt6_info *rt = (struct rt6_info*)xdst->route; 122 struct rt6_info *rt = (struct rt6_info*)xdst->route;
122 123
123 xdst->u.dst.dev = dev; 124 xdst->u.dst.dev = dev;
124 dev_hold(dev); 125 dev_hold(dev);
125 126
126 xdst->u.rt6.rt6i_idev = in6_dev_get(rt->u.dst.dev); 127 xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
127 if (!xdst->u.rt6.rt6i_idev) 128 if (!xdst->u.rt6.rt6i_idev)
128 return -ENODEV; 129 return -ENODEV;
129 130
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 438831d33593..2ce3a8278f26 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -23,6 +23,7 @@
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/xfrm.h> 25#include <linux/xfrm.h>
26#include <linux/slab.h>
26#include <linux/rculist.h> 27#include <linux/rculist.h>
27#include <net/ip.h> 28#include <net/ip.h>
28#include <net/xfrm.h> 29#include <net/xfrm.h>
@@ -30,6 +31,25 @@
30#include <linux/ipv6.h> 31#include <linux/ipv6.h>
31#include <linux/icmpv6.h> 32#include <linux/icmpv6.h>
32#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <net/netns/generic.h>
35
36#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
37#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
38
39#define XFRM6_TUNNEL_SPI_MIN 1
40#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
41
42struct xfrm6_tunnel_net {
43 struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
44 struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
45 u32 spi;
46};
47
48static int xfrm6_tunnel_net_id __read_mostly;
49static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
50{
51 return net_generic(net, xfrm6_tunnel_net_id);
52}
33 53
34/* 54/*
35 * xfrm_tunnel_spi things are for allocating unique id ("spi") 55 * xfrm_tunnel_spi things are for allocating unique id ("spi")
@@ -46,19 +66,8 @@ struct xfrm6_tunnel_spi {
46 66
47static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); 67static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
48 68
49static u32 xfrm6_tunnel_spi;
50
51#define XFRM6_TUNNEL_SPI_MIN 1
52#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
53
54static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
55 70
56#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
57#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
58
59static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
60static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
61
62static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
63{ 72{
64 unsigned h; 73 unsigned h;
@@ -76,50 +85,14 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
76 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
77} 86}
78 87
79 88static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
80static int xfrm6_tunnel_spi_init(void)
81{
82 int i;
83
84 xfrm6_tunnel_spi = 0;
85 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
86 sizeof(struct xfrm6_tunnel_spi),
87 0, SLAB_HWCACHE_ALIGN,
88 NULL);
89 if (!xfrm6_tunnel_spi_kmem)
90 return -ENOMEM;
91
92 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
93 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
94 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
95 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
96 return 0;
97}
98
99static void xfrm6_tunnel_spi_fini(void)
100{
101 int i;
102
103 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
104 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
105 return;
106 }
107 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
108 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
109 return;
110 }
111 rcu_barrier();
112 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
113 xfrm6_tunnel_spi_kmem = NULL;
114}
115
116static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
117{ 89{
90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
118 struct xfrm6_tunnel_spi *x6spi; 91 struct xfrm6_tunnel_spi *x6spi;
119 struct hlist_node *pos; 92 struct hlist_node *pos;
120 93
121 hlist_for_each_entry_rcu(x6spi, pos, 94 hlist_for_each_entry_rcu(x6spi, pos,
122 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 95 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
123 list_byaddr) { 96 list_byaddr) {
124 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 97 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
125 return x6spi; 98 return x6spi;
@@ -128,13 +101,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
128 return NULL; 101 return NULL;
129} 102}
130 103
131__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 104__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
132{ 105{
133 struct xfrm6_tunnel_spi *x6spi; 106 struct xfrm6_tunnel_spi *x6spi;
134 u32 spi; 107 u32 spi;
135 108
136 rcu_read_lock_bh(); 109 rcu_read_lock_bh();
137 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 110 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
138 spi = x6spi ? x6spi->spi : 0; 111 spi = x6spi ? x6spi->spi : 0;
139 rcu_read_unlock_bh(); 112 rcu_read_unlock_bh();
140 return htonl(spi); 113 return htonl(spi);
@@ -142,14 +115,15 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
142 115
143EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); 116EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
144 117
145static int __xfrm6_tunnel_spi_check(u32 spi) 118static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
146{ 119{
120 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
147 struct xfrm6_tunnel_spi *x6spi; 121 struct xfrm6_tunnel_spi *x6spi;
148 int index = xfrm6_tunnel_spi_hash_byspi(spi); 122 int index = xfrm6_tunnel_spi_hash_byspi(spi);
149 struct hlist_node *pos; 123 struct hlist_node *pos;
150 124
151 hlist_for_each_entry(x6spi, pos, 125 hlist_for_each_entry(x6spi, pos,
152 &xfrm6_tunnel_spi_byspi[index], 126 &xfrm6_tn->spi_byspi[index],
153 list_byspi) { 127 list_byspi) {
154 if (x6spi->spi == spi) 128 if (x6spi->spi == spi)
155 return -1; 129 return -1;
@@ -157,61 +131,61 @@ static int __xfrm6_tunnel_spi_check(u32 spi)
157 return index; 131 return index;
158} 132}
159 133
160static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 134static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
161{ 135{
136 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
162 u32 spi; 137 u32 spi;
163 struct xfrm6_tunnel_spi *x6spi; 138 struct xfrm6_tunnel_spi *x6spi;
164 int index; 139 int index;
165 140
166 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 141 if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
167 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 142 xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
168 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 143 xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
169 else 144 else
170 xfrm6_tunnel_spi++; 145 xfrm6_tn->spi++;
171 146
172 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 147 for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
173 index = __xfrm6_tunnel_spi_check(spi); 148 index = __xfrm6_tunnel_spi_check(net, spi);
174 if (index >= 0) 149 if (index >= 0)
175 goto alloc_spi; 150 goto alloc_spi;
176 } 151 }
177 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 152 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
178 index = __xfrm6_tunnel_spi_check(spi); 153 index = __xfrm6_tunnel_spi_check(net, spi);
179 if (index >= 0) 154 if (index >= 0)
180 goto alloc_spi; 155 goto alloc_spi;
181 } 156 }
182 spi = 0; 157 spi = 0;
183 goto out; 158 goto out;
184alloc_spi: 159alloc_spi:
185 xfrm6_tunnel_spi = spi; 160 xfrm6_tn->spi = spi;
186 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); 161 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
187 if (!x6spi) 162 if (!x6spi)
188 goto out; 163 goto out;
189 164
190 INIT_RCU_HEAD(&x6spi->rcu_head);
191 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 165 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
192 x6spi->spi = spi; 166 x6spi->spi = spi;
193 atomic_set(&x6spi->refcnt, 1); 167 atomic_set(&x6spi->refcnt, 1);
194 168
195 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); 169 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
196 170
197 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 171 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
198 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 172 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
199out: 173out:
200 return spi; 174 return spi;
201} 175}
202 176
203__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 177__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
204{ 178{
205 struct xfrm6_tunnel_spi *x6spi; 179 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 180 u32 spi;
207 181
208 spin_lock_bh(&xfrm6_tunnel_spi_lock); 182 spin_lock_bh(&xfrm6_tunnel_spi_lock);
209 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 183 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
210 if (x6spi) { 184 if (x6spi) {
211 atomic_inc(&x6spi->refcnt); 185 atomic_inc(&x6spi->refcnt);
212 spi = x6spi->spi; 186 spi = x6spi->spi;
213 } else 187 } else
214 spi = __xfrm6_tunnel_alloc_spi(saddr); 188 spi = __xfrm6_tunnel_alloc_spi(net, saddr);
215 spin_unlock_bh(&xfrm6_tunnel_spi_lock); 189 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
216 190
217 return htonl(spi); 191 return htonl(spi);
@@ -225,15 +199,16 @@ static void x6spi_destroy_rcu(struct rcu_head *head)
225 container_of(head, struct xfrm6_tunnel_spi, rcu_head)); 199 container_of(head, struct xfrm6_tunnel_spi, rcu_head));
226} 200}
227 201
228void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) 202void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
229{ 203{
204 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
230 struct xfrm6_tunnel_spi *x6spi; 205 struct xfrm6_tunnel_spi *x6spi;
231 struct hlist_node *pos, *n; 206 struct hlist_node *pos, *n;
232 207
233 spin_lock_bh(&xfrm6_tunnel_spi_lock); 208 spin_lock_bh(&xfrm6_tunnel_spi_lock);
234 209
235 hlist_for_each_entry_safe(x6spi, pos, n, 210 hlist_for_each_entry_safe(x6spi, pos, n,
236 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 211 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
237 list_byaddr) 212 list_byaddr)
238 { 213 {
239 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 214 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
@@ -263,10 +238,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
263 238
264static int xfrm6_tunnel_rcv(struct sk_buff *skb) 239static int xfrm6_tunnel_rcv(struct sk_buff *skb)
265{ 240{
241 struct net *net = dev_net(skb->dev);
266 struct ipv6hdr *iph = ipv6_hdr(skb); 242 struct ipv6hdr *iph = ipv6_hdr(skb);
267 __be32 spi; 243 __be32 spi;
268 244
269 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); 245 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
270 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 246 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
271} 247}
272 248
@@ -326,7 +302,9 @@ static int xfrm6_tunnel_init_state(struct xfrm_state *x)
326 302
327static void xfrm6_tunnel_destroy(struct xfrm_state *x) 303static void xfrm6_tunnel_destroy(struct xfrm_state *x)
328{ 304{
329 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 305 struct net *net = xs_net(x);
306
307 xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
330} 308}
331 309
332static const struct xfrm_type xfrm6_tunnel_type = { 310static const struct xfrm_type xfrm6_tunnel_type = {
@@ -351,34 +329,73 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
351 .priority = 2, 329 .priority = 2,
352}; 330};
353 331
332static int __net_init xfrm6_tunnel_net_init(struct net *net)
333{
334 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
335 unsigned int i;
336
337 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
338 INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
339 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
340 INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
341 xfrm6_tn->spi = 0;
342
343 return 0;
344}
345
346static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
347{
348}
349
350static struct pernet_operations xfrm6_tunnel_net_ops = {
351 .init = xfrm6_tunnel_net_init,
352 .exit = xfrm6_tunnel_net_exit,
353 .id = &xfrm6_tunnel_net_id,
354 .size = sizeof(struct xfrm6_tunnel_net),
355};
356
354static int __init xfrm6_tunnel_init(void) 357static int __init xfrm6_tunnel_init(void)
355{ 358{
356 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) 359 int rv;
357 goto err; 360
358 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) 361 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
359 goto unreg; 362 sizeof(struct xfrm6_tunnel_spi),
360 if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) 363 0, SLAB_HWCACHE_ALIGN,
361 goto dereg6; 364 NULL);
362 if (xfrm6_tunnel_spi_init() < 0) 365 if (!xfrm6_tunnel_spi_kmem)
363 goto dereg46; 366 return -ENOMEM;
367 rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
368 if (rv < 0)
369 goto out_pernet;
370 rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
371 if (rv < 0)
372 goto out_type;
373 rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
374 if (rv < 0)
375 goto out_xfrm6;
376 rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
377 if (rv < 0)
378 goto out_xfrm46;
364 return 0; 379 return 0;
365 380
366dereg46: 381out_xfrm46:
367 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
368dereg6:
369 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 382 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
370unreg: 383out_xfrm6:
371 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 384 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
372err: 385out_type:
373 return -EAGAIN; 386 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
387out_pernet:
388 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
389 return rv;
374} 390}
375 391
376static void __exit xfrm6_tunnel_fini(void) 392static void __exit xfrm6_tunnel_fini(void)
377{ 393{
378 xfrm6_tunnel_spi_fini();
379 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); 394 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
380 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 395 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
381 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 396 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
397 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
398 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
382} 399}
383 400
384module_init(xfrm6_tunnel_init); 401module_init(xfrm6_tunnel_init);
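
The xfrm6_tunnel.c hunks above are the usual per-netns conversion: the file-scope SPI hash tables and counter become fields of a per-namespace structure that the pernet core allocates and hands back through net_generic(). Condensed from the right-hand column, the new layout looks like this (function bodies trimmed):

	struct xfrm6_tunnel_net {
		struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
		struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
		u32 spi;
	};

	static int xfrm6_tunnel_net_id __read_mostly;

	static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
	{
		return net_generic(net, xfrm6_tunnel_net_id);
	}

	static struct pernet_operations xfrm6_tunnel_net_ops = {
		.init = xfrm6_tunnel_net_init,	/* INIT_HLIST_HEAD() on both tables, spi = 0 */
		.exit = xfrm6_tunnel_net_exit,
		.id   = &xfrm6_tunnel_net_id,
		.size = sizeof(struct xfrm6_tunnel_net),
	};

Because .id and .size are set, register_pernet_subsys() allocates the structure for every namespace, so the old xfrm6_tunnel_spi_init()/_fini() setup goes away and each lookup/alloc/free helper simply gains a struct net argument. The kmem_cache for SPI entries stays global; it is now created in module init and destroyed in module exit, with the error path unwinding through unregister_pernet_subsys() and kmem_cache_destroy().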
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index f9759b54a6de..da3d21c41d90 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -40,6 +40,7 @@
40#include <linux/net.h> 40#include <linux/net.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/uio.h> 42#include <linux/uio.h>
43#include <linux/slab.h>
43#include <linux/skbuff.h> 44#include <linux/skbuff.h>
44#include <linux/smp_lock.h> 45#include <linux/smp_lock.h>
45#include <linux/socket.h> 46#include <linux/socket.h>
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 576178482f89..26b5bfcf1d03 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -13,45 +13,15 @@
13#include <net/tcp_states.h> 13#include <net/tcp_states.h>
14#include <net/ipx.h> 14#include <net/ipx.h>
15 15
16static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
17{
18 struct ipx_interface *i;
19
20 list_for_each_entry(i, &ipx_interfaces, node)
21 if (!pos--)
22 goto out;
23 i = NULL;
24out:
25 return i;
26}
27
28static struct ipx_interface *ipx_interfaces_next(struct ipx_interface *i)
29{
30 struct ipx_interface *rc = NULL;
31
32 if (i->node.next != &ipx_interfaces)
33 rc = list_entry(i->node.next, struct ipx_interface, node);
34 return rc;
35}
36
37static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos) 16static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
38{ 17{
39 loff_t l = *pos;
40
41 spin_lock_bh(&ipx_interfaces_lock); 18 spin_lock_bh(&ipx_interfaces_lock);
42 return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN; 19 return seq_list_start_head(&ipx_interfaces, *pos);
43} 20}
44 21
45static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos) 22static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
46{ 23{
47 struct ipx_interface *i; 24 return seq_list_next(v, &ipx_interfaces, pos);
48
49 ++*pos;
50 if (v == SEQ_START_TOKEN)
51 i = ipx_interfaces_head();
52 else
53 i = ipx_interfaces_next(v);
54 return i;
55} 25}
56 26
57static void ipx_seq_interface_stop(struct seq_file *seq, void *v) 27static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
@@ -63,7 +33,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
63{ 33{
64 struct ipx_interface *i; 34 struct ipx_interface *i;
65 35
66 if (v == SEQ_START_TOKEN) { 36 if (v == &ipx_interfaces) {
67 seq_puts(seq, "Network Node_Address Primary Device " 37 seq_puts(seq, "Network Node_Address Primary Device "
68 "Frame_Type"); 38 "Frame_Type");
69#ifdef IPX_REFCNT_DEBUG 39#ifdef IPX_REFCNT_DEBUG
@@ -73,7 +43,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
73 goto out; 43 goto out;
74 } 44 }
75 45
76 i = v; 46 i = list_entry(v, struct ipx_interface, node);
77 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); 47 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
78 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", 48 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
79 i->if_node[0], i->if_node[1], i->if_node[2], 49 i->if_node[0], i->if_node[1], i->if_node[2],
@@ -89,53 +59,15 @@ out:
89 return 0; 59 return 0;
90} 60}
91 61
92static struct ipx_route *ipx_routes_head(void)
93{
94 struct ipx_route *rc = NULL;
95
96 if (!list_empty(&ipx_routes))
97 rc = list_entry(ipx_routes.next, struct ipx_route, node);
98 return rc;
99}
100
101static struct ipx_route *ipx_routes_next(struct ipx_route *r)
102{
103 struct ipx_route *rc = NULL;
104
105 if (r->node.next != &ipx_routes)
106 rc = list_entry(r->node.next, struct ipx_route, node);
107 return rc;
108}
109
110static __inline__ struct ipx_route *ipx_get_route_idx(loff_t pos)
111{
112 struct ipx_route *r;
113
114 list_for_each_entry(r, &ipx_routes, node)
115 if (!pos--)
116 goto out;
117 r = NULL;
118out:
119 return r;
120}
121
122static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos) 62static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
123{ 63{
124 loff_t l = *pos;
125 read_lock_bh(&ipx_routes_lock); 64 read_lock_bh(&ipx_routes_lock);
126 return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN; 65 return seq_list_start_head(&ipx_routes, *pos);
127} 66}
128 67
129static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 68static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
130{ 69{
131 struct ipx_route *r; 70 return seq_list_next(v, &ipx_routes, pos);
132
133 ++*pos;
134 if (v == SEQ_START_TOKEN)
135 r = ipx_routes_head();
136 else
137 r = ipx_routes_next(v);
138 return r;
139} 71}
140 72
141static void ipx_seq_route_stop(struct seq_file *seq, void *v) 73static void ipx_seq_route_stop(struct seq_file *seq, void *v)
@@ -147,11 +79,13 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
147{ 79{
148 struct ipx_route *rt; 80 struct ipx_route *rt;
149 81
150 if (v == SEQ_START_TOKEN) { 82 if (v == &ipx_routes) {
151 seq_puts(seq, "Network Router_Net Router_Node\n"); 83 seq_puts(seq, "Network Router_Net Router_Node\n");
152 goto out; 84 goto out;
153 } 85 }
154 rt = v; 86
87 rt = list_entry(v, struct ipx_route, node);
88
155 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); 89 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
156 if (rt->ir_routed) 90 if (rt->ir_routed)
157 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", 91 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
@@ -226,9 +160,9 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
226 spin_unlock_bh(&i->if_sklist_lock); 160 spin_unlock_bh(&i->if_sklist_lock);
227 sk = NULL; 161 sk = NULL;
228 for (;;) { 162 for (;;) {
229 i = ipx_interfaces_next(i); 163 if (i->node.next == &ipx_interfaces)
230 if (!i)
231 break; 164 break;
165 i = list_entry(i->node.next, struct ipx_interface, node);
232 spin_lock_bh(&i->if_sklist_lock); 166 spin_lock_bh(&i->if_sklist_lock);
233 if (!hlist_empty(&i->if_sklist)) { 167 if (!hlist_empty(&i->if_sklist)) {
234 sk = sk_head(&i->if_sklist); 168 sk = sk_head(&i->if_sklist);
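
Taken together, the ipx_proc.c hunks replace three hand-rolled iterator helpers with the generic seq_list_*() cursor API. The resulting pattern, reassembled from the right-hand column, is short enough to quote:

	static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
	{
		spin_lock_bh(&ipx_interfaces_lock);
		/* position 0 returns &ipx_interfaces itself, which stands in
		 * for the old SEQ_START_TOKEN header marker */
		return seq_list_start_head(&ipx_interfaces, *pos);
	}

	static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return seq_list_next(v, &ipx_interfaces, pos);
	}

The show callbacks change accordingly: they test v == &ipx_interfaces (or v == &ipx_routes) to print the header line and otherwise recover the entry with list_entry(v, ..., node). The same conversion is applied to irlan_seq_start()/irlan_seq_next() in net/irda/irlan/irlan_common.c further down.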
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index e16c11423527..30f4519b092f 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/route.h> 11#include <linux/route.h>
12#include <linux/slab.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
13 14
14#include <net/ipx.h> 15#include <net/ipx.h>
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 10093aab6173..2a4efcea3423 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -48,6 +48,7 @@
48#include <linux/smp_lock.h> 48#include <linux/smp_lock.h>
49#include <linux/socket.h> 49#include <linux/socket.h>
50#include <linux/sockios.h> 50#include <linux/sockios.h>
51#include <linux/slab.h>
51#include <linux/init.h> 52#include <linux/init.h>
52#include <linux/net.h> 53#include <linux/net.h>
53#include <linux/irda.h> 54#include <linux/irda.h>
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index a6f99b5a1499..c1c8ae939126 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -34,6 +34,7 @@
34#include <linux/socket.h> 34#include <linux/socket.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/slab.h>
37 38
38#include <net/irda/irda.h> 39#include <net/irda/irda.h>
39#include <net/irda/irlmp.h> 40#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index 018c92941aba..e97082017f4f 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -33,6 +33,7 @@
33#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/slab.h>
36 37
37#include <net/irda/irda.h> 38#include <net/irda/irda.h>
38#include <net/irda/irmod.h> 39#include <net/irda/irmod.h>
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 7ba96618660e..08fb54dc8c41 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -31,6 +31,7 @@
31 ********************************************************************/ 31 ********************************************************************/
32 32
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/gfp.h>
34 35
35#include <net/irda/irda.h> 36#include <net/irda/irda.h>
36#include <net/irda/irlmp.h> 37#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index d57aefd9fe77..e2e893b474e9 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -28,6 +28,7 @@
28 * 28 *
29 ********************************************************************/ 29 ********************************************************************/
30 30
31#include <linux/gfp.h>
31#include <linux/workqueue.h> 32#include <linux/workqueue.h>
32#include <linux/interrupt.h> 33#include <linux/interrupt.h>
33 34
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324b..faa82ca2dfdc 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/slab.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
37#include <linux/seq_file.h> 38#include <linux/seq_file.h>
38#include <linux/termios.h> 39#include <linux/termios.h>
@@ -496,9 +497,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
496 497
497 IRDA_DEBUG(0, "%s()\n", __func__ ); 498 IRDA_DEBUG(0, "%s()\n", __func__ );
498 499
499 if (!tty)
500 return;
501
502 IRDA_ASSERT(self != NULL, return;); 500 IRDA_ASSERT(self != NULL, return;);
503 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 501 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
504 502
@@ -1007,9 +1005,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1007 IRDA_ASSERT(self != NULL, return;); 1005 IRDA_ASSERT(self != NULL, return;);
1008 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1006 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
1009 1007
1010 if (!tty)
1011 return;
1012
1013 /* ircomm_tty_flush_buffer(tty); */ 1008 /* ircomm_tty_flush_buffer(tty); */
1014 ircomm_tty_shutdown(self); 1009 ircomm_tty_shutdown(self);
1015 1010
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index bf92e1473447..25cc2e695158 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -41,6 +41,7 @@
41#include <linux/tty.h> 41#include <linux/tty.h>
42#include <linux/kmod.h> 42#include <linux/kmod.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/slab.h>
44 45
45#include <asm/ioctls.h> 46#include <asm/ioctls.h>
46#include <asm/uaccess.h> 47#include <asm/uaccess.h>
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 294e34d3517c..79a1e5a23e10 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -31,6 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/slab.h>
34 35
35#include <asm/byteorder.h> 36#include <asm/byteorder.h>
36#include <asm/unaligned.h> 37#include <asm/unaligned.h>
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index a301cbd93785..703774e29e32 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -24,6 +24,8 @@
24 * 24 *
25 ********************************************************************/ 25 ********************************************************************/
26 26
27#include <linux/slab.h>
28
27#include <net/irda/irda.h> 29#include <net/irda/irda.h>
28#include <net/irda/irlmp.h> 30#include <net/irda/irlmp.h>
29#include <net/irda/iriap.h> 31#include <net/irda/iriap.h>
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 99ebb96f1386..f07ed9fd5792 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -22,6 +22,7 @@
22 * 22 *
23 ********************************************************************/ 23 ********************************************************************/
24 24
25#include <linux/slab.h>
25#include <linux/string.h> 26#include <linux/string.h>
26#include <linux/socket.h> 27#include <linux/socket.h>
27#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 42f7d960d055..7ed3af957935 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/slab.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3cb926..a788f9e9427d 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/gfp.h>
30#include <linux/init.h> 31#include <linux/init.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
@@ -1128,34 +1129,14 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1128 */ 1129 */
1129static void *irlan_seq_start(struct seq_file *seq, loff_t *pos) 1130static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
1130{ 1131{
1131 int i = 1;
1132 struct irlan_cb *self;
1133
1134 rcu_read_lock(); 1132 rcu_read_lock();
1135 if (*pos == 0) 1133 return seq_list_start_head(&irlans, *pos);
1136 return SEQ_START_TOKEN;
1137
1138 list_for_each_entry(self, &irlans, dev_list) {
1139 if (*pos == i)
1140 return self;
1141 ++i;
1142 }
1143 return NULL;
1144} 1134}
1145 1135
1146/* Return entry after v, and increment pos */ 1136/* Return entry after v, and increment pos */
1147static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1137static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1148{ 1138{
1149 struct list_head *nxt; 1139 return seq_list_next(v, &irlans, pos);
1150
1151 ++*pos;
1152 if (v == SEQ_START_TOKEN)
1153 nxt = irlans.next;
1154 else
1155 nxt = ((struct irlan_cb *)v)->dev_list.next;
1156
1157 return (nxt == &irlans) ? NULL
1158 : list_entry(nxt, struct irlan_cb, dev_list);
1159} 1140}
1160 1141
1161/* End of reading /proc file */ 1142/* End of reading /proc file */
@@ -1170,10 +1151,10 @@ static void irlan_seq_stop(struct seq_file *seq, void *v)
1170 */ 1151 */
1171static int irlan_seq_show(struct seq_file *seq, void *v) 1152static int irlan_seq_show(struct seq_file *seq, void *v)
1172{ 1153{
1173 if (v == SEQ_START_TOKEN) 1154 if (v == &irlans)
1174 seq_puts(seq, "IrLAN instances:\n"); 1155 seq_puts(seq, "IrLAN instances:\n");
1175 else { 1156 else {
1176 struct irlan_cb *self = v; 1157 struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
1177 1158
1178 IRDA_ASSERT(self != NULL, return -1;); 1159 IRDA_ASSERT(self != NULL, return -1;);
1179 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 1160 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d340110f5c0c..9616c32d1076 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -321,14 +321,15 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
321 /* Enable promiscuous mode */ 321 /* Enable promiscuous mode */
322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); 322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
323 } 323 }
324 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { 324 else if ((dev->flags & IFF_ALLMULTI) ||
325 netdev_mc_count(dev) > HW_MAX_ADDRS) {
325 /* Disable promiscuous mode, use normal mode. */ 326 /* Disable promiscuous mode, use normal mode. */
326 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 327 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
327 /* hardware_set_filter(NULL); */ 328 /* hardware_set_filter(NULL); */
328 329
329 irlan_set_multicast_filter(self, TRUE); 330 irlan_set_multicast_filter(self, TRUE);
330 } 331 }
331 else if (dev->mc_count) { 332 else if (!netdev_mc_empty(dev)) {
332 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 333 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
333 /* Walk the address list, and load the filter */ 334 /* Walk the address list, and load the filter */
334 /* hardware_set_filter(dev->mc_list); */ 335 /* hardware_set_filter(dev->mc_list); */
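
The irlan_eth.c hunk is part of the tree-wide move away from reading dev->mc_count directly; the multicast decision in irlan_eth_set_multicast_list() now reads (right-hand column, condensed to the branches visible in the hunk):

	else if ((dev->flags & IFF_ALLMULTI) ||
		 netdev_mc_count(dev) > HW_MAX_ADDRS) {
		/* too many addresses: fall back to the catch-all filter */
		irlan_set_multicast_filter(self, TRUE);
	} else if (!netdev_mc_empty(dev)) {
		/* walk the address list and load the hardware filter */
	}

netdev_mc_count() and netdev_mc_empty() are the accessor helpers that replace the raw dev->mc_count reads.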
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 3f81f81b2dfa..5cf5e6c872bb 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -34,6 +34,7 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/random.h> 35#include <linux/random.h>
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/slab.h>
37 38
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/byteorder.h> 40#include <asm/byteorder.h>
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 94a9884d7146..d434c8880745 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/slab.h>
32 33
33#include <net/irda/irda.h> 34#include <net/irda/irda.h>
34#include <net/irda/irlap_event.h> 35#include <net/irda/irlap_event.h>
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 7af2e74deda8..688222cbf55b 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -29,6 +29,7 @@
29#include <linux/if_ether.h> 29#include <linux/if_ether.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/irda.h> 31#include <linux/irda.h>
32#include <linux/slab.h>
32 33
33#include <net/pkt_sched.h> 34#include <net/pkt_sched.h>
34#include <net/sock.h> 35#include <net/sock.h>
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index b26dee784aba..df18ab4b6c5e 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -11,6 +11,7 @@
11#include "irnet_irda.h" /* Private header */ 11#include "irnet_irda.h" /* Private header */
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/seq_file.h> 13#include <linux/seq_file.h>
14#include <linux/slab.h>
14#include <asm/unaligned.h> 15#include <asm/unaligned.h>
15 16
16/* 17/*
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 6b3602de359a..6a1a202710c5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/slab.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include "irnet_ppp.h" /* Private header */ 19#include "irnet_ppp.h" /* Private header */
19/* Please put other headers in irnet.h - Thanks */ 20/* Please put other headers in irnet.h - Thanks */
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd801..6c7c4b92e4f8 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/socket.h> 16#include <linux/socket.h>
17#include <linux/irda.h> 17#include <linux/irda.h>
18#include <linux/gfp.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/irda/irda.h> 21#include <net/irda/irda.h>
@@ -124,7 +125,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
124 return ret; 125 return ret;
125} 126}
126 127
127static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = { 128static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
128 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, 129 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
129 .len = IFNAMSIZ-1 }, 130 .len = IFNAMSIZ-1 },
130 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 }, 131 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index ba01938becb5..849aaf0dabb5 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -192,6 +192,7 @@
192 * Jean II 192 * Jean II
193 */ 193 */
194#include <linux/module.h> 194#include <linux/module.h>
195#include <linux/slab.h>
195 196
196#include <net/irda/irda.h> 197#include <net/irda/irda.h>
197#include <net/irda/irqueue.h> 198#include <net/irda/irqueue.h>
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 9cb79f95bf63..47db1d8a0d92 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/slab.h>
31 32
32#include <asm/byteorder.h> 33#include <asm/byteorder.h>
33#include <asm/unaligned.h> 34#include <asm/unaligned.h>
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 539f43bc97db..ba9a3fcc2fed 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -26,6 +26,7 @@
26#include <linux/in6.h> 26#include <linux/in6.h>
27#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/slab.h>
29#include <net/net_namespace.h> 30#include <net/net_namespace.h>
30#include <net/netns/generic.h> 31#include <net/netns/generic.h>
31#include <net/xfrm.h> 32#include <net/xfrm.h>
@@ -41,10 +42,10 @@ struct netns_pfkey {
41 struct hlist_head table; 42 struct hlist_head table;
42 atomic_t socks_nr; 43 atomic_t socks_nr;
43}; 44};
44static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait); 45static DEFINE_MUTEX(pfkey_mutex);
45static DEFINE_RWLOCK(pfkey_table_lock);
46static atomic_t pfkey_table_users = ATOMIC_INIT(0);
47 46
47#define DUMMY_MARK 0
48static struct xfrm_mark dummy_mark = {0, 0};
48struct pfkey_sock { 49struct pfkey_sock {
49 /* struct sock must be the first member of struct pfkey_sock */ 50 /* struct sock must be the first member of struct pfkey_sock */
50 struct sock sk; 51 struct sock sk;
@@ -108,50 +109,6 @@ static void pfkey_sock_destruct(struct sock *sk)
108 atomic_dec(&net_pfkey->socks_nr); 109 atomic_dec(&net_pfkey->socks_nr);
109} 110}
110 111
111static void pfkey_table_grab(void)
112{
113 write_lock_bh(&pfkey_table_lock);
114
115 if (atomic_read(&pfkey_table_users)) {
116 DECLARE_WAITQUEUE(wait, current);
117
118 add_wait_queue_exclusive(&pfkey_table_wait, &wait);
119 for(;;) {
120 set_current_state(TASK_UNINTERRUPTIBLE);
121 if (atomic_read(&pfkey_table_users) == 0)
122 break;
123 write_unlock_bh(&pfkey_table_lock);
124 schedule();
125 write_lock_bh(&pfkey_table_lock);
126 }
127
128 __set_current_state(TASK_RUNNING);
129 remove_wait_queue(&pfkey_table_wait, &wait);
130 }
131}
132
133static __inline__ void pfkey_table_ungrab(void)
134{
135 write_unlock_bh(&pfkey_table_lock);
136 wake_up(&pfkey_table_wait);
137}
138
139static __inline__ void pfkey_lock_table(void)
140{
141 /* read_lock() synchronizes us to pfkey_table_grab */
142
143 read_lock(&pfkey_table_lock);
144 atomic_inc(&pfkey_table_users);
145 read_unlock(&pfkey_table_lock);
146}
147
148static __inline__ void pfkey_unlock_table(void)
149{
150 if (atomic_dec_and_test(&pfkey_table_users))
151 wake_up(&pfkey_table_wait);
152}
153
154
155static const struct proto_ops pfkey_ops; 112static const struct proto_ops pfkey_ops;
156 113
157static void pfkey_insert(struct sock *sk) 114static void pfkey_insert(struct sock *sk)
@@ -159,16 +116,16 @@ static void pfkey_insert(struct sock *sk)
159 struct net *net = sock_net(sk); 116 struct net *net = sock_net(sk);
160 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 117 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
161 118
162 pfkey_table_grab(); 119 mutex_lock(&pfkey_mutex);
163 sk_add_node(sk, &net_pfkey->table); 120 sk_add_node_rcu(sk, &net_pfkey->table);
164 pfkey_table_ungrab(); 121 mutex_unlock(&pfkey_mutex);
165} 122}
166 123
167static void pfkey_remove(struct sock *sk) 124static void pfkey_remove(struct sock *sk)
168{ 125{
169 pfkey_table_grab(); 126 mutex_lock(&pfkey_mutex);
170 sk_del_node_init(sk); 127 sk_del_node_init_rcu(sk);
171 pfkey_table_ungrab(); 128 mutex_unlock(&pfkey_mutex);
172} 129}
173 130
174static struct proto key_proto = { 131static struct proto key_proto = {
@@ -223,6 +180,8 @@ static int pfkey_release(struct socket *sock)
223 sock_orphan(sk); 180 sock_orphan(sk);
224 sock->sk = NULL; 181 sock->sk = NULL;
225 skb_queue_purge(&sk->sk_write_queue); 182 skb_queue_purge(&sk->sk_write_queue);
183
184 synchronize_rcu();
226 sock_put(sk); 185 sock_put(sk);
227 186
228 return 0; 187 return 0;
@@ -277,8 +236,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
277 if (!skb) 236 if (!skb)
278 return -ENOMEM; 237 return -ENOMEM;
279 238
280 pfkey_lock_table(); 239 rcu_read_lock();
281 sk_for_each(sk, node, &net_pfkey->table) { 240 sk_for_each_rcu(sk, node, &net_pfkey->table) {
282 struct pfkey_sock *pfk = pfkey_sk(sk); 241 struct pfkey_sock *pfk = pfkey_sk(sk);
283 int err2; 242 int err2;
284 243
@@ -309,7 +268,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
309 if ((broadcast_flags & BROADCAST_REGISTERED) && err) 268 if ((broadcast_flags & BROADCAST_REGISTERED) && err)
310 err = err2; 269 err = err2;
311 } 270 }
312 pfkey_unlock_table(); 271 rcu_read_unlock();
313 272
314 if (one_sk != NULL) 273 if (one_sk != NULL)
315 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 274 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -691,7 +650,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
691 if (!xaddr) 650 if (!xaddr)
692 return NULL; 651 return NULL;
693 652
694 return xfrm_state_lookup(net, xaddr, sa->sadb_sa_spi, proto, family); 653 return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi, proto, family);
695} 654}
696 655
697#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 656#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
@@ -1360,7 +1319,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1360 } 1319 }
1361 1320
1362 if (hdr->sadb_msg_seq) { 1321 if (hdr->sadb_msg_seq) {
1363 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1322 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1364 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) { 1323 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) {
1365 xfrm_state_put(x); 1324 xfrm_state_put(x);
1366 x = NULL; 1325 x = NULL;
@@ -1368,7 +1327,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1368 } 1327 }
1369 1328
1370 if (!x) 1329 if (!x)
1371 x = xfrm_find_acq(net, mode, reqid, proto, xdaddr, xsaddr, 1, family); 1330 x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family);
1372 1331
1373 if (x == NULL) 1332 if (x == NULL)
1374 return -ENOENT; 1333 return -ENOENT;
@@ -1417,7 +1376,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
1417 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0) 1376 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
1418 return 0; 1377 return 0;
1419 1378
1420 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1379 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1421 if (x == NULL) 1380 if (x == NULL)
1422 return 0; 1381 return 0;
1423 1382
@@ -1712,6 +1671,23 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
1712 return 0; 1671 return 0;
1713} 1672}
1714 1673
1674static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
1675{
1676 struct sk_buff *skb;
1677 struct sadb_msg *hdr;
1678
1679 skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
1680 if (!skb)
1681 return -ENOBUFS;
1682
1683 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
1684 memcpy(hdr, ihdr, sizeof(struct sadb_msg));
1685 hdr->sadb_msg_errno = (uint8_t) 0;
1686 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1687
1688 return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
1689}
1690
1715static int key_notify_sa_flush(struct km_event *c) 1691static int key_notify_sa_flush(struct km_event *c)
1716{ 1692{
1717 struct sk_buff *skb; 1693 struct sk_buff *skb;
@@ -1740,7 +1716,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1740 unsigned proto; 1716 unsigned proto;
1741 struct km_event c; 1717 struct km_event c;
1742 struct xfrm_audit audit_info; 1718 struct xfrm_audit audit_info;
1743 int err; 1719 int err, err2;
1744 1720
1745 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1721 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1746 if (proto == 0) 1722 if (proto == 0)
@@ -1750,8 +1726,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1750 audit_info.sessionid = audit_get_sessionid(current); 1726 audit_info.sessionid = audit_get_sessionid(current);
1751 audit_info.secid = 0; 1727 audit_info.secid = 0;
1752 err = xfrm_state_flush(net, proto, &audit_info); 1728 err = xfrm_state_flush(net, proto, &audit_info);
1753 if (err) 1729 err2 = unicast_flush_resp(sk, hdr);
1754 return err; 1730 if (err || err2) {
1731 if (err == -ESRCH) /* empty table - go quietly */
1732 err = 0;
1733 return err ? err : err2;
1734 }
1735
1755 c.data.proto = proto; 1736 c.data.proto = proto;
1756 c.seq = hdr->sadb_msg_seq; 1737 c.seq = hdr->sadb_msg_seq;
1757 c.pid = hdr->sadb_msg_pid; 1738 c.pid = hdr->sadb_msg_pid;
@@ -2149,10 +2130,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2149 int err; 2130 int err;
2150 2131
2151 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2132 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2152 if (IS_ERR(out_skb)) { 2133 if (IS_ERR(out_skb))
2153 err = PTR_ERR(out_skb); 2134 return PTR_ERR(out_skb);
2154 goto out; 2135
2155 }
2156 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2136 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2157 if (err < 0) 2137 if (err < 0)
2158 return err; 2138 return err;
@@ -2168,7 +2148,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2168 out_hdr->sadb_msg_seq = c->seq; 2148 out_hdr->sadb_msg_seq = c->seq;
2169 out_hdr->sadb_msg_pid = c->pid; 2149 out_hdr->sadb_msg_pid = c->pid;
2170 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); 2150 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2171out:
2172 return 0; 2151 return 0;
2173 2152
2174} 2153}
@@ -2346,7 +2325,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2346 return err; 2325 return err;
2347 } 2326 }
2348 2327
2349 xp = xfrm_policy_bysel_ctx(net, XFRM_POLICY_TYPE_MAIN, 2328 xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2350 pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 2329 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
2351 1, &err); 2330 1, &err);
2352 security_xfrm_policy_free(pol_ctx); 2331 security_xfrm_policy_free(pol_ctx);
@@ -2594,8 +2573,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2594 return -EINVAL; 2573 return -EINVAL;
2595 2574
2596 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2575 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
2597 xp = xfrm_policy_byid(net, XFRM_POLICY_TYPE_MAIN, dir, 2576 xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2598 pol->sadb_x_policy_id, delete, &err); 2577 dir, pol->sadb_x_policy_id, delete, &err);
2599 if (xp == NULL) 2578 if (xp == NULL)
2600 return -ENOENT; 2579 return -ENOENT;
2601 2580
@@ -2706,14 +2685,19 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2706 struct net *net = sock_net(sk); 2685 struct net *net = sock_net(sk);
2707 struct km_event c; 2686 struct km_event c;
2708 struct xfrm_audit audit_info; 2687 struct xfrm_audit audit_info;
2709 int err; 2688 int err, err2;
2710 2689
2711 audit_info.loginuid = audit_get_loginuid(current); 2690 audit_info.loginuid = audit_get_loginuid(current);
2712 audit_info.sessionid = audit_get_sessionid(current); 2691 audit_info.sessionid = audit_get_sessionid(current);
2713 audit_info.secid = 0; 2692 audit_info.secid = 0;
2714 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2693 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2715 if (err) 2694 err2 = unicast_flush_resp(sk, hdr);
2695 if (err || err2) {
2696 if (err == -ESRCH) /* empty table - old silent behavior */
2697 return 0;
2716 return err; 2698 return err;
2699 }
2700
2717 c.data.type = XFRM_POLICY_TYPE_MAIN; 2701 c.data.type = XFRM_POLICY_TYPE_MAIN;
2718 c.event = XFRM_MSG_FLUSHPOLICY; 2702 c.event = XFRM_MSG_FLUSHPOLICY;
2719 c.pid = hdr->sadb_msg_pid; 2703 c.pid = hdr->sadb_msg_pid;
@@ -3019,12 +3003,11 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
3019static u32 get_acqseq(void) 3003static u32 get_acqseq(void)
3020{ 3004{
3021 u32 res; 3005 u32 res;
3022 static u32 acqseq; 3006 static atomic_t acqseq;
3023 static DEFINE_SPINLOCK(acqseq_lock);
3024 3007
3025 spin_lock_bh(&acqseq_lock); 3008 do {
3026 res = (++acqseq ? : ++acqseq); 3009 res = atomic_inc_return(&acqseq);
3027 spin_unlock_bh(&acqseq_lock); 3010 } while (!res);
3028 return res; 3011 return res;
3029} 3012}
3030 3013
@@ -3655,9 +3638,8 @@ static const struct net_proto_family pfkey_family_ops = {
3655#ifdef CONFIG_PROC_FS 3638#ifdef CONFIG_PROC_FS
3656static int pfkey_seq_show(struct seq_file *f, void *v) 3639static int pfkey_seq_show(struct seq_file *f, void *v)
3657{ 3640{
3658 struct sock *s; 3641 struct sock *s = sk_entry(v);
3659 3642
3660 s = (struct sock *)v;
3661 if (v == SEQ_START_TOKEN) 3643 if (v == SEQ_START_TOKEN)
3662 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); 3644 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
3663 else 3645 else
@@ -3676,19 +3658,9 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
3676{ 3658{
3677 struct net *net = seq_file_net(f); 3659 struct net *net = seq_file_net(f);
3678 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3660 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3679 struct sock *s;
3680 struct hlist_node *node;
3681 loff_t pos = *ppos;
3682
3683 read_lock(&pfkey_table_lock);
3684 if (pos == 0)
3685 return SEQ_START_TOKEN;
3686 3661
3687 sk_for_each(s, node, &net_pfkey->table) 3662 rcu_read_lock();
3688 if (pos-- == 1) 3663 return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
3689 return s;
3690
3691 return NULL;
3692} 3664}
3693 3665
3694static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) 3666static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3696,15 +3668,12 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
3696 struct net *net = seq_file_net(f); 3668 struct net *net = seq_file_net(f);
3697 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3669 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3698 3670
3699 ++*ppos; 3671 return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
3700 return (v == SEQ_START_TOKEN) ?
3701 sk_head(&net_pfkey->table) :
3702 sk_next((struct sock *)v);
3703} 3672}
3704 3673
3705static void pfkey_seq_stop(struct seq_file *f, void *v) 3674static void pfkey_seq_stop(struct seq_file *f, void *v)
3706{ 3675{
3707 read_unlock(&pfkey_table_lock); 3676 rcu_read_unlock();
3708} 3677}
3709 3678
3710static const struct seq_operations pfkey_seq_ops = { 3679static const struct seq_operations pfkey_seq_ops = {
@@ -3738,17 +3707,17 @@ static int __net_init pfkey_init_proc(struct net *net)
3738 return 0; 3707 return 0;
3739} 3708}
3740 3709
3741static void pfkey_exit_proc(struct net *net) 3710static void __net_exit pfkey_exit_proc(struct net *net)
3742{ 3711{
3743 proc_net_remove(net, "pfkey"); 3712 proc_net_remove(net, "pfkey");
3744} 3713}
3745#else 3714#else
3746static int __net_init pfkey_init_proc(struct net *net) 3715static inline int pfkey_init_proc(struct net *net)
3747{ 3716{
3748 return 0; 3717 return 0;
3749} 3718}
3750 3719
3751static void pfkey_exit_proc(struct net *net) 3720static inline void pfkey_exit_proc(struct net *net)
3752{ 3721{
3753} 3722}
3754#endif 3723#endif
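
The af_key.c changes bundle several things; the structural one is that the custom pfkey_table_grab()/pfkey_lock_table() scheme is replaced by a mutex for writers plus RCU for readers. Condensed from the right-hand column:

	static DEFINE_MUTEX(pfkey_mutex);

	static void pfkey_insert(struct sock *sk)
	{
		struct net *net = sock_net(sk);
		struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);

		mutex_lock(&pfkey_mutex);
		sk_add_node_rcu(sk, &net_pfkey->table);
		mutex_unlock(&pfkey_mutex);
	}

	/* readers, e.g. pfkey_broadcast() and the /proc seq_file hooks */
	rcu_read_lock();
	sk_for_each_rcu(sk, node, &net_pfkey->table) {
		/* ... deliver a copy to each pfkey socket ... */
	}
	rcu_read_unlock();

pfkey_release() gains a synchronize_rcu() before the final sock_put() so a socket cannot be freed while a reader is still walking the table. In the same spirit, get_acqseq() drops its spinlock for a wrap-safe atomic counter:

	static u32 get_acqseq(void)
	{
		static atomic_t acqseq;
		u32 res;

		do {
			res = atomic_inc_return(&acqseq);
		} while (!res);	/* never hand out 0 */
		return res;
	}

The remaining hunks are the xfrm mark plumbing (DUMMY_MARK/dummy_mark passed into the state and policy lookups) and the flush paths, which now send a unicast reply via unicast_flush_resp() and treat -ESRCH from an empty table as the old silent success.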
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index bda96d18fd98..d5d8d555c410 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -29,6 +29,7 @@
29#include <linux/inet.h> 29#include <linux/inet.h>
30#include <linux/if_arp.h> 30#include <linux/if_arp.h>
31#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/slab.h>
32#include <net/sock.h> 33#include <net/sock.h>
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
34#include <asm/system.h> 35#include <asm/system.h>
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 6762e7c751eb..21904a002449 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -27,6 +27,7 @@
27#include <linux/inet.h> 27#include <linux/inet.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/slab.h>
30#include <net/sock.h> 31#include <net/sock.h>
31#include <asm/uaccess.h> 32#include <asm/uaccess.h>
32#include <asm/system.h> 33#include <asm/system.h>
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index 339cc5f2684f..c75a79540f9f 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -25,6 +25,7 @@
25#include <linux/net.h> 25#include <linux/net.h>
26#include <linux/inet.h> 26#include <linux/inet.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/slab.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/system.h> 31#include <asm/system.h>
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index b827f47ac133..43a2a7fb327b 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -24,6 +24,7 @@
24#include <linux/net.h> 24#include <linux/net.h>
25#include <linux/inet.h> 25#include <linux/inet.h>
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h>
27#include <net/sock.h> 28#include <net/sock.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/system.h> 30#include <asm/system.h>
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad06..2db6a9f75913 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/rtnetlink.h> 26#include <linux/rtnetlink.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/slab.h>
28#include <net/llc.h> 29#include <net/llc.h>
29#include <net/llc_sap.h> 30#include <net/llc_sap.h>
30#include <net/llc_pdu.h> 31#include <net/llc_pdu.h>
@@ -47,6 +48,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
47#define dprintk(args...) 48#define dprintk(args...)
48#endif 49#endif
49 50
51/* Maybe we'll add some more in the future. */
52#define LLC_CMSG_PKTINFO 1
53
54
50/** 55/**
51 * llc_ui_next_link_no - return the next unused link number for a sap 56 * llc_ui_next_link_no - return the next unused link number for a sap
52 * @sap: Address of sap to get link number from. 57 * @sap: Address of sap to get link number from.
@@ -136,6 +141,7 @@ static struct proto llc_proto = {
136 .name = "LLC", 141 .name = "LLC",
137 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
138 .obj_size = sizeof(struct llc_sock), 143 .obj_size = sizeof(struct llc_sock),
144 .slab_flags = SLAB_DESTROY_BY_RCU,
139}; 145};
140 146
141/** 147/**
@@ -192,10 +198,8 @@ static int llc_ui_release(struct socket *sock)
192 llc->laddr.lsap, llc->daddr.lsap); 198 llc->laddr.lsap, llc->daddr.lsap);
193 if (!llc_send_disc(sk)) 199 if (!llc_send_disc(sk))
194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 200 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
195 if (!sock_flag(sk, SOCK_ZAPPED)) { 201 if (!sock_flag(sk, SOCK_ZAPPED))
196 llc_sap_put(llc->sap);
197 llc_sap_remove_socket(llc->sap, sk); 202 llc_sap_remove_socket(llc->sap, sk);
198 }
199 release_sock(sk); 203 release_sock(sk);
200 if (llc->dev) 204 if (llc->dev)
201 dev_put(llc->dev); 205 dev_put(llc->dev);
@@ -255,7 +259,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
255 if (!sock_flag(sk, SOCK_ZAPPED)) 259 if (!sock_flag(sk, SOCK_ZAPPED))
256 goto out; 260 goto out;
257 rc = -ENODEV; 261 rc = -ENODEV;
258 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); 262 if (sk->sk_bound_dev_if) {
263 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
264 if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
265 dev_put(llc->dev);
266 llc->dev = NULL;
267 }
268 } else
269 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
259 if (!llc->dev) 270 if (!llc->dev)
260 goto out; 271 goto out;
261 rc = -EUSERS; 272 rc = -EUSERS;
@@ -306,7 +317,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
306 goto out; 317 goto out;
307 rc = -ENODEV; 318 rc = -ENODEV;
308 rtnl_lock(); 319 rtnl_lock();
309 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); 320 if (sk->sk_bound_dev_if) {
321 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
322 if (llc->dev) {
323 if (!addr->sllc_arphrd)
324 addr->sllc_arphrd = llc->dev->type;
325 if (llc_mac_null(addr->sllc_mac))
326 memcpy(addr->sllc_mac, llc->dev->dev_addr,
327 IFHWADDRLEN);
328 if (addr->sllc_arphrd != llc->dev->type ||
329 !llc_mac_match(addr->sllc_mac,
330 llc->dev->dev_addr)) {
331 rc = -EINVAL;
332 dev_put(llc->dev);
333 llc->dev = NULL;
334 }
335 }
336 } else
337 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
338 addr->sllc_mac);
310 rtnl_unlock(); 339 rtnl_unlock();
311 if (!llc->dev) 340 if (!llc->dev)
312 goto out; 341 goto out;
@@ -322,7 +351,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
322 rc = -EBUSY; /* some other network layer is using the sap */ 351 rc = -EBUSY; /* some other network layer is using the sap */
323 if (!sap) 352 if (!sap)
324 goto out; 353 goto out;
325 llc_sap_hold(sap);
326 } else { 354 } else {
327 struct llc_addr laddr, daddr; 355 struct llc_addr laddr, daddr;
328 struct sock *ask; 356 struct sock *ask;
@@ -591,6 +619,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
591 return rc; 619 return rc;
592} 620}
593 621
622static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
623{
624 struct llc_sock *llc = llc_sk(skb->sk);
625
626 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
627 struct llc_pktinfo info;
628
629 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
630 llc_pdu_decode_dsap(skb, &info.lpi_sap);
631 llc_pdu_decode_da(skb, info.lpi_mac);
632 put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
633 }
634}
635
594/** 636/**
595 * llc_ui_accept - accept a new incoming connection. 637 * llc_ui_accept - accept a new incoming connection.
596 * @sock: Socket which connections arrive on. 638 * @sock: Socket which connections arrive on.
@@ -812,6 +854,8 @@ copy_uaddr:
812 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 854 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
813 msg->msg_namelen = sizeof(*uaddr); 855 msg->msg_namelen = sizeof(*uaddr);
814 } 856 }
857 if (llc_sk(sk)->cmsg_flags)
858 llc_cmsg_rcv(msg, skb);
815 goto out; 859 goto out;
816} 860}
817 861
@@ -1030,6 +1074,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
1030 goto out; 1074 goto out;
1031 llc->rw = opt; 1075 llc->rw = opt;
1032 break; 1076 break;
1077 case LLC_OPT_PKTINFO:
1078 if (opt)
1079 llc->cmsg_flags |= LLC_CMSG_PKTINFO;
1080 else
1081 llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
1082 break;
1033 default: 1083 default:
1034 rc = -ENOPROTOOPT; 1084 rc = -ENOPROTOOPT;
1035 goto out; 1085 goto out;
@@ -1083,6 +1133,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
1083 val = llc->k; break; 1133 val = llc->k; break;
1084 case LLC_OPT_RX_WIN: 1134 case LLC_OPT_RX_WIN:
1085 val = llc->rw; break; 1135 val = llc->rw; break;
1136 case LLC_OPT_PKTINFO:
1137 val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
1138 break;
1086 default: 1139 default:
1087 rc = -ENOPROTOOPT; 1140 rc = -ENOPROTOOPT;
1088 goto out; 1141 goto out;
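
The af_llc.c hunks above add a per-socket LLC_OPT_PKTINFO flag, deliver a struct llc_pktinfo as ancillary data from llc_cmsg_rcv(), and teach bind()/autobind to honour sk_bound_dev_if. Below is a minimal userspace sketch of how a datagram LLC socket might exercise both changes. It is only a sketch: it assumes a kernel carrying this patch whose <linux/llc.h> exports SOL_LLC, LLC_OPT_PKTINFO, struct llc_pktinfo and struct sockaddr_llc to userspace, and the interface name "eth0" and SAP value 0x42 are placeholders.

/*
 * Hedged sketch: receive one 802.2 LLC datagram and print the
 * LLC_OPT_PKTINFO ancillary data produced by llc_cmsg_rcv() above.
 * Assumes <linux/llc.h> from a kernel with this patch exports
 * SOL_LLC, LLC_OPT_PKTINFO, struct llc_pktinfo and struct sockaddr_llc.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <net/if_arp.h>         /* ARPHRD_ETHER */
#include <linux/llc.h>          /* AF_LLC, SOL_LLC, LLC_OPT_PKTINFO, ... */

int main(void)
{
	int fd = socket(AF_LLC, SOCK_DGRAM, 0);
	if (fd < 0) { perror("socket(AF_LLC)"); return 1; }

	/* Pin the socket to one interface so llc_ui_bind() takes the new
	 * sk_bound_dev_if path above and fills in arphrd/MAC for us. */
	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", 5); /* name is an assumption */

	struct sockaddr_llc me = {
		.sllc_family = AF_LLC,
		.sllc_arphrd = ARPHRD_ETHER,
		.sllc_sap    = 0x42,            /* example SAP, assumption */
	};
	if (bind(fd, (struct sockaddr *)&me, sizeof(me)) < 0) {
		perror("bind"); return 1;
	}

	int on = 1;
	setsockopt(fd, SOL_LLC, LLC_OPT_PKTINFO, &on, sizeof(on));

	char data[1500];
	char cbuf[CMSG_SPACE(sizeof(struct llc_pktinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};

	ssize_t n = recvmsg(fd, &msg, 0);
	if (n < 0) { perror("recvmsg"); return 1; }

	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == SOL_LLC && c->cmsg_type == LLC_OPT_PKTINFO) {
			struct llc_pktinfo info;
			memcpy(&info, CMSG_DATA(c), sizeof(info));
			printf("%zd bytes, ifindex %d, dsap 0x%02x\n",
			       n, info.lpi_ifindex, info.lpi_sap);
		}
	}
	close(fd);
	return 0;
}

With the option set, every recvmsg() on the socket carries the ingress ifindex, destination SAP and destination MAC of the PDU, matching the fields llc_cmsg_rcv() fills in above.
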
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..ea225bd2672c 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -18,6 +18,7 @@
18 * See the GNU General Public License for more details. 18 * See the GNU General Public License for more details.
19 */ 19 */
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/slab.h>
21#include <net/llc_conn.h> 22#include <net/llc_conn.h>
22#include <net/llc_sap.h> 23#include <net/llc_sap.h>
23#include <net/sock.h> 24#include <net/sock.h>
@@ -1437,7 +1438,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
1437 llc_conn_state_process(sk, skb); 1438 llc_conn_state_process(sk, skb);
1438 else { 1439 else {
1439 llc_set_backlog_type(skb, LLC_EVENT); 1440 llc_set_backlog_type(skb, LLC_EVENT);
1440 sk_add_backlog(sk, skb); 1441 __sk_add_backlog(sk, skb);
1441 } 1442 }
1442 } 1443 }
1443} 1444}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018e..ba137a6a224d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h>
16#include <net/llc_sap.h> 17#include <net/llc_sap.h>
17#include <net/llc_conn.h> 18#include <net/llc_conn.h>
18#include <net/sock.h> 19#include <net/sock.h>
@@ -468,6 +469,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
468 return rc; 469 return rc;
469} 470}
470 471
472static inline bool llc_estab_match(const struct llc_sap *sap,
473 const struct llc_addr *daddr,
474 const struct llc_addr *laddr,
475 const struct sock *sk)
476{
477 struct llc_sock *llc = llc_sk(sk);
478
479 return llc->laddr.lsap == laddr->lsap &&
480 llc->daddr.lsap == daddr->lsap &&
481 llc_mac_match(llc->laddr.mac, laddr->mac) &&
482 llc_mac_match(llc->daddr.mac, daddr->mac);
483}
484
471/** 485/**
472 * __llc_lookup_established - Finds connection for the remote/local sap/mac 486 * __llc_lookup_established - Finds connection for the remote/local sap/mac
473 * @sap: SAP 487 * @sap: SAP
@@ -484,23 +498,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
484 struct llc_addr *laddr) 498 struct llc_addr *laddr)
485{ 499{
486 struct sock *rc; 500 struct sock *rc;
487 struct hlist_node *node; 501 struct hlist_nulls_node *node;
488 502 int slot = llc_sk_laddr_hashfn(sap, laddr);
489 read_lock(&sap->sk_list.lock); 503 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
490 sk_for_each(rc, node, &sap->sk_list.list) { 504
491 struct llc_sock *llc = llc_sk(rc); 505 rcu_read_lock();
492 506again:
493 if (llc->laddr.lsap == laddr->lsap && 507 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
494 llc->daddr.lsap == daddr->lsap && 508 if (llc_estab_match(sap, daddr, laddr, rc)) {
495 llc_mac_match(llc->laddr.mac, laddr->mac) && 509 /* Extra checks required by SLAB_DESTROY_BY_RCU */
496 llc_mac_match(llc->daddr.mac, daddr->mac)) { 510 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
497 sock_hold(rc); 511 goto again;
512 if (unlikely(llc_sk(rc)->sap != sap ||
513 !llc_estab_match(sap, daddr, laddr, rc))) {
514 sock_put(rc);
515 continue;
516 }
498 goto found; 517 goto found;
499 } 518 }
500 } 519 }
501 rc = NULL; 520 rc = NULL;
521 /*
522 * if the nulls value we got at the end of this lookup is
523 * not the expected one, we must restart lookup.
524 * We probably met an item that was moved to another chain.
525 */
526 if (unlikely(get_nulls_value(node) != slot))
527 goto again;
502found: 528found:
503 read_unlock(&sap->sk_list.lock); 529 rcu_read_unlock();
504 return rc; 530 return rc;
505} 531}
506 532
@@ -516,6 +542,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
516 return sk; 542 return sk;
517} 543}
518 544
545static inline bool llc_listener_match(const struct llc_sap *sap,
546 const struct llc_addr *laddr,
547 const struct sock *sk)
548{
549 struct llc_sock *llc = llc_sk(sk);
550
551 return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
552 llc->laddr.lsap == laddr->lsap &&
553 llc_mac_match(llc->laddr.mac, laddr->mac);
554}
555
556static struct sock *__llc_lookup_listener(struct llc_sap *sap,
557 struct llc_addr *laddr)
558{
559 struct sock *rc;
560 struct hlist_nulls_node *node;
561 int slot = llc_sk_laddr_hashfn(sap, laddr);
562 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
563
564 rcu_read_lock();
565again:
566 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
567 if (llc_listener_match(sap, laddr, rc)) {
568 /* Extra checks required by SLAB_DESTROY_BY_RCU */
569 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
570 goto again;
571 if (unlikely(llc_sk(rc)->sap != sap ||
572 !llc_listener_match(sap, laddr, rc))) {
573 sock_put(rc);
574 continue;
575 }
576 goto found;
577 }
578 }
579 rc = NULL;
580 /*
581 * if the nulls value we got at the end of this lookup is
582 * not the expected one, we must restart lookup.
583 * We probably met an item that was moved to another chain.
584 */
585 if (unlikely(get_nulls_value(node) != slot))
586 goto again;
587found:
588 rcu_read_unlock();
589 return rc;
590}
591
519/** 592/**
520 * llc_lookup_listener - Finds listener for local MAC + SAP 593 * llc_lookup_listener - Finds listener for local MAC + SAP
521 * @sap: SAP 594 * @sap: SAP
@@ -529,24 +602,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
529static struct sock *llc_lookup_listener(struct llc_sap *sap, 602static struct sock *llc_lookup_listener(struct llc_sap *sap,
530 struct llc_addr *laddr) 603 struct llc_addr *laddr)
531{ 604{
532 struct sock *rc; 605 static struct llc_addr null_addr;
533 struct hlist_node *node; 606 struct sock *rc = __llc_lookup_listener(sap, laddr);
534 607
535 read_lock(&sap->sk_list.lock); 608 if (!rc)
536 sk_for_each(rc, node, &sap->sk_list.list) { 609 rc = __llc_lookup_listener(sap, &null_addr);
537 struct llc_sock *llc = llc_sk(rc);
538 610
539 if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
540 llc->laddr.lsap == laddr->lsap &&
541 (llc_mac_match(llc->laddr.mac, laddr->mac) ||
542 llc_mac_null(llc->laddr.mac))) {
543 sock_hold(rc);
544 goto found;
545 }
546 }
547 rc = NULL;
548found:
549 read_unlock(&sap->sk_list.lock);
550 return rc; 611 return rc;
551} 612}
552 613
@@ -647,15 +708,22 @@ static int llc_find_offset(int state, int ev_type)
647 * @sap: SAP 708 * @sap: SAP
648 * @sk: socket 709 * @sk: socket
649 * 710 *
650 * This function adds a socket to sk_list of a SAP. 711 * This function adds a socket to the hash tables of a SAP.
651 */ 712 */
652void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) 713void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
653{ 714{
715 struct llc_sock *llc = llc_sk(sk);
716 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
717 struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
718
654 llc_sap_hold(sap); 719 llc_sap_hold(sap);
655 write_lock_bh(&sap->sk_list.lock);
656 llc_sk(sk)->sap = sap; 720 llc_sk(sk)->sap = sap;
657 sk_add_node(sk, &sap->sk_list.list); 721
658 write_unlock_bh(&sap->sk_list.lock); 722 spin_lock_bh(&sap->sk_lock);
723 sap->sk_count++;
724 sk_nulls_add_node_rcu(sk, laddr_hb);
725 hlist_add_head(&llc->dev_hash_node, dev_hb);
726 spin_unlock_bh(&sap->sk_lock);
659} 727}
660 728
661/** 729/**
@@ -663,14 +731,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
663 * @sap: SAP 731 * @sap: SAP
664 * @sk: socket 732 * @sk: socket
665 * 733 *
666 * This function removes a connection from sk_list.list of a SAP if 734 * This function removes a connection from the hash tables of a SAP if
667 * the connection was in this list. 735 * the connection was in this list.
668 */ 736 */
669void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) 737void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
670{ 738{
671 write_lock_bh(&sap->sk_list.lock); 739 struct llc_sock *llc = llc_sk(sk);
672 sk_del_node_init(sk); 740
673 write_unlock_bh(&sap->sk_list.lock); 741 spin_lock_bh(&sap->sk_lock);
742 sk_nulls_del_node_init_rcu(sk);
743 hlist_del(&llc->dev_hash_node);
744 sap->sk_count--;
745 spin_unlock_bh(&sap->sk_lock);
674 llc_sap_put(sap); 746 llc_sap_put(sap);
675} 747}
676 748
@@ -756,7 +828,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
756 else { 828 else {
757 dprintk("%s: adding to backlog...\n", __func__); 829 dprintk("%s: adding to backlog...\n", __func__);
758 llc_set_backlog_type(skb, LLC_PACKET); 830 llc_set_backlog_type(skb, LLC_PACKET);
759 sk_add_backlog(sk, skb); 831 if (sk_add_backlog(sk, skb))
832 goto drop_unlock;
760 } 833 }
761out: 834out:
762 bh_unlock_sock(sk); 835 bh_unlock_sock(sk);
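
The llc_conn.c lookups above move to RCU over hlist_nulls chains with SLAB_DESTROY_BY_RCU, which is why each hit is re-validated after atomic_inc_not_zero() and why the walk restarts when the terminating nulls value is not the expected slot. The sketch below is a single-threaded userspace toy of just the nulls end-marker idea: every chain ends in an odd sentinel encoding its bucket, so a reader that was carried onto another chain can notice and retry. The types and helpers are simplified stand-ins, not the kernel's hlist_nulls API.

/*
 * Simplified, single-threaded illustration of the "nulls" end marker used by
 * the hlist_nulls lookups above.  Each chain is terminated not by NULL but by
 * an odd sentinel that encodes the bucket number, so an RCU reader that was
 * carried onto another chain (possible when freed objects are reused under
 * SLAB_DESTROY_BY_RCU) can detect it and restart.  Toy types, no locking.
 */
#include <stdio.h>
#include <stdint.h>

#define NBUCKETS 8

struct node {
	uintptr_t key;
	struct node *next;              /* real node, or a nulls marker */
};

static struct node *head[NBUCKETS];

static inline struct node *make_nulls(unsigned int bucket)
{
	return (struct node *)(((uintptr_t)bucket << 1) | 1UL);
}
static inline int is_nulls(const struct node *p)
{
	return ((uintptr_t)p & 1UL) != 0;
}
static inline unsigned int nulls_value(const struct node *p)
{
	return (unsigned int)((uintptr_t)p >> 1);
}

static unsigned int hashfn(uintptr_t key) { return key % NBUCKETS; }

static void init_table(void)
{
	for (unsigned int i = 0; i < NBUCKETS; i++)
		head[i] = make_nulls(i);        /* empty chain ends in its own marker */
}

static void insert(struct node *n)
{
	unsigned int b = hashfn(n->key);
	n->next = head[b];
	head[b] = n;
}

static struct node *lookup(uintptr_t key)
{
	unsigned int slot = hashfn(key);
	struct node *p;
again:
	for (p = head[slot]; !is_nulls(p); p = p->next)
		if (p->key == key)
			return p;       /* the kernel re-checks the match here */
	/*
	 * Ended on a marker belonging to a different bucket: a concurrent
	 * writer could have moved us onto another chain, so restart.
	 */
	if (nulls_value(p) != slot)
		goto again;
	return NULL;
}

int main(void)
{
	struct node a = { .key = 3 }, b = { .key = 11 };

	init_table();
	insert(&a);
	insert(&b);                     /* keys 3 and 11 share bucket 3 */
	printf("found 11: %s\n", lookup(11) ? "yes" : "no");
	printf("found  5: %s\n", lookup(5)  ? "yes" : "no");
	return 0;
}

The marker matters because, with SLAB_DESTROY_BY_RCU, a socket's memory can be reused for a socket on a different chain without waiting for a grace period; a plain NULL terminator could not tell the reader which chain it actually finished on.
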
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a69..78167e81dfeb 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
23#include <net/llc.h> 23#include <net/llc.h>
24 24
25LIST_HEAD(llc_sap_list); 25LIST_HEAD(llc_sap_list);
26DEFINE_RWLOCK(llc_sap_list_lock); 26DEFINE_SPINLOCK(llc_sap_list_lock);
27 27
28/** 28/**
29 * llc_sap_alloc - allocates and initializes sap. 29 * llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
33static struct llc_sap *llc_sap_alloc(void) 33static struct llc_sap *llc_sap_alloc(void)
34{ 34{
35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
36 int i;
36 37
37 if (sap) { 38 if (sap) {
38 /* sap->laddr.mac - leave as a null, it's filled by bind */ 39 /* sap->laddr.mac - leave as a null, it's filled by bind */
39 sap->state = LLC_SAP_STATE_ACTIVE; 40 sap->state = LLC_SAP_STATE_ACTIVE;
40 rwlock_init(&sap->sk_list.lock); 41 spin_lock_init(&sap->sk_lock);
42 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
43 INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
41 atomic_set(&sap->refcnt, 1); 44 atomic_set(&sap->refcnt, 1);
42 } 45 }
43 return sap; 46 return sap;
44} 47}
45 48
46/**
47 * llc_add_sap - add sap to station list
48 * @sap: Address of the sap
49 *
50 * Adds a sap to the LLC's station sap list.
51 */
52static void llc_add_sap(struct llc_sap *sap)
53{
54 list_add_tail(&sap->node, &llc_sap_list);
55}
56
57/**
58 * llc_del_sap - del sap from station list
59 * @sap: Address of the sap
60 *
61 * Removes a sap to the LLC's station sap list.
62 */
63static void llc_del_sap(struct llc_sap *sap)
64{
65 write_lock_bh(&llc_sap_list_lock);
66 list_del(&sap->node);
67 write_unlock_bh(&llc_sap_list_lock);
68}
69
70static struct llc_sap *__llc_sap_find(unsigned char sap_value) 49static struct llc_sap *__llc_sap_find(unsigned char sap_value)
71{ 50{
72 struct llc_sap* sap; 51 struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
90 */ 69 */
91struct llc_sap *llc_sap_find(unsigned char sap_value) 70struct llc_sap *llc_sap_find(unsigned char sap_value)
92{ 71{
93 struct llc_sap* sap; 72 struct llc_sap *sap;
94 73
95 read_lock_bh(&llc_sap_list_lock); 74 rcu_read_lock_bh();
96 sap = __llc_sap_find(sap_value); 75 sap = __llc_sap_find(sap_value);
97 if (sap) 76 if (sap)
98 llc_sap_hold(sap); 77 llc_sap_hold(sap);
99 read_unlock_bh(&llc_sap_list_lock); 78 rcu_read_unlock_bh();
100 return sap; 79 return sap;
101} 80}
102 81
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
117{ 96{
118 struct llc_sap *sap = NULL; 97 struct llc_sap *sap = NULL;
119 98
120 write_lock_bh(&llc_sap_list_lock); 99 spin_lock_bh(&llc_sap_list_lock);
121 if (__llc_sap_find(lsap)) /* SAP already exists */ 100 if (__llc_sap_find(lsap)) /* SAP already exists */
122 goto out; 101 goto out;
123 sap = llc_sap_alloc(); 102 sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
125 goto out; 104 goto out;
126 sap->laddr.lsap = lsap; 105 sap->laddr.lsap = lsap;
127 sap->rcv_func = func; 106 sap->rcv_func = func;
128 llc_add_sap(sap); 107 list_add_tail_rcu(&sap->node, &llc_sap_list);
129out: 108out:
130 write_unlock_bh(&llc_sap_list_lock); 109 spin_unlock_bh(&llc_sap_list_lock);
131 return sap; 110 return sap;
132} 111}
133 112
@@ -142,8 +121,14 @@ out:
142 */ 121 */
143void llc_sap_close(struct llc_sap *sap) 122void llc_sap_close(struct llc_sap *sap)
144{ 123{
145 WARN_ON(!hlist_empty(&sap->sk_list.list)); 124 WARN_ON(sap->sk_count);
146 llc_del_sap(sap); 125
126 spin_lock_bh(&llc_sap_list_lock);
127 list_del_rcu(&sap->node);
128 spin_unlock_bh(&llc_sap_list_lock);
129
130 synchronize_rcu();
131
147 kfree(sap); 132 kfree(sap);
148} 133}
149 134
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index a89917130a7b..25c31c0a3fdb 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -11,6 +11,7 @@
11 * 11 *
12 * See the GNU General Public License for more details. 12 * See the GNU General Public License for more details.
13 */ 13 */
14#include <linux/gfp.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/netdevice.h> 17#include <linux/netdevice.h>
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 57ad974e4d94..f99687439139 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -12,6 +12,7 @@
12 * See the GNU General Public License for more details. 12 * See the GNU General Public License for more details.
13 */ 13 */
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/slab.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/llc.h> 17#include <net/llc.h>
17#include <net/llc_pdu.h> 18#include <net/llc_pdu.h>
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc852..b38a1079a98e 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
33int llc_mac_hdr_init(struct sk_buff *skb, 33int llc_mac_hdr_init(struct sk_buff *skb,
34 const unsigned char *sa, const unsigned char *da) 34 const unsigned char *sa, const unsigned char *da)
35{ 35{
36 int rc = 0; 36 int rc = -EINVAL;
37 37
38 switch (skb->dev->type) { 38 switch (skb->dev->type) {
39#ifdef CONFIG_TR 39 case ARPHRD_IEEE802_TR:
40 case ARPHRD_IEEE802_TR: {
41 struct net_device *dev = skb->dev;
42 struct trh_hdr *trh;
43
44 skb_push(skb, sizeof(*trh));
45 skb_reset_mac_header(skb);
46 trh = tr_hdr(skb);
47 trh->ac = AC;
48 trh->fc = LLC_FRAME;
49 if (sa)
50 memcpy(trh->saddr, sa, dev->addr_len);
51 else
52 memset(trh->saddr, 0, dev->addr_len);
53 if (da) {
54 memcpy(trh->daddr, da, dev->addr_len);
55 tr_source_route(skb, trh, dev);
56 skb_reset_mac_header(skb);
57 }
58 break;
59 }
60#endif
61 case ARPHRD_ETHER: 40 case ARPHRD_ETHER:
62 case ARPHRD_LOOPBACK: { 41 case ARPHRD_LOOPBACK:
63 unsigned short len = skb->len; 42 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
64 struct ethhdr *eth; 43 skb->len);
65 44 if (rc > 0)
66 skb_push(skb, sizeof(*eth)); 45 rc = 0;
67 skb_reset_mac_header(skb);
68 eth = eth_hdr(skb);
69 eth->h_proto = htons(len);
70 memcpy(eth->h_dest, da, ETH_ALEN);
71 memcpy(eth->h_source, sa, ETH_ALEN);
72 break; 46 break;
73 }
74 default: 47 default:
75 printk(KERN_WARNING "device type not supported: %d\n", 48 WARN(1, "device type not supported: %d\n", skb->dev->type);
76 skb->dev->type);
77 rc = -EINVAL;
78 } 49 }
79 return rc; 50 return rc;
80} 51}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6b..7af1ff2d1f19 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
32 32
33static struct sock *llc_get_sk_idx(loff_t pos) 33static struct sock *llc_get_sk_idx(loff_t pos)
34{ 34{
35 struct list_head *sap_entry;
36 struct llc_sap *sap; 35 struct llc_sap *sap;
37 struct hlist_node *node;
38 struct sock *sk = NULL; 36 struct sock *sk = NULL;
39 37 int i;
40 list_for_each(sap_entry, &llc_sap_list) { 38
41 sap = list_entry(sap_entry, struct llc_sap, node); 39 list_for_each_entry_rcu(sap, &llc_sap_list, node) {
42 40 spin_lock_bh(&sap->sk_lock);
43 read_lock_bh(&sap->sk_list.lock); 41 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
44 sk_for_each(sk, node, &sap->sk_list.list) { 42 struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
45 if (!pos) 43 struct hlist_nulls_node *node;
46 goto found; 44
47 --pos; 45 sk_nulls_for_each(sk, node, head) {
46 if (!pos)
47 goto found; /* keep the lock */
48 --pos;
49 }
48 } 50 }
49 read_unlock_bh(&sap->sk_list.lock); 51 spin_unlock_bh(&sap->sk_lock);
50 } 52 }
51 sk = NULL; 53 sk = NULL;
52found: 54found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
57{ 59{
58 loff_t l = *pos; 60 loff_t l = *pos;
59 61
60 read_lock_bh(&llc_sap_list_lock); 62 rcu_read_lock_bh();
61 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN; 63 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
62} 64}
63 65
66static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
67{
68 struct hlist_nulls_node *node;
69 struct sock *sk = NULL;
70
71 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
72 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
73 goto out;
74
75out:
76 return sk;
77}
78
64static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 79static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
65{ 80{
66 struct sock* sk, *next; 81 struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
73 goto out; 88 goto out;
74 } 89 }
75 sk = v; 90 sk = v;
76 next = sk_next(sk); 91 next = sk_nulls_next(sk);
77 if (next) { 92 if (next) {
78 sk = next; 93 sk = next;
79 goto out; 94 goto out;
80 } 95 }
81 llc = llc_sk(sk); 96 llc = llc_sk(sk);
82 sap = llc->sap; 97 sap = llc->sap;
83 read_unlock_bh(&sap->sk_list.lock); 98 sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
84 sk = NULL; 99 if (sk)
85 for (;;) { 100 goto out;
86 if (sap->node.next == &llc_sap_list) 101 spin_unlock_bh(&sap->sk_lock);
87 break; 102 list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
88 sap = list_entry(sap->node.next, struct llc_sap, node); 103 spin_lock_bh(&sap->sk_lock);
89 read_lock_bh(&sap->sk_list.lock); 104 sk = laddr_hash_next(sap, -1);
90 if (!hlist_empty(&sap->sk_list.list)) { 105 if (sk)
91 sk = sk_head(&sap->sk_list.list); 106 break; /* keep the lock */
92 break; 107 spin_unlock_bh(&sap->sk_lock);
93 }
94 read_unlock_bh(&sap->sk_list.lock);
95 } 108 }
96out: 109out:
97 return sk; 110 return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
104 struct llc_sock *llc = llc_sk(sk); 117 struct llc_sock *llc = llc_sk(sk);
105 struct llc_sap *sap = llc->sap; 118 struct llc_sap *sap = llc->sap;
106 119
107 read_unlock_bh(&sap->sk_list.lock); 120 spin_unlock_bh(&sap->sk_lock);
108 } 121 }
109 read_unlock_bh(&llc_sap_list_lock); 122 rcu_read_unlock_bh();
110} 123}
111 124
112static int llc_seq_socket_show(struct seq_file *seq, void *v) 125static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42ca..a432f0ec051c 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -23,6 +23,7 @@
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <linux/llc.h> 25#include <linux/llc.h>
26#include <linux/slab.h>
26 27
27static int llc_mac_header_len(unsigned short devtype) 28static int llc_mac_header_len(unsigned short devtype)
28{ 29{
@@ -297,6 +298,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
297 llc_sap_state_process(sap, skb); 298 llc_sap_state_process(sap, skb);
298} 299}
299 300
301static inline bool llc_dgram_match(const struct llc_sap *sap,
302 const struct llc_addr *laddr,
303 const struct sock *sk)
304{
305 struct llc_sock *llc = llc_sk(sk);
306
307 return sk->sk_type == SOCK_DGRAM &&
308 llc->laddr.lsap == laddr->lsap &&
309 llc_mac_match(llc->laddr.mac, laddr->mac);
310}
311
300/** 312/**
301 * llc_lookup_dgram - Finds dgram socket for the local sap/mac 313 * llc_lookup_dgram - Finds dgram socket for the local sap/mac
302 * @sap: SAP 314 * @sap: SAP
@@ -309,25 +321,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
309 const struct llc_addr *laddr) 321 const struct llc_addr *laddr)
310{ 322{
311 struct sock *rc; 323 struct sock *rc;
312 struct hlist_node *node; 324 struct hlist_nulls_node *node;
313 325 int slot = llc_sk_laddr_hashfn(sap, laddr);
314 read_lock_bh(&sap->sk_list.lock); 326 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
315 sk_for_each(rc, node, &sap->sk_list.list) { 327
316 struct llc_sock *llc = llc_sk(rc); 328 rcu_read_lock_bh();
317 329again:
318 if (rc->sk_type == SOCK_DGRAM && 330 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
319 llc->laddr.lsap == laddr->lsap && 331 if (llc_dgram_match(sap, laddr, rc)) {
320 llc_mac_match(llc->laddr.mac, laddr->mac)) { 332 /* Extra checks required by SLAB_DESTROY_BY_RCU */
321 sock_hold(rc); 333 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
334 goto again;
335 if (unlikely(llc_sk(rc)->sap != sap ||
336 !llc_dgram_match(sap, laddr, rc))) {
337 sock_put(rc);
338 continue;
339 }
322 goto found; 340 goto found;
323 } 341 }
324 } 342 }
325 rc = NULL; 343 rc = NULL;
344 /*
345 * if the nulls value we got at the end of this lookup is
346 * not the expected one, we must restart lookup.
347 * We probably met an item that was moved to another chain.
348 */
349 if (unlikely(get_nulls_value(node) != slot))
350 goto again;
326found: 351found:
327 read_unlock_bh(&sap->sk_list.lock); 352 rcu_read_unlock_bh();
328 return rc; 353 return rc;
329} 354}
330 355
356static inline bool llc_mcast_match(const struct llc_sap *sap,
357 const struct llc_addr *laddr,
358 const struct sk_buff *skb,
359 const struct sock *sk)
360{
361 struct llc_sock *llc = llc_sk(sk);
362
363 return sk->sk_type == SOCK_DGRAM &&
364 llc->laddr.lsap == laddr->lsap &&
365 llc->dev == skb->dev;
366}
367
368static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
369 struct sock **stack, int count)
370{
371 struct sk_buff *skb1;
372 int i;
373
374 for (i = 0; i < count; i++) {
375 skb1 = skb_clone(skb, GFP_ATOMIC);
376 if (!skb1) {
377 sock_put(stack[i]);
378 continue;
379 }
380
381 llc_sap_rcv(sap, skb1, stack[i]);
382 sock_put(stack[i]);
383 }
384}
385
331/** 386/**
332 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets. 387 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
333 * @sap: SAP 388 * @sap: SAP
@@ -340,32 +395,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
340 const struct llc_addr *laddr, 395 const struct llc_addr *laddr,
341 struct sk_buff *skb) 396 struct sk_buff *skb)
342{ 397{
343 struct sock *sk; 398 int i = 0, count = 256 / sizeof(struct sock *);
399 struct sock *sk, *stack[count];
344 struct hlist_node *node; 400 struct hlist_node *node;
401 struct llc_sock *llc;
402 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
345 403
346 read_lock_bh(&sap->sk_list.lock); 404 spin_lock_bh(&sap->sk_lock);
347 sk_for_each(sk, node, &sap->sk_list.list) { 405 hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
348 struct llc_sock *llc = llc_sk(sk);
349 struct sk_buff *skb1;
350 406
351 if (sk->sk_type != SOCK_DGRAM) 407 sk = &llc->sk;
352 continue;
353 408
354 if (llc->laddr.lsap != laddr->lsap) 409 if (!llc_mcast_match(sap, laddr, skb, sk))
355 continue; 410 continue;
356 411
357 if (llc->dev != skb->dev)
358 continue;
359
360 skb1 = skb_clone(skb, GFP_ATOMIC);
361 if (!skb1)
362 break;
363
364 sock_hold(sk); 412 sock_hold(sk);
365 llc_sap_rcv(sap, skb1, sk); 413 if (i < count)
366 sock_put(sk); 414 stack[i++] = sk;
415 else {
416 llc_do_mcast(sap, skb, stack, i);
417 i = 0;
418 }
367 } 419 }
368 read_unlock_bh(&sap->sk_list.lock); 420 spin_unlock_bh(&sap->sk_lock);
421
422 llc_do_mcast(sap, skb, stack, i);
369} 423}
370 424
371 425
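
llc_sap_mcast() above now walks a per-device hash chain and gathers matching sockets into a small fixed-size on-stack array, delivering them in batches through llc_do_mcast(). The fragment below is a simplified, lock-free userspace illustration of that batch-and-flush pattern only; the match test is a placeholder, and the batch size is shrunk so the demo exercises the mid-scan flush (the patch sizes it as 256 / sizeof(struct sock *)).

/*
 * Batch-and-flush sketch: collect matches into a bounded on-stack array,
 * flush when it fills, and flush the remainder after the scan.
 */
#include <stdio.h>

#define BATCH 4         /* the patch uses 256 / sizeof(struct sock *) */

static void deliver_batch(const int *batch, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("deliver to socket %d\n", batch[i]);
}

int main(void)
{
	int sockets[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	size_t nsockets = sizeof(sockets) / sizeof(sockets[0]);
	int stack[BATCH];
	size_t count = 0;

	for (size_t i = 0; i < nsockets; i++) {
		if (sockets[i] % 2)             /* stand-in for llc_mcast_match() */
			continue;
		stack[count++] = sockets[i];
		if (count == BATCH) {           /* flush a full batch */
			deliver_batch(stack, count);
			count = 0;
		}
	}
	deliver_batch(stack, count);            /* flush the remainder */
	return 0;
}
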
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 83da13339490..e4dae0244d76 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/slab.h>
16#include <net/llc.h> 17#include <net/llc.h>
17#include <net/llc_sap.h> 18#include <net/llc_sap.h>
18#include <net/llc_conn.h> 19#include <net/llc_conn.h>
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e1..a952b7f8c648 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
96 ---help--- 96 ---help---
97 This option collects various mac80211 debug settings. 97 This option collects various mac80211 debug settings.
98 98
99config MAC80211_DEBUG_PACKET_ALIGNMENT
100 bool "Enable packet alignment debugging"
101 depends on MAC80211_DEBUG_MENU
102 ---help---
103 This option is recommended for driver authors and strongly
104 discouraged for everybody else, it will trigger a warning
105 when a driver hands mac80211 a buffer that is aligned in
106 a way that will cause problems with the IP stack on some
107 architectures.
108
109 Say N unless you're writing a mac80211 based driver.
110
111config MAC80211_NOINLINE 99config MAC80211_NOINLINE
112 bool "Do not inline TX/RX handlers" 100 bool "Do not inline TX/RX handlers"
113 depends on MAC80211_DEBUG_MENU 101 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8d..04420291e7ad 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
6 sta_info.o \ 6 sta_info.o \
7 wep.o \ 7 wep.o \
8 wpa.o \ 8 wpa.o \
9 scan.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o \ 12 mlme.o work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3b..f9516a27e233 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
@@ -41,8 +42,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 42 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 43#endif /* CONFIG_MAC80211_HT_DEBUG */
43 44
44 if (drv_ampdu_action(local, &sta->sdata->vif, 45 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
45 IEEE80211_AMPDU_RX_STOP,
46 &sta->sta, tid, NULL)) 46 &sta->sta, tid, NULL))
47 printk(KERN_DEBUG "HW problem - can not stop rx " 47 printk(KERN_DEBUG "HW problem - can not stop rx "
48 "aggregation for tid %d\n", tid); 48 "aggregation for tid %d\n", tid);
@@ -83,12 +83,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, 83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
84 u16 initiator, u16 reason) 84 u16 initiator, u16 reason)
85{ 85{
86 struct ieee80211_local *local = sdata->local;
87 struct sta_info *sta; 86 struct sta_info *sta;
88 87
89 rcu_read_lock(); 88 rcu_read_lock();
90 89
91 sta = sta_info_get(local, ra); 90 sta = sta_info_get(sdata, ra);
92 if (!sta) { 91 if (!sta) {
93 rcu_read_unlock(); 92 rcu_read_unlock();
94 return; 93 return;
@@ -136,7 +135,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
136 135
137 if (!skb) { 136 if (!skb) {
138 printk(KERN_DEBUG "%s: failed to allocate buffer " 137 printk(KERN_DEBUG "%s: failed to allocate buffer "
139 "for addba resp frame\n", sdata->dev->name); 138 "for addba resp frame\n", sdata->name);
140 return; 139 return;
141 } 140 }
142 141
@@ -144,10 +143,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
144 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 143 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
145 memset(mgmt, 0, 24); 144 memset(mgmt, 0, 24);
146 memcpy(mgmt->da, da, ETH_ALEN); 145 memcpy(mgmt->da, da, ETH_ALEN);
147 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 146 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
148 if (sdata->vif.type == NL80211_IFTYPE_AP || 147 if (sdata->vif.type == NL80211_IFTYPE_AP ||
149 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 148 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 149 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
151 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 150 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
152 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 151 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
153 152
@@ -281,8 +280,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
281 goto end; 280 goto end;
282 } 281 }
283 282
284 ret = drv_ampdu_action(local, &sta->sdata->vif, 283 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
285 IEEE80211_AMPDU_RX_START,
286 &sta->sta, tid, &start_seq_num); 284 &sta->sta, tid, &start_seq_num);
287#ifdef CONFIG_MAC80211_HT_DEBUG 285#ifdef CONFIG_MAC80211_HT_DEBUG
288 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 286 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5a..87782a4bb541 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/ieee80211.h> 16#include <linux/ieee80211.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
@@ -58,17 +59,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
58 59
59 if (!skb) { 60 if (!skb) {
60 printk(KERN_ERR "%s: failed to allocate buffer " 61 printk(KERN_ERR "%s: failed to allocate buffer "
61 "for addba request frame\n", sdata->dev->name); 62 "for addba request frame\n", sdata->name);
62 return; 63 return;
63 } 64 }
64 skb_reserve(skb, local->hw.extra_tx_headroom); 65 skb_reserve(skb, local->hw.extra_tx_headroom);
65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 66 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
66 memset(mgmt, 0, 24); 67 memset(mgmt, 0, 24);
67 memcpy(mgmt->da, da, ETH_ALEN); 68 memcpy(mgmt->da, da, ETH_ALEN);
68 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 69 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
69 if (sdata->vif.type == NL80211_IFTYPE_AP || 70 if (sdata->vif.type == NL80211_IFTYPE_AP ||
70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 71 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
71 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 72 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
72 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 73 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 74 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
74 75
@@ -104,7 +105,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 105 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
105 if (!skb) { 106 if (!skb) {
106 printk(KERN_ERR "%s: failed to allocate buffer for " 107 printk(KERN_ERR "%s: failed to allocate buffer for "
107 "bar frame\n", sdata->dev->name); 108 "bar frame\n", sdata->name);
108 return; 109 return;
109 } 110 }
110 skb_reserve(skb, local->hw.extra_tx_headroom); 111 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +114,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | 114 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
114 IEEE80211_STYPE_BACK_REQ); 115 IEEE80211_STYPE_BACK_REQ);
115 memcpy(bar->ra, ra, ETH_ALEN); 116 memcpy(bar->ra, ra, ETH_ALEN);
116 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); 117 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 118 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 119 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
119 bar_control |= (u16)(tid << 12); 120 bar_control |= (u16)(tid << 12);
@@ -144,7 +145,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 145 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 146 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
146 147
147 ret = drv_ampdu_action(local, &sta->sdata->vif, 148 ret = drv_ampdu_action(local, sta->sdata,
148 IEEE80211_AMPDU_TX_STOP, 149 IEEE80211_AMPDU_TX_STOP,
149 &sta->sta, tid, NULL); 150 &sta->sta, tid, NULL);
150 151
@@ -179,10 +180,10 @@ static void sta_addba_resp_timer_expired(unsigned long data)
179 180
180 /* check if the TID waits for addBA response */ 181 /* check if the TID waits for addBA response */
181 spin_lock_bh(&sta->lock); 182 spin_lock_bh(&sta->lock);
182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) != 183 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
184 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
183 HT_ADDBA_REQUESTED_MSK) { 185 HT_ADDBA_REQUESTED_MSK) {
184 spin_unlock_bh(&sta->lock); 186 spin_unlock_bh(&sta->lock);
185 *state = HT_AGG_STATE_IDLE;
186#ifdef CONFIG_MAC80211_HT_DEBUG 187#ifdef CONFIG_MAC80211_HT_DEBUG
187 printk(KERN_DEBUG "timer expired on tid %d but we are not " 188 printk(KERN_DEBUG "timer expired on tid %d but we are not "
188 "(or no longer) expecting addBA response there", 189 "(or no longer) expecting addBA response there",
@@ -236,6 +237,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
236 sdata->vif.type != NL80211_IFTYPE_AP) 237 sdata->vif.type != NL80211_IFTYPE_AP)
237 return -EINVAL; 238 return -EINVAL;
238 239
240 if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
241#ifdef CONFIG_MAC80211_HT_DEBUG
242 printk(KERN_DEBUG "Disassociation is in progress. "
243 "Denying BA session request\n");
244#endif
245 return -EINVAL;
246 }
247
239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 248 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
240#ifdef CONFIG_MAC80211_HT_DEBUG 249#ifdef CONFIG_MAC80211_HT_DEBUG
241 printk(KERN_DEBUG "Suspend in progress. " 250 printk(KERN_DEBUG "Suspend in progress. "
@@ -301,10 +310,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
301 * call back right away, it must see that the flow has begun */ 310 * call back right away, it must see that the flow has begun */
302 *state |= HT_ADDBA_REQUESTED_MSK; 311 *state |= HT_ADDBA_REQUESTED_MSK;
303 312
304 start_seq_num = sta->tid_seq[tid]; 313 start_seq_num = sta->tid_seq[tid] >> 4;
305 314
306 ret = drv_ampdu_action(local, &sdata->vif, 315 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
307 IEEE80211_AMPDU_TX_START,
308 pubsta, tid, &start_seq_num); 316 pubsta, tid, &start_seq_num);
309 317
310 if (ret) { 318 if (ret) {
@@ -420,7 +428,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
420 ieee80211_agg_splice_finish(local, sta, tid); 428 ieee80211_agg_splice_finish(local, sta, tid);
421 spin_unlock(&local->ampdu_lock); 429 spin_unlock(&local->ampdu_lock);
422 430
423 drv_ampdu_action(local, &sta->sdata->vif, 431 drv_ampdu_action(local, sta->sdata,
424 IEEE80211_AMPDU_TX_OPERATIONAL, 432 IEEE80211_AMPDU_TX_OPERATIONAL,
425 &sta->sta, tid, NULL); 433 &sta->sta, tid, NULL);
426} 434}
@@ -441,7 +449,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
441 } 449 }
442 450
443 rcu_read_lock(); 451 rcu_read_lock();
444 sta = sta_info_get(local, ra); 452 sta = sta_info_get(sdata, ra);
445 if (!sta) { 453 if (!sta) {
446 rcu_read_unlock(); 454 rcu_read_unlock();
447#ifdef CONFIG_MAC80211_HT_DEBUG 455#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +497,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
489#ifdef CONFIG_MAC80211_HT_DEBUG 497#ifdef CONFIG_MAC80211_HT_DEBUG
490 if (net_ratelimit()) 498 if (net_ratelimit())
491 printk(KERN_WARNING "%s: Not enough memory, " 499 printk(KERN_WARNING "%s: Not enough memory, "
492 "dropping start BA session", skb->dev->name); 500 "dropping start BA session", sdata->name);
493#endif 501#endif
494 return; 502 return;
495 } 503 }
@@ -564,7 +572,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
564#endif /* CONFIG_MAC80211_HT_DEBUG */ 572#endif /* CONFIG_MAC80211_HT_DEBUG */
565 573
566 rcu_read_lock(); 574 rcu_read_lock();
567 sta = sta_info_get(local, ra); 575 sta = sta_info_get(sdata, ra);
568 if (!sta) { 576 if (!sta) {
569#ifdef CONFIG_MAC80211_HT_DEBUG 577#ifdef CONFIG_MAC80211_HT_DEBUG
570 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 578 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +629,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
621#ifdef CONFIG_MAC80211_HT_DEBUG 629#ifdef CONFIG_MAC80211_HT_DEBUG
622 if (net_ratelimit()) 630 if (net_ratelimit())
623 printk(KERN_WARNING "%s: Not enough memory, " 631 printk(KERN_WARNING "%s: Not enough memory, "
624 "dropping stop BA session", skb->dev->name); 632 "dropping stop BA session", sdata->name);
625#endif 633#endif
626 return; 634 return;
627 } 635 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9ae1a4760b58..edc872e22c9b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mac80211 configuration hooks for cfg80211 2 * mac80211 configuration hooks for cfg80211
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 * 5 *
6 * This file is GPLv2 as found in COPYING. 6 * This file is GPLv2 as found in COPYING.
7 */ 7 */
@@ -9,6 +9,7 @@
9#include <linux/ieee80211.h> 9#include <linux/ieee80211.h>
10#include <linux/nl80211.h> 10#include <linux/nl80211.h>
11#include <linux/rtnetlink.h> 11#include <linux/rtnetlink.h>
12#include <linux/slab.h>
12#include <net/net_namespace.h> 13#include <net/net_namespace.h>
13#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
14#include <net/cfg80211.h> 15#include <net/cfg80211.h>
@@ -78,17 +79,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
78 enum nl80211_iftype type, u32 *flags, 79 enum nl80211_iftype type, u32 *flags,
79 struct vif_params *params) 80 struct vif_params *params)
80{ 81{
81 struct ieee80211_sub_if_data *sdata; 82 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
82 int ret; 83 int ret;
83 84
84 if (netif_running(dev)) 85 if (ieee80211_sdata_running(sdata))
85 return -EBUSY; 86 return -EBUSY;
86 87
87 if (!nl80211_params_check(type, params)) 88 if (!nl80211_params_check(type, params))
88 return -EINVAL; 89 return -EINVAL;
89 90
90 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
91
92 ret = ieee80211_if_change_type(sdata, type); 91 ret = ieee80211_if_change_type(sdata, type);
93 if (ret) 92 if (ret)
94 return ret; 93 return ret;
@@ -150,7 +149,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
150 rcu_read_lock(); 149 rcu_read_lock();
151 150
152 if (mac_addr) { 151 if (mac_addr) {
153 sta = sta_info_get(sdata->local, mac_addr); 152 sta = sta_info_get_bss(sdata, mac_addr);
154 if (!sta) { 153 if (!sta) {
155 ieee80211_key_free(key); 154 ieee80211_key_free(key);
156 err = -ENOENT; 155 err = -ENOENT;
@@ -181,7 +180,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
181 if (mac_addr) { 180 if (mac_addr) {
182 ret = -ENOENT; 181 ret = -ENOENT;
183 182
184 sta = sta_info_get(sdata->local, mac_addr); 183 sta = sta_info_get_bss(sdata, mac_addr);
185 if (!sta) 184 if (!sta)
186 goto out_unlock; 185 goto out_unlock;
187 186
@@ -228,7 +227,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
228 rcu_read_lock(); 227 rcu_read_lock();
229 228
230 if (mac_addr) { 229 if (mac_addr) {
231 sta = sta_info_get(sdata->local, mac_addr); 230 sta = sta_info_get_bss(sdata, mac_addr);
232 if (!sta) 231 if (!sta)
233 goto out; 232 goto out;
234 233
@@ -415,15 +414,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
415static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 414static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
416 u8 *mac, struct station_info *sinfo) 415 u8 *mac, struct station_info *sinfo)
417{ 416{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 417 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
419 struct sta_info *sta; 418 struct sta_info *sta;
420 int ret = -ENOENT; 419 int ret = -ENOENT;
421 420
422 rcu_read_lock(); 421 rcu_read_lock();
423 422
424 /* XXX: verify sta->dev == dev */ 423 sta = sta_info_get_bss(sdata, mac);
425
426 sta = sta_info_get(local, mac);
427 if (sta) { 424 if (sta) {
428 ret = 0; 425 ret = 0;
429 sta_set_sinfo(sta, sinfo); 426 sta_set_sinfo(sta, sinfo);
@@ -519,6 +516,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
519 if (old) 516 if (old)
520 memcpy(new->tail, old->tail, new_tail_len); 517 memcpy(new->tail, old->tail, new_tail_len);
521 518
519 sdata->vif.bss_conf.dtim_period = new->dtim_period;
520
522 rcu_assign_pointer(sdata->u.ap.beacon, new); 521 rcu_assign_pointer(sdata->u.ap.beacon, new);
523 522
524 synchronize_rcu(); 523 synchronize_rcu();
@@ -732,7 +731,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 } else 731 } else
733 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 732 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
734 733
735 if (compare_ether_addr(mac, dev->dev_addr) == 0) 734 if (compare_ether_addr(mac, sdata->vif.addr) == 0)
736 return -EINVAL; 735 return -EINVAL;
737 736
738 if (is_multicast_ether_addr(mac)) 737 if (is_multicast_ether_addr(mac))
@@ -751,9 +750,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
751 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 750 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
752 sdata->vif.type == NL80211_IFTYPE_AP; 751 sdata->vif.type == NL80211_IFTYPE_AP;
753 752
754 rcu_read_lock(); 753 err = sta_info_insert_rcu(sta);
755
756 err = sta_info_insert(sta);
757 if (err) { 754 if (err) {
758 rcu_read_unlock(); 755 rcu_read_unlock();
759 return err; 756 return err;
@@ -772,27 +769,13 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
772{ 769{
773 struct ieee80211_local *local = wiphy_priv(wiphy); 770 struct ieee80211_local *local = wiphy_priv(wiphy);
774 struct ieee80211_sub_if_data *sdata; 771 struct ieee80211_sub_if_data *sdata;
775 struct sta_info *sta;
776 772
777 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 773 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
778 774
779 if (mac) { 775 if (mac)
780 rcu_read_lock(); 776 return sta_info_destroy_addr_bss(sdata, mac);
781
782 /* XXX: get sta belonging to dev */
783 sta = sta_info_get(local, mac);
784 if (!sta) {
785 rcu_read_unlock();
786 return -ENOENT;
787 }
788
789 sta_info_unlink(&sta);
790 rcu_read_unlock();
791
792 sta_info_destroy(sta);
793 } else
794 sta_info_flush(local, sdata);
795 777
778 sta_info_flush(local, sdata);
796 return 0; 779 return 0;
797} 780}
798 781
@@ -801,14 +784,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
801 u8 *mac, 784 u8 *mac,
802 struct station_parameters *params) 785 struct station_parameters *params)
803{ 786{
787 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = wiphy_priv(wiphy); 788 struct ieee80211_local *local = wiphy_priv(wiphy);
805 struct sta_info *sta; 789 struct sta_info *sta;
806 struct ieee80211_sub_if_data *vlansdata; 790 struct ieee80211_sub_if_data *vlansdata;
807 791
808 rcu_read_lock(); 792 rcu_read_lock();
809 793
810 /* XXX: get sta belonging to dev */ 794 sta = sta_info_get_bss(sdata, mac);
811 sta = sta_info_get(local, mac);
812 if (!sta) { 795 if (!sta) {
813 rcu_read_unlock(); 796 rcu_read_unlock();
814 return -ENOENT; 797 return -ENOENT;
@@ -847,7 +830,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
847static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 830static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
848 u8 *dst, u8 *next_hop) 831 u8 *dst, u8 *next_hop)
849{ 832{
850 struct ieee80211_local *local = wiphy_priv(wiphy);
851 struct ieee80211_sub_if_data *sdata; 833 struct ieee80211_sub_if_data *sdata;
852 struct mesh_path *mpath; 834 struct mesh_path *mpath;
853 struct sta_info *sta; 835 struct sta_info *sta;
@@ -856,7 +838,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
856 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 838 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
857 839
858 rcu_read_lock(); 840 rcu_read_lock();
859 sta = sta_info_get(local, next_hop); 841 sta = sta_info_get(sdata, next_hop);
860 if (!sta) { 842 if (!sta) {
861 rcu_read_unlock(); 843 rcu_read_unlock();
862 return -ENOENT; 844 return -ENOENT;
@@ -895,7 +877,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
895 struct net_device *dev, 877 struct net_device *dev,
896 u8 *dst, u8 *next_hop) 878 u8 *dst, u8 *next_hop)
897{ 879{
898 struct ieee80211_local *local = wiphy_priv(wiphy);
899 struct ieee80211_sub_if_data *sdata; 880 struct ieee80211_sub_if_data *sdata;
900 struct mesh_path *mpath; 881 struct mesh_path *mpath;
901 struct sta_info *sta; 882 struct sta_info *sta;
@@ -904,7 +885,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
904 885
905 rcu_read_lock(); 886 rcu_read_lock();
906 887
907 sta = sta_info_get(local, next_hop); 888 sta = sta_info_get(sdata, next_hop);
908 if (!sta) { 889 if (!sta) {
909 rcu_read_unlock(); 890 rcu_read_unlock();
910 return -ENOENT; 891 return -ENOENT;
@@ -1092,6 +1073,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1092 params->use_short_preamble; 1073 params->use_short_preamble;
1093 changed |= BSS_CHANGED_ERP_PREAMBLE; 1074 changed |= BSS_CHANGED_ERP_PREAMBLE;
1094 } 1075 }
1076
1077 if (!sdata->vif.bss_conf.use_short_slot &&
1078 sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
1079 sdata->vif.bss_conf.use_short_slot = true;
1080 changed |= BSS_CHANGED_ERP_SLOT;
1081 }
1082
1095 if (params->use_short_slot_time >= 0) { 1083 if (params->use_short_slot_time >= 0) {
1096 sdata->vif.bss_conf.use_short_slot = 1084 sdata->vif.bss_conf.use_short_slot =
1097 params->use_short_slot_time; 1085 params->use_short_slot_time;
@@ -1135,6 +1123,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1135 p.cw_max = params->cwmax; 1123 p.cw_max = params->cwmax;
1136 p.cw_min = params->cwmin; 1124 p.cw_min = params->cwmin;
1137 p.txop = params->txop; 1125 p.txop = params->txop;
1126
1127 /*
1128 * Setting tx queue params disables u-apsd because it's only
1129 * called in master mode.
1130 */
1131 p.uapsd = false;
1132
1138 if (drv_conf_tx(local, params->queue, &p)) { 1133 if (drv_conf_tx(local, params->queue, &p)) {
1139 printk(KERN_DEBUG "%s: failed to set TX queue " 1134 printk(KERN_DEBUG "%s: failed to set TX queue "
1140 "parameters for queue %d\n", 1135 "parameters for queue %d\n",
@@ -1237,6 +1232,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1237 struct ieee80211_local *local = wiphy_priv(wiphy); 1232 struct ieee80211_local *local = wiphy_priv(wiphy);
1238 int err; 1233 int err;
1239 1234
1235 if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
1236 err = drv_set_coverage_class(local, wiphy->coverage_class);
1237
1238 if (err)
1239 return err;
1240 }
1241
1240 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 1242 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1241 err = drv_set_rts_threshold(local, wiphy->rts_threshold); 1243 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1242 1244
@@ -1324,6 +1326,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
1324} 1326}
1325#endif 1327#endif
1326 1328
1329int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1330 enum ieee80211_smps_mode smps_mode)
1331{
1332 const u8 *ap;
1333 enum ieee80211_smps_mode old_req;
1334 int err;
1335
1336 old_req = sdata->u.mgd.req_smps;
1337 sdata->u.mgd.req_smps = smps_mode;
1338
1339 if (old_req == smps_mode &&
1340 smps_mode != IEEE80211_SMPS_AUTOMATIC)
1341 return 0;
1342
1343 /*
1344 * If not associated, or current association is not an HT
1345 * association, there's no need to send an action frame.
1346 */
1347 if (!sdata->u.mgd.associated ||
1348 sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
1349 mutex_lock(&sdata->local->iflist_mtx);
1350 ieee80211_recalc_smps(sdata->local, sdata);
1351 mutex_unlock(&sdata->local->iflist_mtx);
1352 return 0;
1353 }
1354
1355 ap = sdata->u.mgd.associated->bssid;
1356
1357 if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1358 if (sdata->u.mgd.powersave)
1359 smps_mode = IEEE80211_SMPS_DYNAMIC;
1360 else
1361 smps_mode = IEEE80211_SMPS_OFF;
1362 }
1363
1364 /* send SM PS frame to AP */
1365 err = ieee80211_send_smps_action(sdata, smps_mode,
1366 ap, ap);
1367 if (err)
1368 sdata->u.mgd.req_smps = old_req;
1369
1370 return err;
1371}
1372
1327static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 1373static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1328 bool enabled, int timeout) 1374 bool enabled, int timeout)
1329{ 1375{
@@ -1344,6 +1390,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1344 sdata->u.mgd.powersave = enabled; 1390 sdata->u.mgd.powersave = enabled;
1345 conf->dynamic_ps_timeout = timeout; 1391 conf->dynamic_ps_timeout = timeout;
1346 1392
1393 /* no change, but if automatic follow powersave */
1394 mutex_lock(&sdata->u.mgd.mtx);
1395 __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
1396 mutex_unlock(&sdata->u.mgd.mtx);
1397
1347 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 1398 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1348 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 1399 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1349 1400
@@ -1359,39 +1410,52 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1359{ 1410{
1360 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1411 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1361 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1412 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1362 int i, err = -EINVAL; 1413 int i;
1363 u32 target_rate; 1414
1364 struct ieee80211_supported_band *sband; 1415 /*
1416 * This _could_ be supported by providing a hook for
1417 * drivers for this function, but at this point it
1418 * doesn't seem worth bothering.
1419 */
1420 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1421 return -EOPNOTSUPP;
1365 1422
1366 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1367 1423
1368 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates 1424 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1369 * target_rate = X, rate->fixed = 1 means only rate X 1425 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
1370 * target_rate = X, rate->fixed = 0 means all rates <= X */
1371 sdata->max_ratectrl_rateidx = -1;
1372 sdata->force_unicast_rateidx = -1;
1373 1426
1374 if (mask->fixed) 1427 return 0;
1375 target_rate = mask->fixed / 100; 1428}
1376 else if (mask->maxrate)
1377 target_rate = mask->maxrate / 100;
1378 else
1379 return 0;
1380 1429
1381 for (i=0; i< sband->n_bitrates; i++) { 1430static int ieee80211_remain_on_channel(struct wiphy *wiphy,
1382 struct ieee80211_rate *brate = &sband->bitrates[i]; 1431 struct net_device *dev,
1383 int this_rate = brate->bitrate; 1432 struct ieee80211_channel *chan,
1433 enum nl80211_channel_type channel_type,
1434 unsigned int duration,
1435 u64 *cookie)
1436{
1437 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1384 1438
1385 if (target_rate == this_rate) { 1439 return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
1386 sdata->max_ratectrl_rateidx = i; 1440 duration, cookie);
1387 if (mask->fixed) 1441}
1388 sdata->force_unicast_rateidx = i;
1389 err = 0;
1390 break;
1391 }
1392 }
1393 1442
1394 return err; 1443static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1444 struct net_device *dev,
1445 u64 cookie)
1446{
1447 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1448
1449 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
1450}
1451
1452static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
1453 struct ieee80211_channel *chan,
1454 enum nl80211_channel_type channel_type,
1455 const u8 *buf, size_t len, u64 *cookie)
1456{
1457 return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
1458 channel_type, buf, len, cookie);
1395} 1459}
1396 1460
1397struct cfg80211_ops mac80211_config_ops = { 1461struct cfg80211_ops mac80211_config_ops = {
@@ -1440,4 +1504,7 @@ struct cfg80211_ops mac80211_config_ops = {
1440 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) 1504 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
1441 .set_power_mgmt = ieee80211_set_power_mgmt, 1505 .set_power_mgmt = ieee80211_set_power_mgmt,
1442 .set_bitrate_mask = ieee80211_set_bitrate_mask, 1506 .set_bitrate_mask = ieee80211_set_bitrate_mask,
1507 .remain_on_channel = ieee80211_remain_on_channel,
1508 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1509 .action = ieee80211_action,
1443}; 1510};
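
The new ieee80211_set_bitrate_mask() no longer searches for a single fixed or maximum rate index; it records the per-band legacy bitmap in sdata->rc_rateidx_mask for rate control to consult, and returns -EOPNOTSUPP when the hardware does its own rate control. A self-contained sketch of how such a legacy bitmap selects rate indices; the rate table below is illustrative, not the real sband contents:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative legacy rate table in units of 100 kbps (not the real sband) */
    static const int rates_100kbps[] = { 10, 20, 55, 110, 60, 90, 120, 180,
                                         240, 360, 480, 540 };

    static void print_enabled_rates(uint32_t legacy_mask)
    {
        unsigned int i;

        for (i = 0; i < sizeof(rates_100kbps) / sizeof(rates_100kbps[0]); i++)
            if (legacy_mask & (1U << i))
                printf("idx %u: %d.%d Mbps\n", i,
                       rates_100kbps[i] / 10, rates_100kbps[i] % 10);
    }

    int main(void)
    {
        /* e.g. a mask that allows only the first four (11b-style) rates */
        print_enabled_rates(0x0000000f);
        return 0;
    }
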
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41b..637929b65ccc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,130 @@ static const struct file_operations noack_ops = {
158 .open = mac80211_open_file_generic 158 .open = mac80211_open_file_generic
159}; 159};
160 160
161static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos)
163{
164 struct ieee80211_local *local = file->private_data;
165 int res;
166 char buf[10];
167
168 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
169
170 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
171}
172
173static ssize_t uapsd_queues_write(struct file *file,
174 const char __user *user_buf,
175 size_t count, loff_t *ppos)
176{
177 struct ieee80211_local *local = file->private_data;
178 unsigned long val;
179 char buf[10];
180 size_t len;
181 int ret;
182
183 len = min(count, sizeof(buf) - 1);
184 if (copy_from_user(buf, user_buf, len))
185 return -EFAULT;
186 buf[len] = '\0';
187
188 ret = strict_strtoul(buf, 0, &val);
189
190 if (ret)
191 return -EINVAL;
192
193 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
194 return -ERANGE;
195
196 local->uapsd_queues = val;
197
198 return count;
199}
200
201static const struct file_operations uapsd_queues_ops = {
202 .read = uapsd_queues_read,
203 .write = uapsd_queues_write,
204 .open = mac80211_open_file_generic
205};
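
uapsd_queues_write() follows the standard debugfs write pattern: copy at most sizeof(buf) - 1 bytes from user space, NUL-terminate, parse with strict_strtoul(), and reject bits outside the permitted mask with -ERANGE. A user-space sketch of the parse-and-validate step; the 0x0f value is an assumption standing in for IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* assumed stand-in for IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK */
    #define AC_MASK 0x0f

    /* returns the parsed value, or a negative errno like the debugfs handler */
    static long parse_uapsd_queues(const char *buf)
    {
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(buf, &end, 0);    /* base 0: accepts 0x..., octal, decimal */
        if (errno || end == buf)
            return -EINVAL;
        if (val & ~AC_MASK)
            return -ERANGE;
        return (long)val;
    }

    int main(void)
    {
        printf("\"0xf\"  -> %ld\n", parse_uapsd_queues("0xf"));
        printf("\"0x10\" -> %ld\n", parse_uapsd_queues("0x10"));   /* -ERANGE */
        return 0;
    }
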
206
207static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
208 size_t count, loff_t *ppos)
209{
210 struct ieee80211_local *local = file->private_data;
211 int res;
212 char buf[10];
213
214 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
215
216 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
217}
218
219static ssize_t uapsd_max_sp_len_write(struct file *file,
220 const char __user *user_buf,
221 size_t count, loff_t *ppos)
222{
223 struct ieee80211_local *local = file->private_data;
224 unsigned long val;
225 char buf[10];
226 size_t len;
227 int ret;
228
229 len = min(count, sizeof(buf) - 1);
230 if (copy_from_user(buf, user_buf, len))
231 return -EFAULT;
232 buf[len] = '\0';
233
234 ret = strict_strtoul(buf, 0, &val);
235
236 if (ret)
237 return -EINVAL;
238
239 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
240 return -ERANGE;
241
242 local->uapsd_max_sp_len = val;
243
244 return count;
245}
246
247static const struct file_operations uapsd_max_sp_len_ops = {
248 .read = uapsd_max_sp_len_read,
249 .write = uapsd_max_sp_len_write,
250 .open = mac80211_open_file_generic
251};
252
253static ssize_t channel_type_read(struct file *file, char __user *user_buf,
254 size_t count, loff_t *ppos)
255{
256 struct ieee80211_local *local = file->private_data;
257 const char *buf;
258
259 switch (local->hw.conf.channel_type) {
260 case NL80211_CHAN_NO_HT:
261 buf = "no ht\n";
262 break;
263 case NL80211_CHAN_HT20:
264 buf = "ht20\n";
265 break;
266 case NL80211_CHAN_HT40MINUS:
267 buf = "ht40-\n";
268 break;
269 case NL80211_CHAN_HT40PLUS:
270 buf = "ht40+\n";
271 break;
272 default:
273 buf = "???";
274 break;
275 }
276
277 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
278}
279
280static const struct file_operations channel_type_ops = {
281 .read = channel_type_read,
282 .open = mac80211_open_file_generic
283};
284
161static ssize_t queues_read(struct file *file, char __user *user_buf, 285static ssize_t queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos) 286 size_t count, loff_t *ppos)
163{ 287{
@@ -314,6 +438,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
314 DEBUGFS_ADD(queues); 438 DEBUGFS_ADD(queues);
315 DEBUGFS_ADD_MODE(reset, 0200); 439 DEBUGFS_ADD_MODE(reset, 0200);
316 DEBUGFS_ADD(noack); 440 DEBUGFS_ADD(noack);
441 DEBUGFS_ADD(uapsd_queues);
442 DEBUGFS_ADD(uapsd_max_sp_len);
443 DEBUGFS_ADD(channel_type);
317 444
318 statsd = debugfs_create_dir("statistics", phyd); 445 statsd = debugfs_create_dir("statistics", phyd);
319 446
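
After debugfs_hw_add() registers them, the three new files sit next to queues and noack in the per-phy debugfs directory. A small POSIX reader; the /sys/kernel/debug/ieee80211/phy0 path is the conventional mount point and is an assumption here, and debugfs access is typically root-only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static void dump(const char *dir, const char *name)
    {
        char path[256], buf[64];
        ssize_t n;
        int fd;

        snprintf(path, sizeof(path), "%s/%s", dir, name);
        fd = open(path, O_RDONLY);
        if (fd < 0) {
            perror(path);
            return;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("%s: %s", name, buf);
        }
        close(fd);
    }

    int main(void)
    {
        const char *dir = "/sys/kernel/debug/ieee80211/phy0"; /* assumed path */

        dump(dir, "uapsd_queues");
        dump(dir, "uapsd_max_sp_len");
        dump(dir, "channel_type");
        return 0;
    }
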
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630da..97c9e46e859e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/kobject.h> 11#include <linux/kobject.h>
12#include <linux/slab.h>
12#include "ieee80211_i.h" 13#include "ieee80211_i.h"
13#include "key.h" 14#include "key.h"
14#include "debugfs.h" 15#include "debugfs.h"
@@ -56,7 +57,7 @@ KEY_CONF_FILE(keyidx, D);
56KEY_CONF_FILE(hw_key_idx, D); 57KEY_CONF_FILE(hw_key_idx, D);
57KEY_FILE(flags, X); 58KEY_FILE(flags, X);
58KEY_FILE(tx_rx_count, D); 59KEY_FILE(tx_rx_count, D);
59KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n"); 60KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
60KEY_OPS(ifindex); 61KEY_OPS(ifindex);
61 62
62static ssize_t key_algorithm_read(struct file *file, 63static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906c..83d4289d954b 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -13,6 +13,7 @@
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h> 15#include <linux/rtnetlink.h>
16#include <linux/slab.h>
16#include <linux/notifier.h> 17#include <linux/notifier.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
@@ -41,6 +42,34 @@ static ssize_t ieee80211_if_read(
41 return ret; 42 return ret;
42} 43}
43 44
45static ssize_t ieee80211_if_write(
46 struct ieee80211_sub_if_data *sdata,
47 const char __user *userbuf,
48 size_t count, loff_t *ppos,
49 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
50{
51 u8 *buf;
52 ssize_t ret;
53
54 buf = kmalloc(count, GFP_KERNEL);
55 if (!buf)
56 return -ENOMEM;
57
58 ret = -EFAULT;
59 if (copy_from_user(buf, userbuf, count))
60 goto freebuf;
61
62 ret = -ENODEV;
63 rtnl_lock();
64 if (sdata->dev->reg_state == NETREG_REGISTERED)
65 ret = (*write)(sdata, buf, count);
66 rtnl_unlock();
67
68freebuf:
69 kfree(buf);
70 return ret;
71}
72
44#define IEEE80211_IF_FMT(name, field, format_string) \ 73#define IEEE80211_IF_FMT(name, field, format_string) \
45static ssize_t ieee80211_if_fmt_##name( \ 74static ssize_t ieee80211_if_fmt_##name( \
46 const struct ieee80211_sub_if_data *sdata, char *buf, \ 75 const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +100,7 @@ static ssize_t ieee80211_if_fmt_##name( \
71 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 100 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
72} 101}
73 102
74#define __IEEE80211_IF_FILE(name) \ 103#define __IEEE80211_IF_FILE(name, _write) \
75static ssize_t ieee80211_if_read_##name(struct file *file, \ 104static ssize_t ieee80211_if_read_##name(struct file *file, \
76 char __user *userbuf, \ 105 char __user *userbuf, \
77 size_t count, loff_t *ppos) \ 106 size_t count, loff_t *ppos) \
@@ -82,22 +111,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
82} \ 111} \
83static const struct file_operations name##_ops = { \ 112static const struct file_operations name##_ops = { \
84 .read = ieee80211_if_read_##name, \ 113 .read = ieee80211_if_read_##name, \
114 .write = (_write), \
85 .open = mac80211_open_file_generic, \ 115 .open = mac80211_open_file_generic, \
86} 116}
87 117
118#define __IEEE80211_IF_FILE_W(name) \
119static ssize_t ieee80211_if_write_##name(struct file *file, \
120 const char __user *userbuf, \
121 size_t count, loff_t *ppos) \
122{ \
123 return ieee80211_if_write(file->private_data, userbuf, count, \
124 ppos, ieee80211_if_parse_##name); \
125} \
126__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
127
128
88#define IEEE80211_IF_FILE(name, field, format) \ 129#define IEEE80211_IF_FILE(name, field, format) \
89 IEEE80211_IF_FMT_##format(name, field) \ 130 IEEE80211_IF_FMT_##format(name, field) \
90 __IEEE80211_IF_FILE(name) 131 __IEEE80211_IF_FILE(name, NULL)
91 132
92/* common attributes */ 133/* common attributes */
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 134IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); 135IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
95IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); 136 HEX);
137IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
138 HEX);
96 139
97/* STA attributes */ 140/* STA attributes */
98IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 141IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
99IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 142IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
100IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); 143
144static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
145 enum ieee80211_smps_mode smps_mode)
146{
147 struct ieee80211_local *local = sdata->local;
148 int err;
149
150 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
151 smps_mode == IEEE80211_SMPS_STATIC)
152 return -EINVAL;
153
154 /* auto should be dynamic if in PS mode */
155 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
156 (smps_mode == IEEE80211_SMPS_DYNAMIC ||
157 smps_mode == IEEE80211_SMPS_AUTOMATIC))
158 return -EINVAL;
159
160 /* supported only on managed interfaces for now */
161 if (sdata->vif.type != NL80211_IFTYPE_STATION)
162 return -EOPNOTSUPP;
163
164 mutex_lock(&local->iflist_mtx);
165 err = __ieee80211_request_smps(sdata, smps_mode);
166 mutex_unlock(&local->iflist_mtx);
167
168 return err;
169}
170
171static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
172 [IEEE80211_SMPS_AUTOMATIC] = "auto",
173 [IEEE80211_SMPS_OFF] = "off",
174 [IEEE80211_SMPS_STATIC] = "static",
175 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
176};
177
178static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
179 char *buf, int buflen)
180{
181 if (sdata->vif.type != NL80211_IFTYPE_STATION)
182 return -EOPNOTSUPP;
183
184 return snprintf(buf, buflen, "request: %s\nused: %s\n",
185 smps_modes[sdata->u.mgd.req_smps],
186 smps_modes[sdata->u.mgd.ap_smps]);
187}
188
189static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
190 const char *buf, int buflen)
191{
192 enum ieee80211_smps_mode mode;
193
194 for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
195 if (strncmp(buf, smps_modes[mode], buflen) == 0) {
196 int err = ieee80211_set_smps(sdata, mode);
197 if (!err)
198 return buflen;
199 return err;
200 }
201 }
202
203 return -EINVAL;
204}
205
206__IEEE80211_IF_FILE_W(smps);
101 207
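
Together with ieee80211_if_fmt_smps() and ieee80211_if_parse_smps() above, __IEEE80211_IF_FILE_W(smps) expands roughly as sketched below; the write wrapper and file_operations follow directly from the macros in this hunk, while the read wrapper body is reconstructed from __IEEE80211_IF_FILE()'s read path and is not quoted from the source:

    /* expansion sketch of __IEEE80211_IF_FILE_W(smps) */
    static ssize_t ieee80211_if_write_smps(struct file *file,
                                           const char __user *userbuf,
                                           size_t count, loff_t *ppos)
    {
        /* bounces through the kmalloc/copy_from_user/rtnl_lock helper above */
        return ieee80211_if_write(file->private_data, userbuf, count,
                                  ppos, ieee80211_if_parse_smps);
    }

    /* read wrapper emitted by __IEEE80211_IF_FILE(); body reconstructed */
    static ssize_t ieee80211_if_read_smps(struct file *file,
                                          char __user *userbuf,
                                          size_t count, loff_t *ppos)
    {
        return ieee80211_if_read(file->private_data, userbuf, count,
                                 ppos, ieee80211_if_fmt_smps);
    }

    static const struct file_operations smps_ops = {
        .read = ieee80211_if_read_smps,
        .write = ieee80211_if_write_smps,
        .open = mac80211_open_file_generic,
    };
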
102/* AP attributes */ 208/* AP attributes */
103IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 209IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +215,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
109 return scnprintf(buf, buflen, "%u\n", 215 return scnprintf(buf, buflen, "%u\n",
110 skb_queue_len(&sdata->u.ap.ps_bc_buf)); 216 skb_queue_len(&sdata->u.ap.ps_bc_buf));
111} 217}
112__IEEE80211_IF_FILE(num_buffered_multicast); 218__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
113 219
114/* WDS attributes */ 220/* WDS attributes */
115IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 221IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +260,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
154#endif 260#endif
155 261
156 262
157#define DEBUGFS_ADD(name, type) \ 263#define DEBUGFS_ADD(name) \
158 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ 264 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
159 sdata, &name##_ops); 265 sdata, &name##_ops);
160 266
267#define DEBUGFS_ADD_MODE(name, mode) \
268 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
269 sdata, &name##_ops);
270
161static void add_sta_files(struct ieee80211_sub_if_data *sdata) 271static void add_sta_files(struct ieee80211_sub_if_data *sdata)
162{ 272{
163 DEBUGFS_ADD(drop_unencrypted, sta); 273 DEBUGFS_ADD(drop_unencrypted);
164 DEBUGFS_ADD(force_unicast_rateidx, sta); 274 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
165 DEBUGFS_ADD(max_ratectrl_rateidx, sta); 275 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
166 276
167 DEBUGFS_ADD(bssid, sta); 277 DEBUGFS_ADD(bssid);
168 DEBUGFS_ADD(aid, sta); 278 DEBUGFS_ADD(aid);
169 DEBUGFS_ADD(capab, sta); 279 DEBUGFS_ADD_MODE(smps, 0600);
170} 280}
171 281
172static void add_ap_files(struct ieee80211_sub_if_data *sdata) 282static void add_ap_files(struct ieee80211_sub_if_data *sdata)
173{ 283{
174 DEBUGFS_ADD(drop_unencrypted, ap); 284 DEBUGFS_ADD(drop_unencrypted);
175 DEBUGFS_ADD(force_unicast_rateidx, ap); 285 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
176 DEBUGFS_ADD(max_ratectrl_rateidx, ap); 286 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
177 287
178 DEBUGFS_ADD(num_sta_ps, ap); 288 DEBUGFS_ADD(num_sta_ps);
179 DEBUGFS_ADD(dtim_count, ap); 289 DEBUGFS_ADD(dtim_count);
180 DEBUGFS_ADD(num_buffered_multicast, ap); 290 DEBUGFS_ADD(num_buffered_multicast);
181} 291}
182 292
183static void add_wds_files(struct ieee80211_sub_if_data *sdata) 293static void add_wds_files(struct ieee80211_sub_if_data *sdata)
184{ 294{
185 DEBUGFS_ADD(drop_unencrypted, wds); 295 DEBUGFS_ADD(drop_unencrypted);
186 DEBUGFS_ADD(force_unicast_rateidx, wds); 296 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
187 DEBUGFS_ADD(max_ratectrl_rateidx, wds); 297 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
188 298
189 DEBUGFS_ADD(peer, wds); 299 DEBUGFS_ADD(peer);
190} 300}
191 301
192static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 302static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
193{ 303{
194 DEBUGFS_ADD(drop_unencrypted, vlan); 304 DEBUGFS_ADD(drop_unencrypted);
195 DEBUGFS_ADD(force_unicast_rateidx, vlan); 305 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
196 DEBUGFS_ADD(max_ratectrl_rateidx, vlan); 306 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
197} 307}
198 308
199static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 309static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +390,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
280 } 390 }
281} 391}
282 392
283static int notif_registered;
284
285void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) 393void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
286{ 394{
287 char buf[10+IFNAMSIZ]; 395 char buf[10+IFNAMSIZ];
288 396
289 if (!notif_registered) 397 sprintf(buf, "netdev:%s", sdata->name);
290 return;
291
292 sprintf(buf, "netdev:%s", sdata->dev->name);
293 sdata->debugfs.dir = debugfs_create_dir(buf, 398 sdata->debugfs.dir = debugfs_create_dir(buf,
294 sdata->local->hw.wiphy->debugfsdir); 399 sdata->local->hw.wiphy->debugfsdir);
295 add_files(sdata); 400 add_files(sdata);
@@ -304,58 +409,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
304 sdata->debugfs.dir = NULL; 409 sdata->debugfs.dir = NULL;
305} 410}
306 411
307static int netdev_notify(struct notifier_block *nb, 412void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
308 unsigned long state,
309 void *ndev)
310{ 413{
311 struct net_device *dev = ndev;
312 struct dentry *dir; 414 struct dentry *dir;
313 struct ieee80211_sub_if_data *sdata; 415 char buf[10 + IFNAMSIZ];
314 char buf[10+IFNAMSIZ];
315
316 if (state != NETDEV_CHANGENAME)
317 return 0;
318
319 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
320 return 0;
321
322 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
323 return 0;
324
325 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
326 416
327 dir = sdata->debugfs.dir; 417 dir = sdata->debugfs.dir;
328 418
329 if (!dir) 419 if (!dir)
330 return 0; 420 return;
331 421
332 sprintf(buf, "netdev:%s", dev->name); 422 sprintf(buf, "netdev:%s", sdata->name);
333 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) 423 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
334 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " 424 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
335 "dir to %s\n", buf); 425 "dir to %s\n", buf);
336
337 return 0;
338}
339
340static struct notifier_block mac80211_debugfs_netdev_notifier = {
341 .notifier_call = netdev_notify,
342};
343
344void ieee80211_debugfs_netdev_init(void)
345{
346 int err;
347
348 err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
349 if (err) {
350 printk(KERN_ERR
351 "mac80211: failed to install netdev notifier,"
352 " disabling per-netdev debugfs!\n");
353 } else
354 notif_registered = 1;
355}
356
357void ieee80211_debugfs_netdev_exit(void)
358{
359 unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
360 notif_registered = 0;
361} 426}
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b731..79025e79f4d6 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
6#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_netdev_init(void); 9void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
10void ieee80211_debugfs_netdev_exit(void);
11#else 10#else
12static inline void ieee80211_debugfs_add_netdev( 11static inline void ieee80211_debugfs_add_netdev(
13 struct ieee80211_sub_if_data *sdata) 12 struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
15static inline void ieee80211_debugfs_remove_netdev( 14static inline void ieee80211_debugfs_remove_netdev(
16 struct ieee80211_sub_if_data *sdata) 15 struct ieee80211_sub_if_data *sdata)
17{} 16{}
18static inline void ieee80211_debugfs_netdev_init(void) 17static inline void ieee80211_debugfs_rename_netdev(
19{} 18 struct ieee80211_sub_if_data *sdata)
20
21static inline void ieee80211_debugfs_netdev_exit(void)
22{} 19{}
23#endif 20#endif
24 21
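
With the smps file created read/write via DEBUGFS_ADD_MODE(smps, 0600), the requested mode can be driven from user space with a plain write(); ieee80211_if_parse_smps() matches with strncmp() over the written length, so no trailing newline should be sent. The debugfs path and interface name below are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* assumed: debugfs mounted at /sys/kernel/debug, interface wlan0 */
        const char *path =
            "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/smps";
        const char *mode = "dynamic";   /* one of: auto, off, static, dynamic */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror(path);
            return 1;
        }
        /* write the bare mode string, no newline */
        if (write(fd, mode, strlen(mode)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
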
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c8081..d92800bb2d2f 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
44 STA_OPS(name) 44 STA_OPS(name)
45 45
46STA_FILE(aid, sta.aid, D); 46STA_FILE(aid, sta.aid, D);
47STA_FILE(dev, sdata->dev->name, S); 47STA_FILE(dev, sdata->name, S);
48STA_FILE(rx_packets, rx_packets, LU); 48STA_FILE(rx_packets, rx_packets, LU);
49STA_FILE(tx_packets, tx_packets, LU); 49STA_FILE(tx_packets, tx_packets, LU);
50STA_FILE(rx_bytes, rx_bytes, LU); 50STA_FILE(rx_bytes, rx_bytes, LU);
@@ -120,36 +120,38 @@ STA_OPS(last_seq_ctrl);
120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
121 size_t count, loff_t *ppos) 121 size_t count, loff_t *ppos)
122{ 122{
123 char buf[30 + STA_TID_NUM * 70], *p = buf; 123 char buf[64 + STA_TID_NUM * 40], *p = buf;
124 int i; 124 int i;
125 struct sta_info *sta = file->private_data; 125 struct sta_info *sta = file->private_data;
126 126
127 spin_lock_bh(&sta->lock); 127 spin_lock_bh(&sta->lock);
128 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n", 128 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
129 sta->ampdu_mlme.dialog_token_allocator + 1); 129 sta->ampdu_mlme.dialog_token_allocator + 1);
130 p += scnprintf(p, sizeof(buf) + buf - p,
131 "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
130 for (i = 0; i < STA_TID_NUM; i++) { 132 for (i = 0; i < STA_TID_NUM; i++) {
131 p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i); 133 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
132 p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x", 134 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
133 sta->ampdu_mlme.tid_state_rx[i]); 135 sta->ampdu_mlme.tid_state_rx[i]);
134 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 136 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
135 sta->ampdu_mlme.tid_state_rx[i] ? 137 sta->ampdu_mlme.tid_state_rx[i] ?
136 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 138 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
137 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 139 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
138 sta->ampdu_mlme.tid_state_rx[i] ? 140 sta->ampdu_mlme.tid_state_rx[i] ?
139 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 141 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
140 142
141 p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x", 143 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
142 sta->ampdu_mlme.tid_state_tx[i]); 144 sta->ampdu_mlme.tid_state_tx[i]);
143 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 145 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
144 sta->ampdu_mlme.tid_state_tx[i] ? 146 sta->ampdu_mlme.tid_state_tx[i] ?
145 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); 147 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
146 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 148 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
147 sta->ampdu_mlme.tid_state_tx[i] ? 149 sta->ampdu_mlme.tid_state_tx[i] ?
148 sta->ampdu_mlme.tid_tx[i]->ssn : 0); 150 sta->ampdu_mlme.tid_tx[i]->ssn : 0);
149 p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d", 151 p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
150 sta->ampdu_mlme.tid_state_tx[i] ? 152 sta->ampdu_mlme.tid_state_tx[i] ?
151 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); 153 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
152 p += scnprintf(p, sizeof(buf)+buf-p, "\n"); 154 p += scnprintf(p, sizeof(buf) + buf - p, "\n");
153 } 155 }
154 spin_unlock_bh(&sta->lock); 156 spin_unlock_bh(&sta->lock);
155 157
@@ -160,7 +162,12 @@ STA_OPS(agg_status);
160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, 162static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
161 size_t count, loff_t *ppos) 163 size_t count, loff_t *ppos)
162{ 164{
163 char buf[200], *p = buf; 165#define PRINT_HT_CAP(_cond, _str) \
166 do { \
167 if (_cond) \
168 p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
169 } while (0)
170 char buf[512], *p = buf;
164 int i; 171 int i;
165 struct sta_info *sta = file->private_data; 172 struct sta_info *sta = file->private_data;
166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; 173 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +175,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
168 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", 175 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
169 htc->ht_supported ? "" : "not "); 176 htc->ht_supported ? "" : "not ");
170 if (htc->ht_supported) { 177 if (htc->ht_supported) {
171 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap); 178 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
179
 180		PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
181 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
182 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
183
184 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
185 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
186 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
187
188 PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
189 PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
190 PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
191 PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
192
193 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
194 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
195 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
196 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
197
198 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
199
 200		PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
 201			     "7935 bytes");
 202		PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
 203			     "3839 bytes");
204
205 /*
206 * For beacons and probe response this would mean the BSS
207 * does or does not allow the usage of DSSS/CCK HT40.
208 * Otherwise it means the STA does or does not use
209 * DSSS/CCK HT40.
210 */
211 PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
212 PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
213
214 /* BIT(13) is reserved */
215
216 PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
217
218 PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
219
172 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", 220 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
173 htc->ampdu_factor, htc->ampdu_density); 221 htc->ampdu_factor, htc->ampdu_density);
174 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); 222 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
223
175 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) 224 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", 225 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
177 htc->mcs.rx_mask[i]); 226 htc->mcs.rx_mask[i]);
178 p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n", 227 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
179 le16_to_cpu(htc->mcs.rx_highest)); 228
229 /* If not set this is meaningless */
230 if (le16_to_cpu(htc->mcs.rx_highest)) {
231 p += scnprintf(p, sizeof(buf)+buf-p,
232 "MCS rx highest: %d Mbps\n",
233 le16_to_cpu(htc->mcs.rx_highest));
234 }
235
180 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", 236 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
181 htc->mcs.tx_params); 237 htc->mcs.tx_params);
182 } 238 }
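
sta_ht_capa_read() decodes the 16-bit HT capability info field bit by bit into the strings above. The same decoding works offline on a raw capability value; a self-contained sketch (the value in main() is only an example):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    static void decode_ht_cap(uint16_t cap)
    {
        static const char *sm_ps[4] = {
            "Static SM Power Save", "Dynamic SM Power Save",
            "reserved", "SM Power Save disabled"
        };
        unsigned int rx_stbc = (cap >> 8) & 0x3;

        printf("cap: %#.4x\n", (unsigned int)cap);
        if (cap & BIT(0))
            printf("\tRX LDPC\n");
        printf("\t%s\n", (cap & BIT(1)) ? "HT20/HT40" : "HT20");
        printf("\t%s\n", sm_ps[(cap >> 2) & 0x3]);
        if (cap & BIT(4))
            printf("\tRX Greenfield\n");
        if (cap & BIT(5))
            printf("\tRX HT20 SGI\n");
        if (cap & BIT(6))
            printf("\tRX HT40 SGI\n");
        if (cap & BIT(7))
            printf("\tTX STBC\n");
        if (rx_stbc)
            printf("\tRX STBC %u-stream%s\n", rx_stbc, rx_stbc > 1 ? "s" : "");
        else
            printf("\tNo RX STBC\n");
        if (cap & BIT(10))
            printf("\tHT Delayed Block Ack\n");
        printf("\tMax AMSDU length: %s bytes\n",
               (cap & BIT(11)) ? "7935" : "3839");
        printf("\t%s\n", (cap & BIT(12)) ? "DSSS/CCK HT40" : "No DSSS/CCK HT40");
        if (cap & BIT(14))
            printf("\t40 MHz Intolerant\n");
        if (cap & BIT(15))
            printf("\tL-SIG TXOP protection\n");
    }

    int main(void)
    {
        decode_ht_cap(0x186e);    /* example value only */
        return 0;
    }
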
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff62..c3d844093a2f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
14{ 14{
15 int ret; 15 int ret;
16 16
17 might_sleep();
18
17 local->started = true; 19 local->started = true;
18 smp_mb(); 20 smp_mb();
19 ret = local->ops->start(&local->hw); 21 ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
23 25
24static inline void drv_stop(struct ieee80211_local *local) 26static inline void drv_stop(struct ieee80211_local *local)
25{ 27{
28 might_sleep();
29
26 local->ops->stop(&local->hw); 30 local->ops->stop(&local->hw);
27 trace_drv_stop(local); 31 trace_drv_stop(local);
28 32
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
36} 40}
37 41
38static inline int drv_add_interface(struct ieee80211_local *local, 42static inline int drv_add_interface(struct ieee80211_local *local,
39 struct ieee80211_if_init_conf *conf) 43 struct ieee80211_vif *vif)
40{ 44{
41 int ret = local->ops->add_interface(&local->hw, conf); 45 int ret;
42 trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret); 46
47 might_sleep();
48
49 ret = local->ops->add_interface(&local->hw, vif);
50 trace_drv_add_interface(local, vif_to_sdata(vif), ret);
43 return ret; 51 return ret;
44} 52}
45 53
46static inline void drv_remove_interface(struct ieee80211_local *local, 54static inline void drv_remove_interface(struct ieee80211_local *local,
47 struct ieee80211_if_init_conf *conf) 55 struct ieee80211_vif *vif)
48{ 56{
49 local->ops->remove_interface(&local->hw, conf); 57 might_sleep();
50 trace_drv_remove_interface(local, conf->mac_addr, conf->vif); 58
59 local->ops->remove_interface(&local->hw, vif);
60 trace_drv_remove_interface(local, vif_to_sdata(vif));
51} 61}
52 62
53static inline int drv_config(struct ieee80211_local *local, u32 changed) 63static inline int drv_config(struct ieee80211_local *local, u32 changed)
54{ 64{
55 int ret = local->ops->config(&local->hw, changed); 65 int ret;
66
67 might_sleep();
68
69 ret = local->ops->config(&local->hw, changed);
56 trace_drv_config(local, changed, ret); 70 trace_drv_config(local, changed, ret);
57 return ret; 71 return ret;
58} 72}
59 73
60static inline void drv_bss_info_changed(struct ieee80211_local *local, 74static inline void drv_bss_info_changed(struct ieee80211_local *local,
61 struct ieee80211_vif *vif, 75 struct ieee80211_sub_if_data *sdata,
62 struct ieee80211_bss_conf *info, 76 struct ieee80211_bss_conf *info,
63 u32 changed) 77 u32 changed)
64{ 78{
79 might_sleep();
80
65 if (local->ops->bss_info_changed) 81 if (local->ops->bss_info_changed)
66 local->ops->bss_info_changed(&local->hw, vif, info, changed); 82 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
67 trace_drv_bss_info_changed(local, vif, info, changed); 83 trace_drv_bss_info_changed(local, sdata, info, changed);
68} 84}
69 85
70static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,36 +122,53 @@ static inline int drv_set_tim(struct ieee80211_local *local,
106} 122}
107 123
108static inline int drv_set_key(struct ieee80211_local *local, 124static inline int drv_set_key(struct ieee80211_local *local,
109 enum set_key_cmd cmd, struct ieee80211_vif *vif, 125 enum set_key_cmd cmd,
126 struct ieee80211_sub_if_data *sdata,
110 struct ieee80211_sta *sta, 127 struct ieee80211_sta *sta,
111 struct ieee80211_key_conf *key) 128 struct ieee80211_key_conf *key)
112{ 129{
113 int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key); 130 int ret;
114 trace_drv_set_key(local, cmd, vif, sta, key, ret); 131
132 might_sleep();
133
134 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
135 trace_drv_set_key(local, cmd, sdata, sta, key, ret);
115 return ret; 136 return ret;
116} 137}
117 138
118static inline void drv_update_tkip_key(struct ieee80211_local *local, 139static inline void drv_update_tkip_key(struct ieee80211_local *local,
140 struct ieee80211_sub_if_data *sdata,
119 struct ieee80211_key_conf *conf, 141 struct ieee80211_key_conf *conf,
120 const u8 *address, u32 iv32, 142 struct sta_info *sta, u32 iv32,
121 u16 *phase1key) 143 u16 *phase1key)
122{ 144{
145 struct ieee80211_sta *ista = NULL;
146
147 if (sta)
148 ista = &sta->sta;
149
123 if (local->ops->update_tkip_key) 150 if (local->ops->update_tkip_key)
124 local->ops->update_tkip_key(&local->hw, conf, address, 151 local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
125 iv32, phase1key); 152 ista, iv32, phase1key);
126 trace_drv_update_tkip_key(local, conf, address, iv32); 153 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
127} 154}
128 155
129static inline int drv_hw_scan(struct ieee80211_local *local, 156static inline int drv_hw_scan(struct ieee80211_local *local,
130 struct cfg80211_scan_request *req) 157 struct cfg80211_scan_request *req)
131{ 158{
132 int ret = local->ops->hw_scan(&local->hw, req); 159 int ret;
160
161 might_sleep();
162
163 ret = local->ops->hw_scan(&local->hw, req);
133 trace_drv_hw_scan(local, req, ret); 164 trace_drv_hw_scan(local, req, ret);
134 return ret; 165 return ret;
135} 166}
136 167
137static inline void drv_sw_scan_start(struct ieee80211_local *local) 168static inline void drv_sw_scan_start(struct ieee80211_local *local)
138{ 169{
170 might_sleep();
171
139 if (local->ops->sw_scan_start) 172 if (local->ops->sw_scan_start)
140 local->ops->sw_scan_start(&local->hw); 173 local->ops->sw_scan_start(&local->hw);
141 trace_drv_sw_scan_start(local); 174 trace_drv_sw_scan_start(local);
@@ -143,6 +176,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
143 176
144static inline void drv_sw_scan_complete(struct ieee80211_local *local) 177static inline void drv_sw_scan_complete(struct ieee80211_local *local)
145{ 178{
179 might_sleep();
180
146 if (local->ops->sw_scan_complete) 181 if (local->ops->sw_scan_complete)
147 local->ops->sw_scan_complete(&local->hw); 182 local->ops->sw_scan_complete(&local->hw);
148 trace_drv_sw_scan_complete(local); 183 trace_drv_sw_scan_complete(local);
@@ -153,6 +188,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
153{ 188{
154 int ret = -EOPNOTSUPP; 189 int ret = -EOPNOTSUPP;
155 190
191 might_sleep();
192
156 if (local->ops->get_stats) 193 if (local->ops->get_stats)
157 ret = local->ops->get_stats(&local->hw, stats); 194 ret = local->ops->get_stats(&local->hw, stats);
158 trace_drv_get_stats(local, stats, ret); 195 trace_drv_get_stats(local, stats, ret);
@@ -172,43 +209,93 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
172 u32 value) 209 u32 value)
173{ 210{
174 int ret = 0; 211 int ret = 0;
212
213 might_sleep();
214
175 if (local->ops->set_rts_threshold) 215 if (local->ops->set_rts_threshold)
176 ret = local->ops->set_rts_threshold(&local->hw, value); 216 ret = local->ops->set_rts_threshold(&local->hw, value);
177 trace_drv_set_rts_threshold(local, value, ret); 217 trace_drv_set_rts_threshold(local, value, ret);
178 return ret; 218 return ret;
179} 219}
180 220
221static inline int drv_set_coverage_class(struct ieee80211_local *local,
222 u8 value)
223{
224 int ret = 0;
225 might_sleep();
226
227 if (local->ops->set_coverage_class)
228 local->ops->set_coverage_class(&local->hw, value);
229 else
230 ret = -EOPNOTSUPP;
231
232 trace_drv_set_coverage_class(local, value, ret);
233 return ret;
234}
235
181static inline void drv_sta_notify(struct ieee80211_local *local, 236static inline void drv_sta_notify(struct ieee80211_local *local,
182 struct ieee80211_vif *vif, 237 struct ieee80211_sub_if_data *sdata,
183 enum sta_notify_cmd cmd, 238 enum sta_notify_cmd cmd,
184 struct ieee80211_sta *sta) 239 struct ieee80211_sta *sta)
185{ 240{
186 if (local->ops->sta_notify) 241 if (local->ops->sta_notify)
187 local->ops->sta_notify(&local->hw, vif, cmd, sta); 242 local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
188 trace_drv_sta_notify(local, vif, cmd, sta); 243 trace_drv_sta_notify(local, sdata, cmd, sta);
244}
245
246static inline int drv_sta_add(struct ieee80211_local *local,
247 struct ieee80211_sub_if_data *sdata,
248 struct ieee80211_sta *sta)
249{
250 int ret = 0;
251
252 might_sleep();
253
254 if (local->ops->sta_add)
255 ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
256 else if (local->ops->sta_notify)
257 local->ops->sta_notify(&local->hw, &sdata->vif,
258 STA_NOTIFY_ADD, sta);
259
260 trace_drv_sta_add(local, sdata, sta, ret);
261
262 return ret;
263}
264
265static inline void drv_sta_remove(struct ieee80211_local *local,
266 struct ieee80211_sub_if_data *sdata,
267 struct ieee80211_sta *sta)
268{
269 might_sleep();
270
271 if (local->ops->sta_remove)
272 local->ops->sta_remove(&local->hw, &sdata->vif, sta);
273 else if (local->ops->sta_notify)
274 local->ops->sta_notify(&local->hw, &sdata->vif,
275 STA_NOTIFY_REMOVE, sta);
276
277 trace_drv_sta_remove(local, sdata, sta);
189} 278}
190 279
191static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, 280static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
192 const struct ieee80211_tx_queue_params *params) 281 const struct ieee80211_tx_queue_params *params)
193{ 282{
194 int ret = -EOPNOTSUPP; 283 int ret = -EOPNOTSUPP;
284
285 might_sleep();
286
195 if (local->ops->conf_tx) 287 if (local->ops->conf_tx)
196 ret = local->ops->conf_tx(&local->hw, queue, params); 288 ret = local->ops->conf_tx(&local->hw, queue, params);
197 trace_drv_conf_tx(local, queue, params, ret); 289 trace_drv_conf_tx(local, queue, params, ret);
198 return ret; 290 return ret;
199} 291}
200 292
201static inline int drv_get_tx_stats(struct ieee80211_local *local,
202 struct ieee80211_tx_queue_stats *stats)
203{
204 int ret = local->ops->get_tx_stats(&local->hw, stats);
205 trace_drv_get_tx_stats(local, stats, ret);
206 return ret;
207}
208
209static inline u64 drv_get_tsf(struct ieee80211_local *local) 293static inline u64 drv_get_tsf(struct ieee80211_local *local)
210{ 294{
211 u64 ret = -1ULL; 295 u64 ret = -1ULL;
296
297 might_sleep();
298
212 if (local->ops->get_tsf) 299 if (local->ops->get_tsf)
213 ret = local->ops->get_tsf(&local->hw); 300 ret = local->ops->get_tsf(&local->hw);
214 trace_drv_get_tsf(local, ret); 301 trace_drv_get_tsf(local, ret);
@@ -217,6 +304,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
217 304
218static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) 305static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
219{ 306{
307 might_sleep();
308
220 if (local->ops->set_tsf) 309 if (local->ops->set_tsf)
221 local->ops->set_tsf(&local->hw, tsf); 310 local->ops->set_tsf(&local->hw, tsf);
222 trace_drv_set_tsf(local, tsf); 311 trace_drv_set_tsf(local, tsf);
@@ -224,6 +313,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
224 313
225static inline void drv_reset_tsf(struct ieee80211_local *local) 314static inline void drv_reset_tsf(struct ieee80211_local *local)
226{ 315{
316 might_sleep();
317
227 if (local->ops->reset_tsf) 318 if (local->ops->reset_tsf)
228 local->ops->reset_tsf(&local->hw); 319 local->ops->reset_tsf(&local->hw);
229 trace_drv_reset_tsf(local); 320 trace_drv_reset_tsf(local);
@@ -232,6 +323,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
232static inline int drv_tx_last_beacon(struct ieee80211_local *local) 323static inline int drv_tx_last_beacon(struct ieee80211_local *local)
233{ 324{
234 int ret = 1; 325 int ret = 1;
326
327 might_sleep();
328
235 if (local->ops->tx_last_beacon) 329 if (local->ops->tx_last_beacon)
236 ret = local->ops->tx_last_beacon(&local->hw); 330 ret = local->ops->tx_last_beacon(&local->hw);
237 trace_drv_tx_last_beacon(local, ret); 331 trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +333,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
239} 333}
240 334
241static inline int drv_ampdu_action(struct ieee80211_local *local, 335static inline int drv_ampdu_action(struct ieee80211_local *local,
242 struct ieee80211_vif *vif, 336 struct ieee80211_sub_if_data *sdata,
243 enum ieee80211_ampdu_mlme_action action, 337 enum ieee80211_ampdu_mlme_action action,
244 struct ieee80211_sta *sta, u16 tid, 338 struct ieee80211_sta *sta, u16 tid,
245 u16 *ssn) 339 u16 *ssn)
246{ 340{
247 int ret = -EOPNOTSUPP; 341 int ret = -EOPNOTSUPP;
248 if (local->ops->ampdu_action) 342 if (local->ops->ampdu_action)
249 ret = local->ops->ampdu_action(&local->hw, vif, action, 343 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
250 sta, tid, ssn); 344 sta, tid, ssn);
251 trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret); 345 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
252 return ret; 346 return ret;
253} 347}
254 348
255 349
256static inline void drv_rfkill_poll(struct ieee80211_local *local) 350static inline void drv_rfkill_poll(struct ieee80211_local *local)
257{ 351{
352 might_sleep();
353
258 if (local->ops->rfkill_poll) 354 if (local->ops->rfkill_poll)
259 local->ops->rfkill_poll(&local->hw); 355 local->ops->rfkill_poll(&local->hw);
260} 356}
357
358static inline void drv_flush(struct ieee80211_local *local, bool drop)
359{
360 might_sleep();
361
362 trace_drv_flush(local, drop);
363 if (local->ops->flush)
364 local->ops->flush(&local->hw, drop);
365}
261#endif /* __MAC80211_DRIVER_OPS */ 366#endif /* __MAC80211_DRIVER_OPS */
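
The drv_*() helpers above all follow one shape: assert sleepable context with might_sleep(), invoke the driver callback only if the driver supplies it (falling back where a fallback exists, as drv_sta_add()/drv_sta_remove() do with sta_notify), and emit a tracepoint either way. A user-space analogue of the optional-callback-plus-trace pattern, with a made-up ops struct standing in for struct ieee80211_ops:

    #include <stdio.h>

    /* stand-in for the driver ops table; callbacks may be left NULL */
    struct fake_ops {
        int (*set_rts_threshold)(unsigned int value);
    };

    static void trace(const char *what, int ret)
    {
        fprintf(stderr, "trace: %s ret:%d\n", what, ret);
    }

    /* mirrors drv_set_rts_threshold(): optional op, trace regardless */
    static int drv_set_rts_threshold(const struct fake_ops *ops,
                                     unsigned int value)
    {
        int ret = 0;

        if (ops->set_rts_threshold)
            ret = ops->set_rts_threshold(value);
        trace("set_rts_threshold", ret);
        return ret;
    }

    static int my_set_rts(unsigned int value)
    {
        printf("driver: RTS threshold = %u\n", value);
        return 0;
    }

    int main(void)
    {
        struct fake_ops ops = { .set_rts_threshold = my_set_rts };

        drv_set_rts_threshold(&ops, 2347);
        return 0;
    }
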
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index da8497ef7063..41baf730a5c7 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
25#define STA_PR_FMT " sta:%pM" 25#define STA_PR_FMT " sta:%pM"
26#define STA_PR_ARG __entry->sta_addr 26#define STA_PR_ARG __entry->sta_addr
27 27
28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif) 28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
29#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif 29 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
30#define VIF_PR_FMT " vif:%p(%d)" 30#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
31#define VIF_PR_ARG __entry->vif, __entry->vif_type 31 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
32 34
33TRACE_EVENT(drv_start, 35TRACE_EVENT(drv_start,
34 TP_PROTO(struct ieee80211_local *local, int ret), 36 TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
70 72
71TRACE_EVENT(drv_add_interface, 73TRACE_EVENT(drv_add_interface,
72 TP_PROTO(struct ieee80211_local *local, 74 TP_PROTO(struct ieee80211_local *local,
73 const u8 *addr, 75 struct ieee80211_sub_if_data *sdata,
74 struct ieee80211_vif *vif,
75 int ret), 76 int ret),
76 77
77 TP_ARGS(local, addr, vif, ret), 78 TP_ARGS(local, sdata, ret),
78 79
79 TP_STRUCT__entry( 80 TP_STRUCT__entry(
80 LOCAL_ENTRY 81 LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
86 TP_fast_assign( 87 TP_fast_assign(
87 LOCAL_ASSIGN; 88 LOCAL_ASSIGN;
88 VIF_ASSIGN; 89 VIF_ASSIGN;
89 memcpy(__entry->addr, addr, 6); 90 memcpy(__entry->addr, sdata->vif.addr, 6);
90 __entry->ret = ret; 91 __entry->ret = ret;
91 ), 92 ),
92 93
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
97); 98);
98 99
99TRACE_EVENT(drv_remove_interface, 100TRACE_EVENT(drv_remove_interface,
100 TP_PROTO(struct ieee80211_local *local, 101 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
101 const u8 *addr, struct ieee80211_vif *vif),
102 102
103 TP_ARGS(local, addr, vif), 103 TP_ARGS(local, sdata),
104 104
105 TP_STRUCT__entry( 105 TP_STRUCT__entry(
106 LOCAL_ENTRY 106 LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
111 TP_fast_assign( 111 TP_fast_assign(
112 LOCAL_ASSIGN; 112 LOCAL_ASSIGN;
113 VIF_ASSIGN; 113 VIF_ASSIGN;
114 memcpy(__entry->addr, addr, 6); 114 memcpy(__entry->addr, sdata->vif.addr, 6);
115 ), 115 ),
116 116
117 TP_printk( 117 TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
140 __field(u8, short_frame_max_tx_count) 140 __field(u8, short_frame_max_tx_count)
141 __field(int, center_freq) 141 __field(int, center_freq)
142 __field(int, channel_type) 142 __field(int, channel_type)
143 __field(int, smps)
143 ), 144 ),
144 145
145 TP_fast_assign( 146 TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
155 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 156 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
156 __entry->center_freq = local->hw.conf.channel->center_freq; 157 __entry->center_freq = local->hw.conf.channel->center_freq;
157 __entry->channel_type = local->hw.conf.channel_type; 158 __entry->channel_type = local->hw.conf.channel_type;
159 __entry->smps = local->hw.conf.smps_mode;
158 ), 160 ),
159 161
160 TP_printk( 162 TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
165 167
166TRACE_EVENT(drv_bss_info_changed, 168TRACE_EVENT(drv_bss_info_changed,
167 TP_PROTO(struct ieee80211_local *local, 169 TP_PROTO(struct ieee80211_local *local,
168 struct ieee80211_vif *vif, 170 struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_bss_conf *info, 171 struct ieee80211_bss_conf *info,
170 u32 changed), 172 u32 changed),
171 173
172 TP_ARGS(local, vif, info, changed), 174 TP_ARGS(local, sdata, info, changed),
173 175
174 TP_STRUCT__entry( 176 TP_STRUCT__entry(
175 LOCAL_ENTRY 177 LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
293 295
294TRACE_EVENT(drv_set_key, 296TRACE_EVENT(drv_set_key,
295 TP_PROTO(struct ieee80211_local *local, 297 TP_PROTO(struct ieee80211_local *local,
296 enum set_key_cmd cmd, struct ieee80211_vif *vif, 298 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
297 struct ieee80211_sta *sta, 299 struct ieee80211_sta *sta,
298 struct ieee80211_key_conf *key, int ret), 300 struct ieee80211_key_conf *key, int ret),
299 301
300 TP_ARGS(local, cmd, vif, sta, key, ret), 302 TP_ARGS(local, cmd, sdata, sta, key, ret),
301 303
302 TP_STRUCT__entry( 304 TP_STRUCT__entry(
303 LOCAL_ENTRY 305 LOCAL_ENTRY
@@ -329,26 +331,29 @@ TRACE_EVENT(drv_set_key,
329 331
330TRACE_EVENT(drv_update_tkip_key, 332TRACE_EVENT(drv_update_tkip_key,
331 TP_PROTO(struct ieee80211_local *local, 333 TP_PROTO(struct ieee80211_local *local,
334 struct ieee80211_sub_if_data *sdata,
332 struct ieee80211_key_conf *conf, 335 struct ieee80211_key_conf *conf,
333 const u8 *address, u32 iv32), 336 struct ieee80211_sta *sta, u32 iv32),
334 337
335 TP_ARGS(local, conf, address, iv32), 338 TP_ARGS(local, sdata, conf, sta, iv32),
336 339
337 TP_STRUCT__entry( 340 TP_STRUCT__entry(
338 LOCAL_ENTRY 341 LOCAL_ENTRY
339 __array(u8, addr, 6) 342 VIF_ENTRY
343 STA_ENTRY
340 __field(u32, iv32) 344 __field(u32, iv32)
341 ), 345 ),
342 346
343 TP_fast_assign( 347 TP_fast_assign(
344 LOCAL_ASSIGN; 348 LOCAL_ASSIGN;
345 memcpy(__entry->addr, address, 6); 349 VIF_ASSIGN;
350 STA_ASSIGN;
346 __entry->iv32 = iv32; 351 __entry->iv32 = iv32;
347 ), 352 ),
348 353
349 TP_printk( 354 TP_printk(
350 LOCAL_PR_FMT " addr:%pM iv32:%#x", 355 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
351 LOCAL_PR_ARG, __entry->addr, __entry->iv32 356 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32
352 ) 357 )
353); 358);
354 359
@@ -489,13 +494,36 @@ TRACE_EVENT(drv_set_rts_threshold,
489 ) 494 )
490); 495);
491 496
497TRACE_EVENT(drv_set_coverage_class,
498 TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
499
500 TP_ARGS(local, value, ret),
501
502 TP_STRUCT__entry(
503 LOCAL_ENTRY
504 __field(u8, value)
505 __field(int, ret)
506 ),
507
508 TP_fast_assign(
509 LOCAL_ASSIGN;
510 __entry->ret = ret;
511 __entry->value = value;
512 ),
513
514 TP_printk(
515 LOCAL_PR_FMT " value:%d ret:%d",
516 LOCAL_PR_ARG, __entry->value, __entry->ret
517 )
518);
519
492TRACE_EVENT(drv_sta_notify, 520TRACE_EVENT(drv_sta_notify,
493 TP_PROTO(struct ieee80211_local *local, 521 TP_PROTO(struct ieee80211_local *local,
494 struct ieee80211_vif *vif, 522 struct ieee80211_sub_if_data *sdata,
495 enum sta_notify_cmd cmd, 523 enum sta_notify_cmd cmd,
496 struct ieee80211_sta *sta), 524 struct ieee80211_sta *sta),
497 525
498 TP_ARGS(local, vif, cmd, sta), 526 TP_ARGS(local, sdata, cmd, sta),
499 527
500 TP_STRUCT__entry( 528 TP_STRUCT__entry(
501 LOCAL_ENTRY 529 LOCAL_ENTRY
@@ -517,59 +545,88 @@ TRACE_EVENT(drv_sta_notify,
517 ) 545 )
518); 546);
519 547
520TRACE_EVENT(drv_conf_tx, 548TRACE_EVENT(drv_sta_add,
521 TP_PROTO(struct ieee80211_local *local, u16 queue, 549 TP_PROTO(struct ieee80211_local *local,
522 const struct ieee80211_tx_queue_params *params, 550 struct ieee80211_sub_if_data *sdata,
523 int ret), 551 struct ieee80211_sta *sta, int ret),
524 552
525 TP_ARGS(local, queue, params, ret), 553 TP_ARGS(local, sdata, sta, ret),
526 554
527 TP_STRUCT__entry( 555 TP_STRUCT__entry(
528 LOCAL_ENTRY 556 LOCAL_ENTRY
529 __field(u16, queue) 557 VIF_ENTRY
530 __field(u16, txop) 558 STA_ENTRY
531 __field(u16, cw_min)
532 __field(u16, cw_max)
533 __field(u8, aifs)
534 __field(int, ret) 559 __field(int, ret)
535 ), 560 ),
536 561
537 TP_fast_assign( 562 TP_fast_assign(
538 LOCAL_ASSIGN; 563 LOCAL_ASSIGN;
539 __entry->queue = queue; 564 VIF_ASSIGN;
565 STA_ASSIGN;
540 __entry->ret = ret; 566 __entry->ret = ret;
541 __entry->txop = params->txop;
542 __entry->cw_max = params->cw_max;
543 __entry->cw_min = params->cw_min;
544 __entry->aifs = params->aifs;
545 ), 567 ),
546 568
547 TP_printk( 569 TP_printk(
548 LOCAL_PR_FMT " queue:%d ret:%d", 570 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
549 LOCAL_PR_ARG, __entry->queue, __entry->ret 571 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
550 ) 572 )
551); 573);
552 574
553TRACE_EVENT(drv_get_tx_stats, 575TRACE_EVENT(drv_sta_remove,
554 TP_PROTO(struct ieee80211_local *local, 576 TP_PROTO(struct ieee80211_local *local,
555 struct ieee80211_tx_queue_stats *stats, 577 struct ieee80211_sub_if_data *sdata,
578 struct ieee80211_sta *sta),
579
580 TP_ARGS(local, sdata, sta),
581
582 TP_STRUCT__entry(
583 LOCAL_ENTRY
584 VIF_ENTRY
585 STA_ENTRY
586 ),
587
588 TP_fast_assign(
589 LOCAL_ASSIGN;
590 VIF_ASSIGN;
591 STA_ASSIGN;
592 ),
593
594 TP_printk(
595 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
596 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
597 )
598);
599
600TRACE_EVENT(drv_conf_tx,
601 TP_PROTO(struct ieee80211_local *local, u16 queue,
602 const struct ieee80211_tx_queue_params *params,
556 int ret), 603 int ret),
557 604
558 TP_ARGS(local, stats, ret), 605 TP_ARGS(local, queue, params, ret),
559 606
560 TP_STRUCT__entry( 607 TP_STRUCT__entry(
561 LOCAL_ENTRY 608 LOCAL_ENTRY
609 __field(u16, queue)
610 __field(u16, txop)
611 __field(u16, cw_min)
612 __field(u16, cw_max)
613 __field(u8, aifs)
562 __field(int, ret) 614 __field(int, ret)
563 ), 615 ),
564 616
565 TP_fast_assign( 617 TP_fast_assign(
566 LOCAL_ASSIGN; 618 LOCAL_ASSIGN;
619 __entry->queue = queue;
567 __entry->ret = ret; 620 __entry->ret = ret;
621 __entry->txop = params->txop;
622 __entry->cw_max = params->cw_max;
623 __entry->cw_min = params->cw_min;
624 __entry->aifs = params->aifs;
568 ), 625 ),
569 626
570 TP_printk( 627 TP_printk(
571 LOCAL_PR_FMT " ret:%d", 628 LOCAL_PR_FMT " queue:%d ret:%d",
572 LOCAL_PR_ARG, __entry->ret 629 LOCAL_PR_ARG, __entry->queue, __entry->ret
573 ) 630 )
574); 631);
575 632
@@ -656,12 +713,12 @@ TRACE_EVENT(drv_tx_last_beacon,
656 713
657TRACE_EVENT(drv_ampdu_action, 714TRACE_EVENT(drv_ampdu_action,
658 TP_PROTO(struct ieee80211_local *local, 715 TP_PROTO(struct ieee80211_local *local,
659 struct ieee80211_vif *vif, 716 struct ieee80211_sub_if_data *sdata,
660 enum ieee80211_ampdu_mlme_action action, 717 enum ieee80211_ampdu_mlme_action action,
661 struct ieee80211_sta *sta, u16 tid, 718 struct ieee80211_sta *sta, u16 tid,
662 u16 *ssn, int ret), 719 u16 *ssn, int ret),
663 720
664 TP_ARGS(local, vif, action, sta, tid, ssn, ret), 721 TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
665 722
666 TP_STRUCT__entry( 723 TP_STRUCT__entry(
667 LOCAL_ENTRY 724 LOCAL_ENTRY
@@ -688,6 +745,27 @@ TRACE_EVENT(drv_ampdu_action,
688 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret 745 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
689 ) 746 )
690); 747);
748
749TRACE_EVENT(drv_flush,
750 TP_PROTO(struct ieee80211_local *local, bool drop),
751
752 TP_ARGS(local, drop),
753
754 TP_STRUCT__entry(
755 LOCAL_ENTRY
756 __field(bool, drop)
757 ),
758
759 TP_fast_assign(
760 LOCAL_ASSIGN;
761 __entry->drop = drop;
762 ),
763
764 TP_printk(
765 LOCAL_PR_FMT " drop:%d",
766 LOCAL_PR_ARG, __entry->drop
767 )
768);
691#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 769#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
692 770
693#undef TRACE_INCLUDE_PATH 771#undef TRACE_INCLUDE_PATH
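
The new events (drv_set_coverage_class, drv_sta_add, drv_sta_remove, drv_flush) all use the same TRACE_EVENT() skeleton: TP_PROTO/TP_ARGS give the hook signature, TP_STRUCT__entry lays out the per-event record, TP_fast_assign fills it on the hot path, and TP_printk formats it only when the trace buffer is read. A stripped-down sketch modeled on drv_flush, with a hypothetical event name:

    /* hypothetical event; same skeleton as drv_flush above */
    TRACE_EVENT(drv_example,
        TP_PROTO(struct ieee80211_local *local, u32 value),

        TP_ARGS(local, value),

        TP_STRUCT__entry(
            LOCAL_ENTRY         /* wiphy name, as in the other events */
            __field(u32, value)
        ),

        TP_fast_assign(
            LOCAL_ASSIGN;
            __entry->value = value;
        ),

        TP_printk(
            LOCAL_PR_FMT " value:%u",
            LOCAL_PR_ARG, __entry->value
        )
    );
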
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7dcee680728..bb677a73b7c9 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -125,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
125 125
126 if (!skb) { 126 if (!skb) {
127 printk(KERN_ERR "%s: failed to allocate buffer " 127 printk(KERN_ERR "%s: failed to allocate buffer "
128 "for delba frame\n", sdata->dev->name); 128 "for delba frame\n", sdata->name);
129 return; 129 return;
130 } 130 }
131 131
@@ -133,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
134 memset(mgmt, 0, 24); 134 memset(mgmt, 0, 24);
135 memcpy(mgmt->da, da, ETH_ALEN); 135 memcpy(mgmt->da, da, ETH_ALEN);
136 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 136 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
137 if (sdata->vif.type == NL80211_IFTYPE_AP || 137 if (sdata->vif.type == NL80211_IFTYPE_AP ||
138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
139 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 139 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
140 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 140 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
142 142
@@ -185,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
185 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
186 } 186 }
187} 187}
188
189int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
190 enum ieee80211_smps_mode smps, const u8 *da,
191 const u8 *bssid)
192{
193 struct ieee80211_local *local = sdata->local;
194 struct sk_buff *skb;
195 struct ieee80211_mgmt *action_frame;
196
197 /* 27 = header + category + action + smps mode */
198 skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
199 if (!skb)
200 return -ENOMEM;
201
202 skb_reserve(skb, local->hw.extra_tx_headroom);
203 action_frame = (void *)skb_put(skb, 27);
204 memcpy(action_frame->da, da, ETH_ALEN);
205 memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
206 memcpy(action_frame->bssid, bssid, ETH_ALEN);
207 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
208 IEEE80211_STYPE_ACTION);
209 action_frame->u.action.category = WLAN_CATEGORY_HT;
210 action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
211 switch (smps) {
212 case IEEE80211_SMPS_AUTOMATIC:
213 case IEEE80211_SMPS_NUM_MODES:
214 WARN_ON(1);
215 case IEEE80211_SMPS_OFF:
216 action_frame->u.action.u.ht_smps.smps_control =
217 WLAN_HT_SMPS_CONTROL_DISABLED;
218 break;
219 case IEEE80211_SMPS_STATIC:
220 action_frame->u.action.u.ht_smps.smps_control =
221 WLAN_HT_SMPS_CONTROL_STATIC;
222 break;
223 case IEEE80211_SMPS_DYNAMIC:
224 action_frame->u.action.u.ht_smps.smps_control =
225 WLAN_HT_SMPS_CONTROL_DYNAMIC;
226 break;
227 }
228
229 /* we'll do more on status of this frame */
230 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
231 ieee80211_tx_skb(sdata, skb);
232
233 return 0;
234}
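
The 27 bytes reserved in ieee80211_send_smps_action() are the 24-byte 802.11 management header plus one byte each for the category, the HT action code, and the SM PS control field. A standalone layout check, using a local packed struct rather than the kernel's struct ieee80211_mgmt:

    #include <stdint.h>
    #include <stdio.h>

    /* local, illustrative layout -- not the kernel's struct ieee80211_mgmt */
    struct smps_action_frame {
        uint16_t frame_control;
        uint16_t duration;
        uint8_t  da[6];
        uint8_t  sa[6];
        uint8_t  bssid[6];
        uint16_t seq_ctrl;      /* 24-byte management header ends here */
        uint8_t  category;      /* WLAN_CATEGORY_HT */
        uint8_t  action;        /* WLAN_HT_ACTION_SMPS */
        uint8_t  smps_control;  /* disabled / static / dynamic */
    } __attribute__((packed));

    int main(void)
    {
        /* matches the 27-byte skb_put() in ieee80211_send_smps_action() */
        printf("frame length: %zu bytes\n", sizeof(struct smps_action_frame));
        return 0;
    }
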
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 22f0c2aa7a89..e2976da4e0d9 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/slab.h>
16#include <linux/if_ether.h> 17#include <linux/if_ether.h>
17#include <linux/skbuff.h> 18#include <linux/skbuff.h>
18#include <linux/if_arp.h> 19#include <linux/if_arp.h>
@@ -117,7 +118,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 118 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
118 IEEE80211_STYPE_PROBE_RESP); 119 IEEE80211_STYPE_PROBE_RESP);
119 memset(mgmt->da, 0xff, ETH_ALEN); 120 memset(mgmt->da, 0xff, ETH_ALEN);
120 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 121 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 122 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); 123 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf); 124 mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +188,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 188static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
188 struct ieee80211_bss *bss) 189 struct ieee80211_bss *bss)
189{ 190{
191 struct cfg80211_bss *cbss =
192 container_of((void *)bss, struct cfg80211_bss, priv);
190 struct ieee80211_supported_band *sband; 193 struct ieee80211_supported_band *sband;
191 u32 basic_rates; 194 u32 basic_rates;
192 int i, j; 195 int i, j;
193 u16 beacon_int = bss->cbss.beacon_interval; 196 u16 beacon_int = cbss->beacon_interval;
194 197
195 if (beacon_int < 10) 198 if (beacon_int < 10)
196 beacon_int = 10; 199 beacon_int = 10;
197 200
198 sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band]; 201 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
199 202
200 basic_rates = 0; 203 basic_rates = 0;
201 204
@@ -212,12 +215,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
212 } 215 }
213 } 216 }
214 217
215 __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid, 218 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
216 beacon_int, 219 beacon_int,
217 bss->cbss.channel, 220 cbss->channel,
218 basic_rates, 221 basic_rates,
219 bss->cbss.capability, 222 cbss->capability,
220 bss->cbss.tsf); 223 cbss->tsf);
221} 224}
222 225
223static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 226static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +232,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
229{ 232{
230 struct ieee80211_local *local = sdata->local; 233 struct ieee80211_local *local = sdata->local;
231 int freq; 234 int freq;
235 struct cfg80211_bss *cbss;
232 struct ieee80211_bss *bss; 236 struct ieee80211_bss *bss;
233 struct sta_info *sta; 237 struct sta_info *sta;
234 struct ieee80211_channel *channel; 238 struct ieee80211_channel *channel;
@@ -252,7 +256,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
252 256
253 rcu_read_lock(); 257 rcu_read_lock();
254 258
255 sta = sta_info_get(local, mgmt->sa); 259 sta = sta_info_get(sdata, mgmt->sa);
256 if (sta) { 260 if (sta) {
257 u32 prev_rates; 261 u32 prev_rates;
258 262
@@ -266,16 +270,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
266 printk(KERN_DEBUG "%s: updated supp_rates set " 270 printk(KERN_DEBUG "%s: updated supp_rates set "
267 "for %pM based on beacon info (0x%llx | " 271 "for %pM based on beacon info (0x%llx | "
268 "0x%llx -> 0x%llx)\n", 272 "0x%llx -> 0x%llx)\n",
269 sdata->dev->name, 273 sdata->name,
270 sta->sta.addr, 274 sta->sta.addr,
271 (unsigned long long) prev_rates, 275 (unsigned long long) prev_rates,
272 (unsigned long long) supp_rates, 276 (unsigned long long) supp_rates,
273 (unsigned long long) sta->sta.supp_rates[band]); 277 (unsigned long long) sta->sta.supp_rates[band]);
274#endif 278#endif
275 } else 279 rcu_read_unlock();
276 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 280 } else {
277 281 rcu_read_unlock();
278 rcu_read_unlock(); 282 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
283 supp_rates, GFP_KERNEL);
284 }
279 } 285 }
280 286
281 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 287 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
@@ -283,25 +289,23 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
283 if (!bss) 289 if (!bss)
284 return; 290 return;
285 291
292 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
293
286 /* was just updated in ieee80211_bss_info_update */ 294 /* was just updated in ieee80211_bss_info_update */
287 beacon_timestamp = bss->cbss.tsf; 295 beacon_timestamp = cbss->tsf;
288 296
289 /* check if we need to merge IBSS */ 297 /* check if we need to merge IBSS */
290 298
291 /* merge only on beacons (???) */
292 if (!beacon)
293 goto put_bss;
294
295 /* we use a fixed BSSID */ 299 /* we use a fixed BSSID */
296 if (sdata->u.ibss.bssid) 300 if (sdata->u.ibss.fixed_bssid)
297 goto put_bss; 301 goto put_bss;
298 302
299 /* not an IBSS */ 303 /* not an IBSS */
300 if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS)) 304 if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
301 goto put_bss; 305 goto put_bss;
302 306
303 /* different channel */ 307 /* different channel */
304 if (bss->cbss.channel != local->oper_channel) 308 if (cbss->channel != local->oper_channel)
305 goto put_bss; 309 goto put_bss;
306 310
307 /* different SSID */ 311 /* different SSID */
@@ -311,7 +315,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
311 goto put_bss; 315 goto put_bss;
312 316
313 /* same BSSID */ 317 /* same BSSID */
314 if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 318 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
315 goto put_bss; 319 goto put_bss;
316 320
317 if (rx_status->flag & RX_FLAG_TSFT) { 321 if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,10 +368,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
364#ifdef CONFIG_MAC80211_IBSS_DEBUG 368#ifdef CONFIG_MAC80211_IBSS_DEBUG
365 printk(KERN_DEBUG "%s: beacon TSF higher than " 369 printk(KERN_DEBUG "%s: beacon TSF higher than "
366 "local TSF - IBSS merge with BSSID %pM\n", 370 "local TSF - IBSS merge with BSSID %pM\n",
367 sdata->dev->name, mgmt->bssid); 371 sdata->name, mgmt->bssid);
368#endif 372#endif
369 ieee80211_sta_join_ibss(sdata, bss); 373 ieee80211_sta_join_ibss(sdata, bss);
370 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 374 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
375 supp_rates, GFP_KERNEL);
371 } 376 }
372 377
373 put_bss: 378 put_bss:
@@ -380,7 +385,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
380 * must be callable in atomic context. 385 * must be callable in atomic context.
381 */ 386 */
382struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 387struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
383 u8 *bssid,u8 *addr, u32 supp_rates) 388 u8 *bssid,u8 *addr, u32 supp_rates,
389 gfp_t gfp)
384{ 390{
385 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 391 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
386 struct ieee80211_local *local = sdata->local; 392 struct ieee80211_local *local = sdata->local;
@@ -394,7 +400,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
394 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 400 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
395 if (net_ratelimit()) 401 if (net_ratelimit())
396 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 402 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
397 sdata->dev->name, addr); 403 sdata->name, addr);
398 return NULL; 404 return NULL;
399 } 405 }
400 406
@@ -406,10 +412,10 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
406 412
407#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 413#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
408 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", 414 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
409 wiphy_name(local->hw.wiphy), addr, sdata->dev->name); 415 wiphy_name(local->hw.wiphy), addr, sdata->name);
410#endif 416#endif
411 417
412 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 418 sta = sta_info_alloc(sdata, addr, gfp);
413 if (!sta) 419 if (!sta)
414 return NULL; 420 return NULL;
415 421
@@ -421,9 +427,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
421 427
422 rate_control_rate_init(sta); 428 rate_control_rate_init(sta);
423 429
430 /* If it fails, maybe we raced another insertion? */
424 if (sta_info_insert(sta)) 431 if (sta_info_insert(sta))
425 return NULL; 432 return sta_info_get(sdata, addr);
426
427 return sta; 433 return sta;
428} 434}
429 435
@@ -449,6 +455,9 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
449 return active; 455 return active;
450} 456}
451 457
458/*
459 * This function is called with state == IEEE80211_IBSS_MLME_JOINED
460 */
452 461
453static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) 462static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
454{ 463{
@@ -470,7 +479,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
470 return; 479 return;
471 480
472 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 481 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
473 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 482 "IBSS networks with same SSID (merge)\n", sdata->name);
474 483
475 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); 484 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
476} 485}
@@ -492,13 +501,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
492 * random number generator get different BSSID. */ 501 * random number generator get different BSSID. */
493 get_random_bytes(bssid, ETH_ALEN); 502 get_random_bytes(bssid, ETH_ALEN);
494 for (i = 0; i < ETH_ALEN; i++) 503 for (i = 0; i < ETH_ALEN; i++)
495 bssid[i] ^= sdata->dev->dev_addr[i]; 504 bssid[i] ^= sdata->vif.addr[i];
496 bssid[0] &= ~0x01; 505 bssid[0] &= ~0x01;
497 bssid[0] |= 0x02; 506 bssid[0] |= 0x02;
498 } 507 }
499 508
500 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 509 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
501 sdata->dev->name, bssid); 510 sdata->name, bssid);
502 511
503 sband = local->hw.wiphy->bands[ifibss->channel->band]; 512 sband = local->hw.wiphy->bands[ifibss->channel->band];
504 513
@@ -514,11 +523,15 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
514 capability, 0); 523 capability, 0);
515} 524}
516 525
526/*
527 * This function is called with state == IEEE80211_IBSS_MLME_SEARCH
528 */
529
517static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) 530static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
518{ 531{
519 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 532 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
520 struct ieee80211_local *local = sdata->local; 533 struct ieee80211_local *local = sdata->local;
521 struct ieee80211_bss *bss; 534 struct cfg80211_bss *cbss;
522 struct ieee80211_channel *chan = NULL; 535 struct ieee80211_channel *chan = NULL;
523 const u8 *bssid = NULL; 536 const u8 *bssid = NULL;
524 int active_ibss; 537 int active_ibss;
@@ -527,7 +540,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
527 active_ibss = ieee80211_sta_active_ibss(sdata); 540 active_ibss = ieee80211_sta_active_ibss(sdata);
528#ifdef CONFIG_MAC80211_IBSS_DEBUG 541#ifdef CONFIG_MAC80211_IBSS_DEBUG
529 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 542 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
530 sdata->dev->name, active_ibss); 543 sdata->name, active_ibss);
531#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 544#endif /* CONFIG_MAC80211_IBSS_DEBUG */
532 545
533 if (active_ibss) 546 if (active_ibss)
@@ -542,21 +555,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
542 chan = ifibss->channel; 555 chan = ifibss->channel;
543 if (!is_zero_ether_addr(ifibss->bssid)) 556 if (!is_zero_ether_addr(ifibss->bssid))
544 bssid = ifibss->bssid; 557 bssid = ifibss->bssid;
545 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid, 558 cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
546 ifibss->ssid, ifibss->ssid_len, 559 ifibss->ssid, ifibss->ssid_len,
547 WLAN_CAPABILITY_IBSS | 560 WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
548 WLAN_CAPABILITY_PRIVACY, 561 capability);
549 capability); 562
563 if (cbss) {
564 struct ieee80211_bss *bss;
550 565
551 if (bss) { 566 bss = (void *)cbss->priv;
552#ifdef CONFIG_MAC80211_IBSS_DEBUG 567#ifdef CONFIG_MAC80211_IBSS_DEBUG
553 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 568 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
554 "%pM\n", bss->cbss.bssid, ifibss->bssid); 569 "%pM\n", cbss->bssid, ifibss->bssid);
555#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 570#endif /* CONFIG_MAC80211_IBSS_DEBUG */
556 571
557 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 572 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
558 " based on configured SSID\n", 573 " based on configured SSID\n",
559 sdata->dev->name, bss->cbss.bssid); 574 sdata->name, cbss->bssid);
560 575
561 ieee80211_sta_join_ibss(sdata, bss); 576 ieee80211_sta_join_ibss(sdata, bss);
562 ieee80211_rx_bss_put(local, bss); 577 ieee80211_rx_bss_put(local, bss);
@@ -568,18 +583,14 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
568#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 583#endif /* CONFIG_MAC80211_IBSS_DEBUG */
569 584
570 /* Selected IBSS not found in current scan results - try to scan */ 585 /* Selected IBSS not found in current scan results - try to scan */
571 if (ifibss->state == IEEE80211_IBSS_MLME_JOINED && 586 if (time_after(jiffies, ifibss->last_scan_completed +
572 !ieee80211_sta_active_ibss(sdata)) {
573 mod_timer(&ifibss->timer,
574 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
575 } else if (time_after(jiffies, ifibss->last_scan_completed +
576 IEEE80211_SCAN_INTERVAL)) { 587 IEEE80211_SCAN_INTERVAL)) {
577 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 588 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
578 "join\n", sdata->dev->name); 589 "join\n", sdata->name);
579 590
580 ieee80211_request_internal_scan(sdata, ifibss->ssid, 591 ieee80211_request_internal_scan(sdata, ifibss->ssid,
581 ifibss->ssid_len); 592 ifibss->ssid_len);
582 } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) { 593 } else {
583 int interval = IEEE80211_SCAN_INTERVAL; 594 int interval = IEEE80211_SCAN_INTERVAL;
584 595
585 if (time_after(jiffies, ifibss->ibss_join_req + 596 if (time_after(jiffies, ifibss->ibss_join_req +
@@ -589,7 +600,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
589 return; 600 return;
590 } 601 }
591 printk(KERN_DEBUG "%s: IBSS not allowed on" 602 printk(KERN_DEBUG "%s: IBSS not allowed on"
592 " %d MHz\n", sdata->dev->name, 603 " %d MHz\n", sdata->name,
593 local->hw.conf.channel->center_freq); 604 local->hw.conf.channel->center_freq);
594 605
595 /* No IBSS found - decrease scan interval and continue 606 /* No IBSS found - decrease scan interval and continue
@@ -597,7 +608,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
597 interval = IEEE80211_SCAN_INTERVAL_SLOW; 608 interval = IEEE80211_SCAN_INTERVAL_SLOW;
598 } 609 }
599 610
600 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
601 mod_timer(&ifibss->timer, 611 mod_timer(&ifibss->timer,
602 round_jiffies(jiffies + interval)); 612 round_jiffies(jiffies + interval));
603 } 613 }
@@ -623,7 +633,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
623#ifdef CONFIG_MAC80211_IBSS_DEBUG 633#ifdef CONFIG_MAC80211_IBSS_DEBUG
624 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 634 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
625 " (tx_last_beacon=%d)\n", 635 " (tx_last_beacon=%d)\n",
626 sdata->dev->name, mgmt->sa, mgmt->da, 636 sdata->name, mgmt->sa, mgmt->da,
627 mgmt->bssid, tx_last_beacon); 637 mgmt->bssid, tx_last_beacon);
628#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 638#endif /* CONFIG_MAC80211_IBSS_DEBUG */
629 639
@@ -641,7 +651,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
641#ifdef CONFIG_MAC80211_IBSS_DEBUG 651#ifdef CONFIG_MAC80211_IBSS_DEBUG
642 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 652 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
643 "from %pM\n", 653 "from %pM\n",
644 sdata->dev->name, mgmt->sa); 654 sdata->name, mgmt->sa);
645#endif 655#endif
646 return; 656 return;
647 } 657 }
@@ -661,7 +671,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
661 memcpy(resp->da, mgmt->sa, ETH_ALEN); 671 memcpy(resp->da, mgmt->sa, ETH_ALEN);
662#ifdef CONFIG_MAC80211_IBSS_DEBUG 672#ifdef CONFIG_MAC80211_IBSS_DEBUG
663 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", 673 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
664 sdata->dev->name, resp->da); 674 sdata->name, resp->da);
665#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 675#endif /* CONFIG_MAC80211_IBSS_DEBUG */
666 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 676 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
667 ieee80211_tx_skb(sdata, skb); 677 ieee80211_tx_skb(sdata, skb);
@@ -675,7 +685,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
675 size_t baselen; 685 size_t baselen;
676 struct ieee802_11_elems elems; 686 struct ieee802_11_elems elems;
677 687
678 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 688 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
679 return; /* ignore ProbeResp to foreign address */ 689 return; /* ignore ProbeResp to foreign address */
680 690
681 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 691 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -748,7 +758,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
748 if (WARN_ON(local->suspended)) 758 if (WARN_ON(local->suspended))
749 return; 759 return;
750 760
751 if (!netif_running(sdata->dev)) 761 if (!ieee80211_sdata_running(sdata))
752 return; 762 return;
753 763
754 if (local->scanning) 764 if (local->scanning)
@@ -831,7 +841,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
831 841
832 mutex_lock(&local->iflist_mtx); 842 mutex_lock(&local->iflist_mtx);
833 list_for_each_entry(sdata, &local->interfaces, list) { 843 list_for_each_entry(sdata, &local->interfaces, list) {
834 if (!netif_running(sdata->dev)) 844 if (!ieee80211_sdata_running(sdata))
835 continue; 845 continue;
836 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 846 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
837 continue; 847 continue;
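
[Editorial aside] A recurring pattern in the ibss.c changes above is converting between cfg80211's BSS entry and mac80211's private data, now that the embedded struct is gone (see the ieee80211_i.h hunk below). The two directions, collected here only as a sketch of what the hunks already open-code (these helpers are not part of the patch):

	static inline struct cfg80211_bss *bss_to_cbss(struct ieee80211_bss *bss)
	{
		/* mac80211 private data lives in cfg80211_bss::priv */
		return container_of((void *)bss, struct cfg80211_bss, priv);
	}

	static inline struct ieee80211_bss *cbss_to_bss(struct cfg80211_bss *cbss)
	{
		/* and the reverse direction is just the priv pointer */
		return (void *)cbss->priv;
	}
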
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d644..241533e1bc03 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -58,6 +58,15 @@ struct ieee80211_local;
58 58
59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
60 60
61#define IEEE80211_DEFAULT_UAPSD_QUEUES \
62 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
63 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
64 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
65 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
66
67#define IEEE80211_DEFAULT_MAX_SP_LEN \
68 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
69
61struct ieee80211_fragment_entry { 70struct ieee80211_fragment_entry {
62 unsigned long first_frag_time; 71 unsigned long first_frag_time;
63 unsigned int seq; 72 unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
71 80
72 81
73struct ieee80211_bss { 82struct ieee80211_bss {
74 /* Yes, this is a hack */
75 struct cfg80211_bss cbss;
76
77 /* don't want to look up all the time */ 83 /* don't want to look up all the time */
78 size_t ssid_len; 84 size_t ssid_len;
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 85 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
81 u8 dtim_period; 87 u8 dtim_period;
82 88
83 bool wmm_used; 89 bool wmm_used;
90 bool uapsd_supported;
84 91
85 unsigned long last_probe_resp; 92 unsigned long last_probe_resp;
86 93
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
140 147
141struct ieee80211_tx_data { 148struct ieee80211_tx_data {
142 struct sk_buff *skb; 149 struct sk_buff *skb;
143 struct net_device *dev;
144 struct ieee80211_local *local; 150 struct ieee80211_local *local;
145 struct ieee80211_sub_if_data *sdata; 151 struct ieee80211_sub_if_data *sdata;
146 struct sta_info *sta; 152 struct sta_info *sta;
@@ -228,31 +234,77 @@ struct mesh_preq_queue {
228 u8 flags; 234 u8 flags;
229}; 235};
230 236
231enum ieee80211_mgd_state { 237enum ieee80211_work_type {
232 IEEE80211_MGD_STATE_IDLE, 238 IEEE80211_WORK_ABORT,
233 IEEE80211_MGD_STATE_PROBE, 239 IEEE80211_WORK_DIRECT_PROBE,
234 IEEE80211_MGD_STATE_AUTH, 240 IEEE80211_WORK_AUTH,
235 IEEE80211_MGD_STATE_ASSOC, 241 IEEE80211_WORK_ASSOC,
242 IEEE80211_WORK_REMAIN_ON_CHANNEL,
236}; 243};
237 244
238struct ieee80211_mgd_work { 245/**
246 * enum work_done_result - indicates what to do after work was done
247 *
248 * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
249 * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
250 * should be requeued.
251 */
252enum work_done_result {
253 WORK_DONE_DESTROY,
254 WORK_DONE_REQUEUE,
255};
256
257struct ieee80211_work {
239 struct list_head list; 258 struct list_head list;
240 struct ieee80211_bss *bss; 259
241 int ie_len; 260 struct rcu_head rcu_head;
242 u8 prev_bssid[ETH_ALEN]; 261
243 u8 ssid[IEEE80211_MAX_SSID_LEN]; 262 struct ieee80211_sub_if_data *sdata;
244 u8 ssid_len; 263
264 enum work_done_result (*done)(struct ieee80211_work *wk,
265 struct sk_buff *skb);
266
267 struct ieee80211_channel *chan;
268 enum nl80211_channel_type chan_type;
269
245 unsigned long timeout; 270 unsigned long timeout;
246 enum ieee80211_mgd_state state; 271 enum ieee80211_work_type type;
247 u16 auth_alg, auth_transaction; 272
273 u8 filter_ta[ETH_ALEN];
248 274
249 int tries; 275 bool started;
250 276
251 u8 key[WLAN_KEY_LEN_WEP104]; 277 union {
252 u8 key_len, key_idx; 278 struct {
279 int tries;
280 u16 algorithm, transaction;
281 u8 ssid[IEEE80211_MAX_SSID_LEN];
282 u8 ssid_len;
283 u8 key[WLAN_KEY_LEN_WEP104];
284 u8 key_len, key_idx;
285 bool privacy;
286 } probe_auth;
287 struct {
288 struct cfg80211_bss *bss;
289 const u8 *supp_rates;
290 const u8 *ht_information_ie;
291 enum ieee80211_smps_mode smps;
292 int tries;
293 u16 capability;
294 u8 prev_bssid[ETH_ALEN];
295 u8 ssid[IEEE80211_MAX_SSID_LEN];
296 u8 ssid_len;
297 u8 supp_rates_len;
298 bool wmm_used, use_11n, uapsd_used;
299 } assoc;
300 struct {
301 u32 duration;
302 } remain;
303 };
253 304
305 int ie_len;
254 /* must be last */ 306 /* must be last */
255 u8 ie[0]; /* for auth or assoc frame, not probe */ 307 u8 ie[0];
256}; 308};
257 309
258/* flags used in struct ieee80211_if_managed.flags */ 310/* flags used in struct ieee80211_if_managed.flags */
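
[Editorial aside] The kernel-doc for enum work_done_result above is the contract for the new @done callback in struct ieee80211_work. A hedged sketch of what such a callback could look like; the retry policy here is invented for illustration and is not taken from this patch:

	static enum work_done_result example_work_done(struct ieee80211_work *wk,
						       struct sk_buff *skb)
	{
		/* No response frame yet for a direct probe: ask the work
		 * framework to requeue the item and try again later. */
		if (!skb && wk->type == IEEE80211_WORK_DIRECT_PROBE)
			return WORK_DONE_REQUEUE;

		/* Otherwise the item is finished and can be destroyed. */
		return WORK_DONE_DESTROY;
	}
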
@@ -260,15 +312,11 @@ enum ieee80211_sta_flags {
260 IEEE80211_STA_BEACON_POLL = BIT(0), 312 IEEE80211_STA_BEACON_POLL = BIT(0),
261 IEEE80211_STA_CONNECTION_POLL = BIT(1), 313 IEEE80211_STA_CONNECTION_POLL = BIT(1),
262 IEEE80211_STA_CONTROL_PORT = BIT(2), 314 IEEE80211_STA_CONTROL_PORT = BIT(2),
263 IEEE80211_STA_WMM_ENABLED = BIT(3),
264 IEEE80211_STA_DISABLE_11N = BIT(4), 315 IEEE80211_STA_DISABLE_11N = BIT(4),
265 IEEE80211_STA_CSA_RECEIVED = BIT(5), 316 IEEE80211_STA_CSA_RECEIVED = BIT(5),
266 IEEE80211_STA_MFP_ENABLED = BIT(6), 317 IEEE80211_STA_MFP_ENABLED = BIT(6),
267}; 318 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
268 319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
269/* flags for MLME request */
270enum ieee80211_sta_request {
271 IEEE80211_STA_REQ_SCAN,
272}; 320};
273 321
274struct ieee80211_if_managed { 322struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
285 int probe_send_count; 333 int probe_send_count;
286 334
287 struct mutex mtx; 335 struct mutex mtx;
288 struct ieee80211_bss *associated; 336 struct cfg80211_bss *associated;
289 struct ieee80211_mgd_work *old_associate_work;
290 struct list_head work_list;
291 337
292 u8 bssid[ETH_ALEN]; 338 u8 bssid[ETH_ALEN];
293 339
294 u16 aid; 340 u16 aid;
295 u16 capab;
296 341
297 struct sk_buff_head skb_queue; 342 struct sk_buff_head skb_queue;
298 343
299 unsigned long timers_running; /* used for quiesce/restart */ 344 unsigned long timers_running; /* used for quiesce/restart */
300 bool powersave; /* powersave requested for this iface */ 345 bool powersave; /* powersave requested for this iface */
301 346 enum ieee80211_smps_mode req_smps, /* requested smps mode */
302 unsigned long request; 347 ap_smps; /* smps mode AP thinks we're in */
303 348
304 unsigned int flags; 349 unsigned int flags;
305 350
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
433 478
434 int drop_unencrypted; 479 int drop_unencrypted;
435 480
481 char name[IFNAMSIZ];
482
436 /* 483 /*
437 * keep track of whether the HT opmode (stored in 484 * keep track of whether the HT opmode (stored in
438 * vif.bss_info.ht_operation_mode) is valid. 485 * vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
458 */ 505 */
459 struct ieee80211_if_ap *bss; 506 struct ieee80211_if_ap *bss;
460 507
461 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ 508 /* bitmap of allowed (non-MCS) rate indexes for rate control */
462 int max_ratectrl_rateidx; /* max TX rateidx for rate control */ 509 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
463 510
464 union { 511 union {
465 struct ieee80211_if_ap ap; 512 struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
565 const struct ieee80211_ops *ops; 612 const struct ieee80211_ops *ops;
566 613
567 /* 614 /*
615 * work stuff, potentially off-channel (in the future)
616 */
617 struct mutex work_mtx;
618 struct list_head work_list;
619 struct timer_list work_timer;
620 struct work_struct work_work;
621 struct sk_buff_head work_skb_queue;
622
623 /*
568 * private workqueue to mac80211. mac80211 makes this accessible 624 * private workqueue to mac80211. mac80211 makes this accessible
569 * via ieee80211_queue_work() 625 * via ieee80211_queue_work()
570 */ 626 */
@@ -586,6 +642,9 @@ struct ieee80211_local {
586 /* used for uploading changed mc list */ 642 /* used for uploading changed mc list */
587 struct work_struct reconfig_filter; 643 struct work_struct reconfig_filter;
588 644
645 /* used to reconfigure hardware SM PS */
646 struct work_struct recalc_smps;
647
589 /* aggregated multicast list */ 648 /* aggregated multicast list */
590 struct dev_addr_list *mc_list; 649 struct dev_addr_list *mc_list;
591 int mc_count; 650 int mc_count;
@@ -630,15 +689,18 @@ struct ieee80211_local {
630 689
631 /* Station data */ 690 /* Station data */
632 /* 691 /*
633 * The lock only protects the list, hash, timer and counter 692 * The mutex only protects the list and counter,
634 * against manipulation, reads are done in RCU. Additionally, 693 * reads are done in RCU.
635 * the lock protects each BSS's TIM bitmap. 694 * Additionally, the lock protects the hash table,
695 * the pending list and each BSS's TIM bitmap.
636 */ 696 */
697 struct mutex sta_mtx;
637 spinlock_t sta_lock; 698 spinlock_t sta_lock;
638 unsigned long num_sta; 699 unsigned long num_sta;
639 struct list_head sta_list; 700 struct list_head sta_list, sta_pending_list;
640 struct sta_info *sta_hash[STA_HASH_SIZE]; 701 struct sta_info *sta_hash[STA_HASH_SIZE];
641 struct timer_list sta_cleanup; 702 struct timer_list sta_cleanup;
703 struct work_struct sta_finish_work;
642 int sta_generation; 704 int sta_generation;
643 705
644 struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; 706 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -689,6 +751,10 @@ struct ieee80211_local {
689 enum nl80211_channel_type oper_channel_type; 751 enum nl80211_channel_type oper_channel_type;
690 struct ieee80211_channel *oper_channel, *csa_channel; 752 struct ieee80211_channel *oper_channel, *csa_channel;
691 753
754 /* Temporary remain-on-channel for off-channel operations */
755 struct ieee80211_channel *tmp_channel;
756 enum nl80211_channel_type tmp_channel_type;
757
692 /* SNMP counters */ 758 /* SNMP counters */
693 /* dot11CountersTable */ 759 /* dot11CountersTable */
694 u32 dot11TransmittedFragmentCount; 760 u32 dot11TransmittedFragmentCount;
@@ -708,10 +774,6 @@ struct ieee80211_local {
708 assoc_led_name[32], radio_led_name[32]; 774 assoc_led_name[32], radio_led_name[32];
709#endif 775#endif
710 776
711#ifdef CONFIG_MAC80211_DEBUGFS
712 struct work_struct sta_debugfs_add;
713#endif
714
715#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 777#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
716 /* TX/RX handler statistics */ 778 /* TX/RX handler statistics */
717 unsigned int tx_handlers_drop; 779 unsigned int tx_handlers_drop;
@@ -745,8 +807,22 @@ struct ieee80211_local {
745 int wifi_wme_noack_test; 807 int wifi_wme_noack_test;
746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 808 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
747 809
810 /*
811 * Bitmask of enabled u-apsd queues,
812 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
813 * to take effect.
814 */
815 unsigned int uapsd_queues;
816
817 /*
818 * Maximum number of buffered frames AP can deliver during a
819 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
820 * Needs a new association to take effect.
821 */
822 unsigned int uapsd_max_sp_len;
823
748 bool pspolling; 824 bool pspolling;
749 bool scan_ps_enabled; 825 bool offchannel_ps_enabled;
750 /* 826 /*
751 * PS can only be enabled when we have exactly one managed 827 * PS can only be enabled when we have exactly one managed
752 * interface (and monitors) in PS, this then points there. 828 * interface (and monitors) in PS, this then points there.
@@ -760,6 +836,8 @@ struct ieee80211_local {
760 int user_power_level; /* in dBm */ 836 int user_power_level; /* in dBm */
761 int power_constr_level; /* in dBm */ 837 int power_constr_level; /* in dBm */
762 838
839 enum ieee80211_smps_mode smps_mode;
840
763 struct work_struct restart_work; 841 struct work_struct restart_work;
764 842
765#ifdef CONFIG_MAC80211_DEBUGFS 843#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +952,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
874void ieee80211_configure_filter(struct ieee80211_local *local); 952void ieee80211_configure_filter(struct ieee80211_local *local);
875u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); 953u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
876 954
955extern bool ieee80211_disable_40mhz_24ghz;
956
877/* STA code */ 957/* STA code */
878void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); 958void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
879int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 959int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -886,6 +966,10 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
886int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 966int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
887 struct cfg80211_disassoc_request *req, 967 struct cfg80211_disassoc_request *req,
888 void *cookie); 968 void *cookie);
969int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
970 struct ieee80211_channel *chan,
971 enum nl80211_channel_type channel_type,
972 const u8 *buf, size_t len, u64 *cookie);
889ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, 973ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
890 struct sk_buff *skb); 974 struct sk_buff *skb);
891void ieee80211_send_pspoll(struct ieee80211_local *local, 975void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -905,7 +989,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
905ieee80211_rx_result 989ieee80211_rx_result
906ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 990ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
907struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 991struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
908 u8 *bssid, u8 *addr, u32 supp_rates); 992 u8 *bssid, u8 *addr, u32 supp_rates,
993 gfp_t gfp);
909int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 994int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
910 struct cfg80211_ibss_params *params); 995 struct cfg80211_ibss_params *params);
911int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 996int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -937,7 +1022,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
937void ieee80211_rx_bss_put(struct ieee80211_local *local, 1022void ieee80211_rx_bss_put(struct ieee80211_local *local,
938 struct ieee80211_bss *bss); 1023 struct ieee80211_bss *bss);
939 1024
1025/* off-channel helpers */
1026void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
1027void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
1028void ieee80211_offchannel_return(struct ieee80211_local *local,
1029 bool enable_beaconing);
1030
940/* interface handling */ 1031/* interface handling */
1032int ieee80211_iface_init(void);
1033void ieee80211_iface_exit(void);
941int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1034int ieee80211_if_add(struct ieee80211_local *local, const char *name,
942 struct net_device **new_dev, enum nl80211_iftype type, 1035 struct net_device **new_dev, enum nl80211_iftype type,
943 struct vif_params *params); 1036 struct vif_params *params);
@@ -948,6 +1041,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
948u32 __ieee80211_recalc_idle(struct ieee80211_local *local); 1041u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
949void ieee80211_recalc_idle(struct ieee80211_local *local); 1042void ieee80211_recalc_idle(struct ieee80211_local *local);
950 1043
1044static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1045{
1046 return netif_running(sdata->dev);
1047}
1048
951/* tx handling */ 1049/* tx handling */
952void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1050void ieee80211_clear_tx_pending(struct ieee80211_local *local);
953void ieee80211_tx_pending(unsigned long data); 1051void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1074,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
976void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1074void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
977 const u8 *da, u16 tid, 1075 const u8 *da, u16 tid,
978 u16 initiator, u16 reason_code); 1076 u16 initiator, u16 reason_code);
1077int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1078 enum ieee80211_smps_mode smps, const u8 *da,
1079 const u8 *bssid);
979 1080
980void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, 1081void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
981 u16 tid, u16 initiator, u16 reason); 1082 u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1187,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1086u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1187u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1087 struct ieee802_11_elems *elems, 1188 struct ieee802_11_elems *elems,
1088 enum ieee80211_band band); 1189 enum ieee80211_band band);
1190int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1191 enum ieee80211_smps_mode smps_mode);
1192void ieee80211_recalc_smps(struct ieee80211_local *local,
1193 struct ieee80211_sub_if_data *forsdata);
1194
1195size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1196 const u8 *ids, int n_ids, size_t offset);
1197size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1198
1199/* internal work items */
1200void ieee80211_work_init(struct ieee80211_local *local);
1201void ieee80211_add_work(struct ieee80211_work *wk);
1202void free_work(struct ieee80211_work *wk);
1203void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1204ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1205 struct sk_buff *skb);
1206int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1207 struct ieee80211_channel *chan,
1208 enum nl80211_channel_type channel_type,
1209 unsigned int duration, u64 *cookie);
1210int ieee80211_wk_cancel_remain_on_channel(
1211 struct ieee80211_sub_if_data *sdata, u64 cookie);
1089 1212
1090#ifdef CONFIG_MAC80211_NOINLINE 1213#ifdef CONFIG_MAC80211_NOINLINE
1091#define debug_noinline noinline 1214#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 32abae3ce32a..e08fa8eda1b3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -10,6 +10,7 @@
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#include <linux/slab.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/if_arp.h> 15#include <linux/if_arp.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
@@ -62,6 +63,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
62 return 0; 63 return 0;
63} 64}
64 65
66static int ieee80211_change_mac(struct net_device *dev, void *addr)
67{
68 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
69 struct sockaddr *sa = addr;
70 int ret;
71
72 if (ieee80211_sdata_running(sdata))
73 return -EBUSY;
74
75 ret = eth_mac_addr(dev, sa);
76
77 if (ret == 0)
78 memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
79
80 return ret;
81}
82
65static inline int identical_mac_addr_allowed(int type1, int type2) 83static inline int identical_mac_addr_allowed(int type1, int type2)
66{ 84{
67 return type1 == NL80211_IFTYPE_MONITOR || 85 return type1 == NL80211_IFTYPE_MONITOR ||
@@ -82,7 +100,6 @@ static int ieee80211_open(struct net_device *dev)
82 struct ieee80211_sub_if_data *nsdata; 100 struct ieee80211_sub_if_data *nsdata;
83 struct ieee80211_local *local = sdata->local; 101 struct ieee80211_local *local = sdata->local;
84 struct sta_info *sta; 102 struct sta_info *sta;
85 struct ieee80211_if_init_conf conf;
86 u32 changed = 0; 103 u32 changed = 0;
87 int res; 104 int res;
88 u32 hw_reconf_flags = 0; 105 u32 hw_reconf_flags = 0;
@@ -97,7 +114,7 @@ static int ieee80211_open(struct net_device *dev)
97 list_for_each_entry(nsdata, &local->interfaces, list) { 114 list_for_each_entry(nsdata, &local->interfaces, list) {
98 struct net_device *ndev = nsdata->dev; 115 struct net_device *ndev = nsdata->dev;
99 116
100 if (ndev != dev && netif_running(ndev)) { 117 if (ndev != dev && ieee80211_sdata_running(nsdata)) {
101 /* 118 /*
102 * Allow only a single IBSS interface to be up at any 119 * Allow only a single IBSS interface to be up at any
103 * time. This is restricted because beacon distribution 120 * time. This is restricted because beacon distribution
@@ -183,7 +200,7 @@ static int ieee80211_open(struct net_device *dev)
183 struct net_device *ndev = nsdata->dev; 200 struct net_device *ndev = nsdata->dev;
184 201
185 /* 202 /*
186 * No need to check netif_running since we do not allow 203 * No need to check running since we do not allow
187 * it to start up with this invalid address. 204 * it to start up with this invalid address.
188 */ 205 */
189 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { 206 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -234,10 +251,7 @@ static int ieee80211_open(struct net_device *dev)
234 ieee80211_configure_filter(local); 251 ieee80211_configure_filter(local);
235 break; 252 break;
236 default: 253 default:
237 conf.vif = &sdata->vif; 254 res = drv_add_interface(local, &sdata->vif);
238 conf.type = sdata->vif.type;
239 conf.mac_addr = dev->dev_addr;
240 res = drv_add_interface(local, &conf);
241 if (res) 255 if (res)
242 goto err_stop; 256 goto err_stop;
243 257
@@ -320,7 +334,7 @@ static int ieee80211_open(struct net_device *dev)
320 334
321 return 0; 335 return 0;
322 err_del_interface: 336 err_del_interface:
323 drv_remove_interface(local, &conf); 337 drv_remove_interface(local, &sdata->vif);
324 err_stop: 338 err_stop:
325 if (!local->open_count) 339 if (!local->open_count)
326 drv_stop(local); 340 drv_stop(local);
@@ -335,7 +349,6 @@ static int ieee80211_stop(struct net_device *dev)
335{ 349{
336 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 350 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
337 struct ieee80211_local *local = sdata->local; 351 struct ieee80211_local *local = sdata->local;
338 struct ieee80211_if_init_conf conf;
339 struct sta_info *sta; 352 struct sta_info *sta;
340 unsigned long flags; 353 unsigned long flags;
341 struct sk_buff *skb, *tmp; 354 struct sk_buff *skb, *tmp;
@@ -348,6 +361,11 @@ static int ieee80211_stop(struct net_device *dev)
348 netif_tx_stop_all_queues(dev); 361 netif_tx_stop_all_queues(dev);
349 362
350 /* 363 /*
364 * Purge work for this interface.
365 */
366 ieee80211_work_purge(sdata);
367
368 /*
351 * Now delete all active aggregation sessions. 369 * Now delete all active aggregation sessions.
352 */ 370 */
353 rcu_read_lock(); 371 rcu_read_lock();
@@ -514,12 +532,9 @@ static int ieee80211_stop(struct net_device *dev)
514 BSS_CHANGED_BEACON_ENABLED); 532 BSS_CHANGED_BEACON_ENABLED);
515 } 533 }
516 534
517 conf.vif = &sdata->vif;
518 conf.type = sdata->vif.type;
519 conf.mac_addr = dev->dev_addr;
520 /* disable all keys for as long as this netdev is down */ 535 /* disable all keys for as long as this netdev is down */
521 ieee80211_disable_keys(sdata); 536 ieee80211_disable_keys(sdata);
522 drv_remove_interface(local, &conf); 537 drv_remove_interface(local, &sdata->vif);
523 } 538 }
524 539
525 sdata->bss = NULL; 540 sdata->bss = NULL;
@@ -659,7 +674,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
659 .ndo_start_xmit = ieee80211_subif_start_xmit, 674 .ndo_start_xmit = ieee80211_subif_start_xmit,
660 .ndo_set_multicast_list = ieee80211_set_multicast_list, 675 .ndo_set_multicast_list = ieee80211_set_multicast_list,
661 .ndo_change_mtu = ieee80211_change_mtu, 676 .ndo_change_mtu = ieee80211_change_mtu,
662 .ndo_set_mac_address = eth_mac_addr, 677 .ndo_set_mac_address = ieee80211_change_mac,
663 .ndo_select_queue = ieee80211_netdev_select_queue, 678 .ndo_select_queue = ieee80211_netdev_select_queue,
664}; 679};
665 680
@@ -681,10 +696,14 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
681 696
682 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); 697 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
683 698
684 if (!ieee80211_is_data_qos(hdr->frame_control)) { 699 if (!ieee80211_is_data(hdr->frame_control)) {
685 skb->priority = 7; 700 skb->priority = 7;
686 return ieee802_1d_to_ac[skb->priority]; 701 return ieee802_1d_to_ac[skb->priority];
687 } 702 }
703 if (!ieee80211_is_data_qos(hdr->frame_control)) {
704 skb->priority = 0;
705 return ieee802_1d_to_ac[skb->priority];
706 }
688 707
689 p = ieee80211_get_qos_ctl(hdr); 708 p = ieee80211_get_qos_ctl(hdr);
690 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; 709 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
@@ -779,7 +798,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
779 * and goes into the requested mode. 798 * and goes into the requested mode.
780 */ 799 */
781 800
782 if (netif_running(sdata->dev)) 801 if (ieee80211_sdata_running(sdata))
783 return -EBUSY; 802 return -EBUSY;
784 803
785 /* Purge and reset type-dependent state. */ 804 /* Purge and reset type-dependent state. */
@@ -833,6 +852,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
833 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 852 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
834 sdata = netdev_priv(ndev); 853 sdata = netdev_priv(ndev);
835 ndev->ieee80211_ptr = &sdata->wdev; 854 ndev->ieee80211_ptr = &sdata->wdev;
855 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
856 memcpy(sdata->name, ndev->name, IFNAMSIZ);
836 857
837 /* initialise type-independent data */ 858 /* initialise type-independent data */
838 sdata->wdev.wiphy = local->hw.wiphy; 859 sdata->wdev.wiphy = local->hw.wiphy;
@@ -844,8 +865,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
844 865
845 INIT_LIST_HEAD(&sdata->key_list); 866 INIT_LIST_HEAD(&sdata->key_list);
846 867
847 sdata->force_unicast_rateidx = -1; 868 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
848 sdata->max_ratectrl_rateidx = -1; 869 struct ieee80211_supported_band *sband;
870 sband = local->hw.wiphy->bands[i];
871 sdata->rc_rateidx_mask[i] =
872 sband ? (1 << sband->n_bitrates) - 1 : 0;
873 }
849 874
850 /* setup type-dependent data */ 875 /* setup type-dependent data */
851 ieee80211_setup_sdata(sdata, type); 876 ieee80211_setup_sdata(sdata, type);
@@ -938,6 +963,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
938 wiphy_name(local->hw.wiphy)); 963 wiphy_name(local->hw.wiphy));
939#endif 964#endif
940 965
966 drv_flush(local, false);
967
941 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 968 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
942 return IEEE80211_CONF_CHANGE_IDLE; 969 return IEEE80211_CONF_CHANGE_IDLE;
943} 970}
@@ -947,16 +974,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
947 struct ieee80211_sub_if_data *sdata; 974 struct ieee80211_sub_if_data *sdata;
948 int count = 0; 975 int count = 0;
949 976
977 if (!list_empty(&local->work_list))
978 return ieee80211_idle_off(local, "working");
979
950 if (local->scanning) 980 if (local->scanning)
951 return ieee80211_idle_off(local, "scanning"); 981 return ieee80211_idle_off(local, "scanning");
952 982
953 list_for_each_entry(sdata, &local->interfaces, list) { 983 list_for_each_entry(sdata, &local->interfaces, list) {
954 if (!netif_running(sdata->dev)) 984 if (!ieee80211_sdata_running(sdata))
955 continue; 985 continue;
956 /* do not count disabled managed interfaces */ 986 /* do not count disabled managed interfaces */
957 if (sdata->vif.type == NL80211_IFTYPE_STATION && 987 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
958 !sdata->u.mgd.associated && 988 !sdata->u.mgd.associated)
959 list_empty(&sdata->u.mgd.work_list))
960 continue; 989 continue;
961 /* do not count unused IBSS interfaces */ 990 /* do not count unused IBSS interfaces */
962 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 991 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -984,3 +1013,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
984 if (chg) 1013 if (chg)
985 ieee80211_hw_config(local, chg); 1014 ieee80211_hw_config(local, chg);
986} 1015}
1016
1017static int netdev_notify(struct notifier_block *nb,
1018 unsigned long state,
1019 void *ndev)
1020{
1021 struct net_device *dev = ndev;
1022 struct ieee80211_sub_if_data *sdata;
1023
1024 if (state != NETDEV_CHANGENAME)
1025 return 0;
1026
1027 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1028 return 0;
1029
1030 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1031 return 0;
1032
1033 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1034
1035 memcpy(sdata->name, dev->name, IFNAMSIZ);
1036
1037 ieee80211_debugfs_rename_netdev(sdata);
1038 return 0;
1039}
1040
1041static struct notifier_block mac80211_netdev_notifier = {
1042 .notifier_call = netdev_notify,
1043};
1044
1045int ieee80211_iface_init(void)
1046{
1047 return register_netdevice_notifier(&mac80211_netdev_notifier);
1048}
1049
1050void ieee80211_iface_exit(void)
1051{
1052 unregister_netdevice_notifier(&mac80211_netdev_notifier);
1053}
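
[Editorial aside] iface.c now caches the interface name in sdata->name and keeps it in sync through a NETDEV_CHANGENAME notifier registered at module init. The same pattern in a generic, stand-alone form (a sketch; the "my_" names are placeholders, not mac80211 symbols):

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int my_netdev_notify(struct notifier_block *nb,
				    unsigned long state, void *ptr)
	{
		/* On 2.6.34-era kernels the notifier payload is the
		 * net_device itself. */
		struct net_device *dev = ptr;

		if (state == NETDEV_CHANGENAME)
			pr_debug("device renamed to %s\n", dev->name);
		return 0;
	}

	static struct notifier_block my_netdev_nb = {
		.notifier_call = my_netdev_notify,
	};

	/* register_netdevice_notifier(&my_netdev_nb) in module init,
	 * unregister_netdevice_notifier(&my_netdev_nb) on module exit. */
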
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e3..e8f6e3b252d8 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -14,6 +14,7 @@
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/slab.h>
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "driver-ops.h" 20#include "driver-ops.h"
@@ -139,7 +140,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 struct ieee80211_sub_if_data, 140 struct ieee80211_sub_if_data,
140 u.ap); 141 u.ap);
141 142
142 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf); 143 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
143 144
144 if (!ret) { 145 if (!ret) {
145 spin_lock_bh(&todo_lock); 146 spin_lock_bh(&todo_lock);
@@ -181,7 +182,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
181 struct ieee80211_sub_if_data, 182 struct ieee80211_sub_if_data,
182 u.ap); 183 u.ap);
183 184
184 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif, 185 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
185 sta, &key->conf); 186 sta, &key->conf);
186 187
187 if (ret) 188 if (ret)
@@ -421,7 +422,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
421 */ 422 */
422 423
423 /* same here, the AP could be using QoS */ 424 /* same here, the AP could be using QoS */
424 ap = sta_info_get(key->local, key->sdata->u.mgd.bssid); 425 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
425 if (ap) { 426 if (ap) {
426 if (test_sta_flags(ap, WLAN_STA_WME)) 427 if (test_sta_flags(ap, WLAN_STA_WME))
427 key->conf.flags |= 428 key->conf.flags |=
@@ -443,7 +444,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
443 add_todo(old_key, KEY_FLAG_TODO_DELETE); 444 add_todo(old_key, KEY_FLAG_TODO_DELETE);
444 445
445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); 446 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
446 if (netif_running(sdata->dev)) 447 if (ieee80211_sdata_running(sdata))
447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); 448 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
448 449
449 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 450 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +510,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
509{ 510{
510 ASSERT_RTNL(); 511 ASSERT_RTNL();
511 512
512 if (WARN_ON(!netif_running(sdata->dev))) 513 if (WARN_ON(!ieee80211_sdata_running(sdata)))
513 return; 514 return;
514 515
515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); 516 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e92..bdc2968c2bbe 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), 59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
60}; 60};
61 61
62enum ieee80211_internal_tkip_state {
63 TKIP_STATE_NOT_INIT,
64 TKIP_STATE_PHASE1_DONE,
65 TKIP_STATE_PHASE1_HW_UPLOADED,
66};
67
62struct tkip_ctx { 68struct tkip_ctx {
63 u32 iv32; 69 u32 iv32;
64 u16 iv16; 70 u16 iv16;
65 u16 p1k[5]; 71 u16 p1k[5];
66 int initialized; 72 enum ieee80211_internal_tkip_state state;
67}; 73};
68 74
69struct ieee80211_key { 75struct ieee80211_key {
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 162a643f16b6..063aad944246 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -8,6 +8,7 @@
8 8
9/* just for IFNAMSIZ */ 9/* just for IFNAMSIZ */
10#include <linux/if.h> 10#include <linux/if.h>
11#include <linux/slab.h>
11#include "led.h" 12#include "led.h"
12 13
13void ieee80211_led_rx(struct ieee80211_local *local) 14void ieee80211_led_rx(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d2d94881f1f..b887e484ae04 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/wireless.h>
21#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/pm_qos_params.h> 22#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
32#include "led.h" 31#include "led.h"
33#include "cfg.h" 32#include "cfg.h"
34#include "debugfs.h" 33#include "debugfs.h"
35#include "debugfs_netdev.h" 34
35
36bool ieee80211_disable_40mhz_24ghz;
37module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
38MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
39 "Disable 40MHz support in the 2.4GHz band");
36 40
37void ieee80211_configure_filter(struct ieee80211_local *local) 41void ieee80211_configure_filter(struct ieee80211_local *local)
38{ 42{
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
102 if (scan_chan) { 106 if (scan_chan) {
103 chan = scan_chan; 107 chan = scan_chan;
104 channel_type = NL80211_CHAN_NO_HT; 108 channel_type = NL80211_CHAN_NO_HT;
109 } else if (local->tmp_channel) {
110 chan = scan_chan = local->tmp_channel;
111 channel_type = local->tmp_channel_type;
105 } else { 112 } else {
106 chan = local->oper_channel; 113 chan = local->oper_channel;
107 channel_type = local->oper_channel_type; 114 channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
114 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 121 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
115 } 122 }
116 123
124 if (!conf_is_ht(&local->hw.conf)) {
125 /*
126 * mac80211.h documents that this is only valid
127 * when the channel is set to an HT type, and
128 * that otherwise STATIC is used.
129 */
130 local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
131 } else if (local->hw.conf.smps_mode != local->smps_mode) {
132 local->hw.conf.smps_mode = local->smps_mode;
133 changed |= IEEE80211_CONF_CHANGE_SMPS;
134 }
135
117 if (scan_chan) 136 if (scan_chan)
118 power = chan->max_power; 137 power = chan->max_power;
119 else 138 else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
173 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 192 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
174 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 193 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
175 else if (sdata->vif.type == NL80211_IFTYPE_AP) 194 else if (sdata->vif.type == NL80211_IFTYPE_AP)
176 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr; 195 sdata->vif.bss_conf.bssid = sdata->vif.addr;
177 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 196 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
178 sdata->vif.bss_conf.bssid = zero; 197 sdata->vif.bss_conf.bssid = zero;
179 } else { 198 } else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
195 } 214 }
196 215
197 if (changed & BSS_CHANGED_BEACON_ENABLED) { 216 if (changed & BSS_CHANGED_BEACON_ENABLED) {
198 if (local->quiescing || !netif_running(sdata->dev) || 217 if (local->quiescing || !ieee80211_sdata_running(sdata) ||
199 test_bit(SCAN_SW_SCANNING, &local->scanning)) { 218 test_bit(SCAN_SW_SCANNING, &local->scanning)) {
200 sdata->vif.bss_conf.enable_beacon = false; 219 sdata->vif.bss_conf.enable_beacon = false;
201 } else { 220 } else {
@@ -206,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
206 switch (sdata->vif.type) { 225 switch (sdata->vif.type) {
207 case NL80211_IFTYPE_AP: 226 case NL80211_IFTYPE_AP:
208 sdata->vif.bss_conf.enable_beacon = 227 sdata->vif.bss_conf.enable_beacon =
209 !!rcu_dereference(sdata->u.ap.beacon); 228 !!sdata->u.ap.beacon;
210 break; 229 break;
211 case NL80211_IFTYPE_ADHOC: 230 case NL80211_IFTYPE_ADHOC:
212 sdata->vif.bss_conf.enable_beacon = 231 sdata->vif.bss_conf.enable_beacon =
213 !!rcu_dereference(sdata->u.ibss.presp); 232 !!sdata->u.ibss.presp;
214 break; 233 break;
215 case NL80211_IFTYPE_MESH_POINT: 234 case NL80211_IFTYPE_MESH_POINT:
216 sdata->vif.bss_conf.enable_beacon = true; 235 sdata->vif.bss_conf.enable_beacon = true;
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
223 } 242 }
224 } 243 }
225 244
226 drv_bss_info_changed(local, &sdata->vif, 245 drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
227 &sdata->vif.bss_conf, changed);
228} 246}
229 247
230u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 248u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
299} 317}
300EXPORT_SYMBOL(ieee80211_restart_hw); 318EXPORT_SYMBOL(ieee80211_restart_hw);
301 319
320static void ieee80211_recalc_smps_work(struct work_struct *work)
321{
322 struct ieee80211_local *local =
323 container_of(work, struct ieee80211_local, recalc_smps);
324
325 mutex_lock(&local->iflist_mtx);
326 ieee80211_recalc_smps(local, NULL);
327 mutex_unlock(&local->iflist_mtx);
328}
329
302struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 330struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
303 const struct ieee80211_ops *ops) 331 const struct ieee80211_ops *ops)
304{ 332{
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
333 WIPHY_FLAG_4ADDR_STATION; 361 WIPHY_FLAG_4ADDR_STATION;
334 wiphy->privid = mac80211_wiphy_privid; 362 wiphy->privid = mac80211_wiphy_privid;
335 363
336 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 364 wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
337 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
338 sizeof(struct cfg80211_bss);
339 365
340 local = wiphy_priv(wiphy); 366 local = wiphy_priv(wiphy);
341 367
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
358 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 384 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
359 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 385 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
360 local->user_power_level = -1; 386 local->user_power_level = -1;
387 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
388 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
361 389
362 INIT_LIST_HEAD(&local->interfaces); 390 INIT_LIST_HEAD(&local->interfaces);
363 mutex_init(&local->iflist_mtx); 391 mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
369 397
370 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 398 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
371 399
400 ieee80211_work_init(local);
401
372 INIT_WORK(&local->restart_work, ieee80211_restart_work); 402 INIT_WORK(&local->restart_work, ieee80211_restart_work);
373 403
374 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 404 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
405 INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
406 local->smps_mode = IEEE80211_SMPS_OFF;
375 407
376 INIT_WORK(&local->dynamic_ps_enable_work, 408 INIT_WORK(&local->dynamic_ps_enable_work,
377 ieee80211_dynamic_ps_enable_work); 409 ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
461 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 493 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
462 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 494 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
463 495
496 WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
497 && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
498 "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
499
464 /* 500 /*
465 * Calculate scan IE length -- we need this to alloc 501 * Calculate scan IE length -- we need this to alloc
466 * memory and to subtract from the driver limit. It 502 * memory and to subtract from the driver limit. It
@@ -522,8 +558,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
522 558
523 debugfs_hw_add(local); 559 debugfs_hw_add(local);
524 560
561 /*
562 * if the driver doesn't specify a max listen interval we
563 * use 5 which should be a safe default
564 */
525 if (local->hw.max_listen_interval == 0) 565 if (local->hw.max_listen_interval == 0)
526 local->hw.max_listen_interval = 1; 566 local->hw.max_listen_interval = 5;
527 567
528 local->hw.conf.listen_interval = local->hw.max_listen_interval; 568 local->hw.conf.listen_interval = local->hw.max_listen_interval;
529 569
@@ -674,11 +714,19 @@ static int __init ieee80211_init(void)
674 714
675 ret = rc80211_pid_init(); 715 ret = rc80211_pid_init();
676 if (ret) 716 if (ret)
677 return ret; 717 goto err_pid;
678 718
679 ieee80211_debugfs_netdev_init(); 719 ret = ieee80211_iface_init();
720 if (ret)
721 goto err_netdev;
680 722
681 return 0; 723 return 0;
724 err_netdev:
725 rc80211_pid_exit();
726 err_pid:
727 rc80211_minstrel_exit();
728
729 return ret;
682} 730}
683 731
684static void __exit ieee80211_exit(void) 732static void __exit ieee80211_exit(void)
@@ -695,7 +743,7 @@ static void __exit ieee80211_exit(void)
695 if (mesh_allocated) 743 if (mesh_allocated)
696 ieee80211s_stop(); 744 ieee80211s_stop();
697 745
698 ieee80211_debugfs_netdev_exit(); 746 ieee80211_iface_exit();
699} 747}
700 748
701 749
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a4331429598..859ee5f3d941 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/slab.h>
11#include <asm/unaligned.h> 12#include <asm/unaligned.h>
12#include "ieee80211_i.h" 13#include "ieee80211_i.h"
13#include "mesh.h" 14#include "mesh.h"
@@ -457,7 +458,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
457 458
458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 459#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
459 printk(KERN_DEBUG "%s: running mesh housekeeping\n", 460 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
460 sdata->dev->name); 461 sdata->name);
461#endif 462#endif
462 463
463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 464 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +566,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
565 566
566 /* ignore ProbeResp to foreign address */ 567 /* ignore ProbeResp to foreign address */
567 if (stype == IEEE80211_STYPE_PROBE_RESP && 568 if (stype == IEEE80211_STYPE_PROBE_RESP &&
568 compare_ether_addr(mgmt->da, sdata->dev->dev_addr)) 569 compare_ether_addr(mgmt->da, sdata->vif.addr))
569 return; 570 return;
570 571
571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 572 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +646,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 646 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
646 struct sk_buff *skb; 647 struct sk_buff *skb;
647 648
648 if (!netif_running(sdata->dev)) 649 if (!ieee80211_sdata_running(sdata))
649 return; 650 return;
650 651
651 if (local->scanning) 652 if (local->scanning)
@@ -749,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
749 750
750 switch (fc & IEEE80211_FCTL_STYPE) { 751 switch (fc & IEEE80211_FCTL_STYPE) {
751 case IEEE80211_STYPE_ACTION: 752 case IEEE80211_STYPE_ACTION:
752 if (skb->len < IEEE80211_MIN_ACTION_SIZE)
753 return RX_DROP_MONITOR;
754 /* fall through */
755 case IEEE80211_STYPE_PROBE_RESP: 753 case IEEE80211_STYPE_PROBE_RESP:
756 case IEEE80211_STYPE_BEACON: 754 case IEEE80211_STYPE_BEACON:
757 skb_queue_tail(&ifmsh->skb_queue, skb); 755 skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f81..fefc45c4b4e8 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/slab.h>
10#include "mesh.h" 11#include "mesh.h"
11 12
12#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG 13#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
@@ -128,9 +129,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
128 IEEE80211_STYPE_ACTION); 129 IEEE80211_STYPE_ACTION);
129 130
130 memcpy(mgmt->da, da, ETH_ALEN); 131 memcpy(mgmt->da, da, ETH_ALEN);
131 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 132 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
132 /* BSSID == SA */ 133 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 134 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 135 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 136 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
136 137
@@ -222,7 +223,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
222 IEEE80211_STYPE_ACTION); 223 IEEE80211_STYPE_ACTION);
223 224
224 memcpy(mgmt->da, ra, ETH_ALEN); 225 memcpy(mgmt->da, ra, ETH_ALEN);
225 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 226 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
226 /* BSSID is left zeroed, wildcard value */ 227 /* BSSID is left zeroed, wildcard value */
227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 228 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 229 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +336,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
335 bool process = true; 336 bool process = true;
336 337
337 rcu_read_lock(); 338 rcu_read_lock();
338 sta = sta_info_get(local, mgmt->sa); 339 sta = sta_info_get(sdata, mgmt->sa);
339 if (!sta) { 340 if (!sta) {
340 rcu_read_unlock(); 341 rcu_read_unlock();
341 return 0; 342 return 0;
@@ -374,7 +375,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
374 new_metric = MAX_METRIC; 375 new_metric = MAX_METRIC;
375 exp_time = TU_TO_EXP_TIME(orig_lifetime); 376 exp_time = TU_TO_EXP_TIME(orig_lifetime);
376 377
377 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 378 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
378 /* This MP is the originator, we are not interested in this 379 /* This MP is the originator, we are not interested in this
379 * frame, except for updating transmitter's path info. 380 * frame, except for updating transmitter's path info.
380 */ 381 */
@@ -391,7 +392,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
391 if (SN_GT(mpath->sn, orig_sn) || 392 if (SN_GT(mpath->sn, orig_sn) ||
392 (mpath->sn == orig_sn && 393 (mpath->sn == orig_sn &&
393 action == MPATH_PREQ && 394 action == MPATH_PREQ &&
394 new_metric > mpath->metric)) { 395 new_metric >= mpath->metric)) {
395 process = false; 396 process = false;
396 fresh_info = false; 397 fresh_info = false;
397 } 398 }
@@ -486,7 +487,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
486 487
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 488 mhwmp_dbg("received PREQ from %pM\n", orig_addr);
488 489
489 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 490 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 491 mhwmp_dbg("PREQ is for us\n");
491 forward = false; 492 forward = false;
492 reply = true; 493 reply = true;
@@ -579,7 +580,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
579 * replies 580 * replies
580 */ 581 */
581 target_addr = PREP_IE_TARGET_ADDR(prep_elem); 582 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
582 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) 583 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
583 /* destination, no forwarding required */ 584 /* destination, no forwarding required */
584 return; 585 return;
585 586
@@ -611,7 +612,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
611 612
612 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 613 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
613 cpu_to_le32(orig_sn), 0, target_addr, 614 cpu_to_le32(orig_sn), 0, target_addr,
614 cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, 615 cpu_to_le32(target_sn), next_hop, hopcount,
615 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 616 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
616 0, sdata); 617 0, sdata);
617 rcu_read_unlock(); 618 rcu_read_unlock();
@@ -890,7 +891,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
890 target_flags = MP_F_RF; 891 target_flags = MP_F_RF;
891 892
892 spin_unlock_bh(&mpath->state_lock); 893 spin_unlock_bh(&mpath->state_lock);
893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, 894 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 895 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
895 cpu_to_le32(mpath->sn), broadcast_addr, 0, 896 cpu_to_le32(mpath->sn), broadcast_addr, 0,
896 ttl, cpu_to_le32(lifetime), 0, 897 ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +940,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
939 if (time_after(jiffies, 940 if (time_after(jiffies,
940 mpath->exp_time - 941 mpath->exp_time -
941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 942 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && 943 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
943 !(mpath->flags & MESH_PATH_RESOLVING) && 944 !(mpath->flags & MESH_PATH_RESOLVING) &&
944 !(mpath->flags & MESH_PATH_FIXED)) { 945 !(mpath->flags & MESH_PATH_FIXED)) {
945 mesh_queue_preq(mpath, 946 mesh_queue_preq(mpath,
@@ -1010,7 +1011,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1010{ 1011{
1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1012 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1012 1013
1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr, 1014 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
1014 cpu_to_le32(++ifmsh->sn), 1015 cpu_to_le32(++ifmsh->sn),
1015 0, NULL, 0, broadcast_addr, 1016 0, NULL, 0, broadcast_addr,
1016 0, MESH_TTL, 0, 0, 0, sdata); 1017 0, MESH_TTL, 0, 0, 0, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae4..181ffd6efd81 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -10,6 +10,7 @@
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/random.h> 12#include <linux/random.h>
13#include <linux/slab.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <net/mac80211.h> 16#include <net/mac80211.h>
@@ -260,7 +261,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
260 int err = 0; 261 int err = 0;
261 u32 hash_idx; 262 u32 hash_idx;
262 263
263 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 264 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
264 /* never add ourselves as neighbours */ 265 /* never add ourselves as neighbours */
265 return -ENOTSUPP; 266 return -ENOTSUPP;
266 267
@@ -377,7 +378,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
377 int err = 0; 378 int err = 0;
378 u32 hash_idx; 379 u32 hash_idx;
379 380
380 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 381 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
381 /* never add ourselves as neighbours */ 382 /* never add ourselves as neighbours */
382 return -ENOTSUPP; 383 return -ENOTSUPP;
383 384
@@ -605,7 +606,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
605 struct mesh_path *mpath; 606 struct mesh_path *mpath;
606 u32 sn = 0; 607 u32 sn = 0;
607 608
608 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { 609 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
609 u8 *ra, *da; 610 u8 *ra, *da;
610 611
611 da = hdr->addr3; 612 da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a4248..7b7080e2b49f 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -6,6 +6,7 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9#include <linux/gfp.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/random.h> 11#include <linux/random.h>
11#include "ieee80211_i.h" 12#include "ieee80211_i.h"
@@ -102,7 +103,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
102 if (local->num_sta >= MESH_MAX_PLINKS) 103 if (local->num_sta >= MESH_MAX_PLINKS)
103 return NULL; 104 return NULL;
104 105
105 sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC); 106 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
106 if (!sta) 107 if (!sta)
107 return NULL; 108 return NULL;
108 109
@@ -169,7 +170,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 170 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
170 IEEE80211_STYPE_ACTION); 171 IEEE80211_STYPE_ACTION);
171 memcpy(mgmt->da, da, ETH_ALEN); 172 memcpy(mgmt->da, da, ETH_ALEN);
172 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 173 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
173 /* BSSID is left zeroed, wildcard value */ 174 /* BSSID is left zeroed, wildcard value */
174 mgmt->u.action.category = MESH_PLINK_CATEGORY; 175 mgmt->u.action.category = MESH_PLINK_CATEGORY;
175 mgmt->u.action.u.plink_action.action_code = action; 176 mgmt->u.action.u.plink_action.action_code = action;
@@ -234,14 +235,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
234 235
235 rcu_read_lock(); 236 rcu_read_lock();
236 237
237 sta = sta_info_get(local, hw_addr); 238 sta = sta_info_get(sdata, hw_addr);
238 if (!sta) { 239 if (!sta) {
240 rcu_read_unlock();
241
239 sta = mesh_plink_alloc(sdata, hw_addr, rates); 242 sta = mesh_plink_alloc(sdata, hw_addr, rates);
240 if (!sta) { 243 if (!sta)
241 rcu_read_unlock();
242 return; 244 return;
243 } 245 if (sta_info_insert_rcu(sta)) {
244 if (sta_info_insert(sta)) {
245 rcu_read_unlock(); 246 rcu_read_unlock();
246 return; 247 return;
247 } 248 }
@@ -455,7 +456,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
455 456
456 rcu_read_lock(); 457 rcu_read_lock();
457 458
458 sta = sta_info_get(local, mgmt->sa); 459 sta = sta_info_get(sdata, mgmt->sa);
459 if (!sta && ftype != PLINK_OPEN) { 460 if (!sta && ftype != PLINK_OPEN) {
460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 461 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
461 rcu_read_unlock(); 462 rcu_read_unlock();
@@ -485,9 +486,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
485 } else if (!sta) { 486 } else if (!sta) {
486 /* ftype == PLINK_OPEN */ 487 /* ftype == PLINK_OPEN */
487 u32 rates; 488 u32 rates;
489
490 rcu_read_unlock();
491
488 if (!mesh_plink_free_count(sdata)) { 492 if (!mesh_plink_free_count(sdata)) {
489 mpl_dbg("Mesh plink error: no more free plinks\n"); 493 mpl_dbg("Mesh plink error: no more free plinks\n");
490 rcu_read_unlock();
491 return; 494 return;
492 } 495 }
493 496
@@ -495,10 +498,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
495 sta = mesh_plink_alloc(sdata, mgmt->sa, rates); 498 sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
496 if (!sta) { 499 if (!sta) {
497 mpl_dbg("Mesh plink error: plink table full\n"); 500 mpl_dbg("Mesh plink error: plink table full\n");
498 rcu_read_unlock();
499 return; 501 return;
500 } 502 }
501 if (sta_info_insert(sta)) { 503 if (sta_info_insert_rcu(sta)) {
502 rcu_read_unlock(); 504 rcu_read_unlock();
503 return; 505 return;
504 } 506 }
@@ -743,7 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
743 break; 745 break;
744 default: 746 default:
745 /* should not get here, PLINK_BLOCKED is dealt with at the 747 /* should not get here, PLINK_BLOCKED is dealt with at the
746 * beggining of the function 748 * beginning of the function
747 */ 749 */
748 spin_unlock_bh(&sta->lock); 750 spin_unlock_bh(&sta->lock);
749 break; 751 break;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 05a18f43e1bf..4aefa6dc3091 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -19,6 +19,7 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/pm_qos_params.h> 20#include <linux/pm_qos_params.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/slab.h>
22#include <net/mac80211.h> 23#include <net/mac80211.h>
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24 25
@@ -27,10 +28,6 @@
27#include "rate.h" 28#include "rate.h"
28#include "led.h" 29#include "led.h"
29 30
30#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
31#define IEEE80211_AUTH_MAX_TRIES 3
32#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
33#define IEEE80211_ASSOC_MAX_TRIES 3
34#define IEEE80211_MAX_PROBE_TRIES 5 31#define IEEE80211_MAX_PROBE_TRIES 5
35 32
36/* 33/*
@@ -75,11 +72,8 @@ enum rx_mgmt_action {
75 /* caller must call cfg80211_send_disassoc() */ 72 /* caller must call cfg80211_send_disassoc() */
76 RX_MGMT_CFG80211_DISASSOC, 73 RX_MGMT_CFG80211_DISASSOC,
77 74
78 /* caller must call cfg80211_auth_timeout() & free work */ 75 /* caller must tell cfg80211 about internal error */
79 RX_MGMT_CFG80211_AUTH_TO, 76 RX_MGMT_CFG80211_ASSOC_ERROR,
80
81 /* caller must call cfg80211_assoc_timeout() & free work */
82 RX_MGMT_CFG80211_ASSOC_TO,
83}; 77};
84 78
85/* utils */ 79/* utils */
@@ -122,27 +116,6 @@ static int ecw2cw(int ecw)
122 return (1 << ecw) - 1; 116 return (1 << ecw) - 1;
123} 117}
124 118
125static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
126 struct ieee80211_supported_band *sband,
127 u32 *rates)
128{
129 int i, j, count;
130 *rates = 0;
131 count = 0;
132 for (i = 0; i < bss->supp_rates_len; i++) {
133 int rate = (bss->supp_rates[i] & 0x7F) * 5;
134
135 for (j = 0; j < sband->n_bitrates; j++)
136 if (sband->bitrates[j].bitrate == rate) {
137 *rates |= BIT(j);
138 count++;
139 break;
140 }
141 }
142
143 return count;
144}
145
146/* 119/*
147 * ieee80211_enable_ht should be called only after the operating band 120 * ieee80211_enable_ht should be called only after the operating band
148 * has been determined as ht configuration depends on the hw's 121 * has been determined as ht configuration depends on the hw's
@@ -195,6 +168,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
195 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || 168 ht_changed = conf_is_ht(&local->hw.conf) != enable_ht ||
196 channel_type != local->hw.conf.channel_type; 169 channel_type != local->hw.conf.channel_type;
197 170
171 if (local->tmp_channel)
172 local->tmp_channel_type = channel_type;
198 local->oper_channel_type = channel_type; 173 local->oper_channel_type = channel_type;
199 174
200 if (ht_changed) { 175 if (ht_changed) {
@@ -202,10 +177,11 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
202 ieee80211_hw_config(local, 0); 177 ieee80211_hw_config(local, 0);
203 178
204 rcu_read_lock(); 179 rcu_read_lock();
205 sta = sta_info_get(local, bssid); 180 sta = sta_info_get(sdata, bssid);
206 if (sta) 181 if (sta)
207 rate_control_rate_update(local, sband, sta, 182 rate_control_rate_update(local, sband, sta,
208 IEEE80211_RC_HT_CHANGED); 183 IEEE80211_RC_HT_CHANGED,
184 local->oper_channel_type);
209 rcu_read_unlock(); 185 rcu_read_unlock();
210 } 186 }
211 187
@@ -228,209 +204,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
228 204
229/* frame sending functions */ 205/* frame sending functions */
230 206
231static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
232 struct ieee80211_mgd_work *wk)
233{
234 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
235 struct ieee80211_local *local = sdata->local;
236 struct sk_buff *skb;
237 struct ieee80211_mgmt *mgmt;
238 u8 *pos;
239 const u8 *ies, *ht_ie;
240 int i, len, count, rates_len, supp_rates_len;
241 u16 capab;
242 int wmm = 0;
243 struct ieee80211_supported_band *sband;
244 u32 rates = 0;
245
246 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
247 sizeof(*mgmt) + 200 + wk->ie_len +
248 wk->ssid_len);
249 if (!skb) {
250 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
251 "frame\n", sdata->dev->name);
252 return;
253 }
254 skb_reserve(skb, local->hw.extra_tx_headroom);
255
256 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
257
258 capab = ifmgd->capab;
259
260 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
261 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
262 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
263 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
264 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
265 }
266
267 if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
268 capab |= WLAN_CAPABILITY_PRIVACY;
269 if (wk->bss->wmm_used)
270 wmm = 1;
271
272 /* get all rates supported by the device and the AP as
273 * some APs don't like getting a superset of their rates
274 * in the association request (e.g. D-Link DAP 1353 in
275 * b-only mode) */
276 rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
277
278 if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
279 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
280 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
281
282 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
283 memset(mgmt, 0, 24);
284 memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
285 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
286 memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
287
288 if (!is_zero_ether_addr(wk->prev_bssid)) {
289 skb_put(skb, 10);
290 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
291 IEEE80211_STYPE_REASSOC_REQ);
292 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
293 mgmt->u.reassoc_req.listen_interval =
294 cpu_to_le16(local->hw.conf.listen_interval);
295 memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
296 ETH_ALEN);
297 } else {
298 skb_put(skb, 4);
299 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
300 IEEE80211_STYPE_ASSOC_REQ);
301 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
302 mgmt->u.assoc_req.listen_interval =
303 cpu_to_le16(local->hw.conf.listen_interval);
304 }
305
306 /* SSID */
307 ies = pos = skb_put(skb, 2 + wk->ssid_len);
308 *pos++ = WLAN_EID_SSID;
309 *pos++ = wk->ssid_len;
310 memcpy(pos, wk->ssid, wk->ssid_len);
311
312 /* add all rates which were marked to be used above */
313 supp_rates_len = rates_len;
314 if (supp_rates_len > 8)
315 supp_rates_len = 8;
316
317 len = sband->n_bitrates;
318 pos = skb_put(skb, supp_rates_len + 2);
319 *pos++ = WLAN_EID_SUPP_RATES;
320 *pos++ = supp_rates_len;
321
322 count = 0;
323 for (i = 0; i < sband->n_bitrates; i++) {
324 if (BIT(i) & rates) {
325 int rate = sband->bitrates[i].bitrate;
326 *pos++ = (u8) (rate / 5);
327 if (++count == 8)
328 break;
329 }
330 }
331
332 if (rates_len > count) {
333 pos = skb_put(skb, rates_len - count + 2);
334 *pos++ = WLAN_EID_EXT_SUPP_RATES;
335 *pos++ = rates_len - count;
336
337 for (i++; i < sband->n_bitrates; i++) {
338 if (BIT(i) & rates) {
339 int rate = sband->bitrates[i].bitrate;
340 *pos++ = (u8) (rate / 5);
341 }
342 }
343 }
344
345 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
346 /* 1. power capabilities */
347 pos = skb_put(skb, 4);
348 *pos++ = WLAN_EID_PWR_CAPABILITY;
349 *pos++ = 2;
350 *pos++ = 0; /* min tx power */
351 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
352
353 /* 2. supported channels */
354 /* TODO: get this in reg domain format */
355 pos = skb_put(skb, 2 * sband->n_channels + 2);
356 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
357 *pos++ = 2 * sband->n_channels;
358 for (i = 0; i < sband->n_channels; i++) {
359 *pos++ = ieee80211_frequency_to_channel(
360 sband->channels[i].center_freq);
361 *pos++ = 1; /* one channel in the subband*/
362 }
363 }
364
365 if (wk->ie_len && wk->ie) {
366 pos = skb_put(skb, wk->ie_len);
367 memcpy(pos, wk->ie, wk->ie_len);
368 }
369
370 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
371 pos = skb_put(skb, 9);
372 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
373 *pos++ = 7; /* len */
374 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
375 *pos++ = 0x50;
376 *pos++ = 0xf2;
377 *pos++ = 2; /* WME */
378 *pos++ = 0; /* WME info */
379 *pos++ = 1; /* WME ver */
380 *pos++ = 0;
381 }
382
383 /* wmm support is a must to HT */
384 /*
385 * IEEE802.11n does not allow TKIP/WEP as pairwise
386 * ciphers in HT mode. We still associate in non-ht
387 * mode (11a/b/g) if any one of these ciphers is
388 * configured as pairwise.
389 */
390 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
391 sband->ht_cap.ht_supported &&
392 (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
393 ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
394 (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
395 struct ieee80211_ht_info *ht_info =
396 (struct ieee80211_ht_info *)(ht_ie + 2);
397 u16 cap = sband->ht_cap.cap;
398 __le16 tmp;
399 u32 flags = local->hw.conf.channel->flags;
400
401 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
402 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
403 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
404 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
405 cap &= ~IEEE80211_HT_CAP_SGI_40;
406 }
407 break;
408 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
409 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
410 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
411 cap &= ~IEEE80211_HT_CAP_SGI_40;
412 }
413 break;
414 }
415
416 tmp = cpu_to_le16(cap);
417 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
418 *pos++ = WLAN_EID_HT_CAPABILITY;
419 *pos++ = sizeof(struct ieee80211_ht_cap);
420 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
421 memcpy(pos, &tmp, sizeof(u16));
422 pos += sizeof(u16);
423 /* TODO: needs a define here for << 2 */
424 *pos++ = sband->ht_cap.ampdu_factor |
425 (sband->ht_cap.ampdu_density << 2);
426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
427 }
428
429 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
430 ieee80211_tx_skb(sdata, skb);
431}
432
433
434static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 207static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
435 const u8 *bssid, u16 stype, u16 reason, 208 const u8 *bssid, u16 stype, u16 reason,
436 void *cookie) 209 void *cookie)
@@ -443,7 +216,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
443 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 216 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
444 if (!skb) { 217 if (!skb) {
445 printk(KERN_DEBUG "%s: failed to allocate buffer for " 218 printk(KERN_DEBUG "%s: failed to allocate buffer for "
446 "deauth/disassoc frame\n", sdata->dev->name); 219 "deauth/disassoc frame\n", sdata->name);
447 return; 220 return;
448 } 221 }
449 skb_reserve(skb, local->hw.extra_tx_headroom); 222 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +224,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
451 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 224 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
452 memset(mgmt, 0, 24); 225 memset(mgmt, 0, 24);
453 memcpy(mgmt->da, bssid, ETH_ALEN); 226 memcpy(mgmt->da, bssid, ETH_ALEN);
454 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 227 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
455 memcpy(mgmt->bssid, bssid, ETH_ALEN); 228 memcpy(mgmt->bssid, bssid, ETH_ALEN);
456 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); 229 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
457 skb_put(skb, 2); 230 skb_put(skb, 2);
@@ -476,30 +249,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
476void ieee80211_send_pspoll(struct ieee80211_local *local, 249void ieee80211_send_pspoll(struct ieee80211_local *local,
477 struct ieee80211_sub_if_data *sdata) 250 struct ieee80211_sub_if_data *sdata)
478{ 251{
479 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
480 struct ieee80211_pspoll *pspoll; 252 struct ieee80211_pspoll *pspoll;
481 struct sk_buff *skb; 253 struct sk_buff *skb;
482 u16 fc;
483 254
484 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 255 skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
485 if (!skb) { 256 if (!skb)
486 printk(KERN_DEBUG "%s: failed to allocate buffer for "
487 "pspoll frame\n", sdata->dev->name);
488 return; 257 return;
489 }
490 skb_reserve(skb, local->hw.extra_tx_headroom);
491 258
492 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); 259 pspoll = (struct ieee80211_pspoll *) skb->data;
493 memset(pspoll, 0, sizeof(*pspoll)); 260 pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
494 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
495 pspoll->frame_control = cpu_to_le16(fc);
496 pspoll->aid = cpu_to_le16(ifmgd->aid);
497
498 /* aid in PS-Poll has its two MSBs each set to 1 */
499 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
500
501 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
502 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
503 261
504 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 262 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
505 ieee80211_tx_skb(sdata, skb); 263 ieee80211_tx_skb(sdata, skb);
@@ -510,30 +268,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
510 int powersave) 268 int powersave)
511{ 269{
512 struct sk_buff *skb; 270 struct sk_buff *skb;
271 struct ieee80211_hdr_3addr *nullfunc;
272
273 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
274 if (!skb)
275 return;
276
277 nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
278 if (powersave)
279 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
280
281 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
282 ieee80211_tx_skb(sdata, skb);
283}
284
285static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
286 struct ieee80211_sub_if_data *sdata)
287{
288 struct sk_buff *skb;
513 struct ieee80211_hdr *nullfunc; 289 struct ieee80211_hdr *nullfunc;
514 __le16 fc; 290 __le16 fc;
515 291
516 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 292 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
517 return; 293 return;
518 294
519 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 295 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
520 if (!skb) { 296 if (!skb) {
521 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 297 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
522 "frame\n", sdata->dev->name); 298 "nullfunc frame\n", sdata->name);
523 return; 299 return;
524 } 300 }
525 skb_reserve(skb, local->hw.extra_tx_headroom); 301 skb_reserve(skb, local->hw.extra_tx_headroom);
526 302
527 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 303 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
528 memset(nullfunc, 0, 24); 304 memset(nullfunc, 0, 30);
529 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 305 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
530 IEEE80211_FCTL_TODS); 306 IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
531 if (powersave)
532 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
533 nullfunc->frame_control = fc; 307 nullfunc->frame_control = fc;
534 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); 308 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
535 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 309 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
536 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); 310 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
311 memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
537 312
538 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 313 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
539 ieee80211_tx_skb(sdata, skb); 314 ieee80211_tx_skb(sdata, skb);
@@ -546,7 +321,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
546 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); 321 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
547 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 322 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
548 323
549 if (!netif_running(sdata->dev)) 324 if (!ieee80211_sdata_running(sdata))
550 return; 325 return;
551 326
552 mutex_lock(&ifmgd->mtx); 327 mutex_lock(&ifmgd->mtx);
@@ -557,7 +332,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
557 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); 332 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
558 333
559 /* XXX: shouldn't really modify cfg80211-owned data! */ 334 /* XXX: shouldn't really modify cfg80211-owned data! */
560 ifmgd->associated->cbss.channel = sdata->local->oper_channel; 335 ifmgd->associated->channel = sdata->local->oper_channel;
561 336
562 ieee80211_wake_queues_by_reason(&sdata->local->hw, 337 ieee80211_wake_queues_by_reason(&sdata->local->hw,
563 IEEE80211_QUEUE_STOP_REASON_CSA); 338 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +359,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
584 struct ieee80211_channel_sw_ie *sw_elem, 359 struct ieee80211_channel_sw_ie *sw_elem,
585 struct ieee80211_bss *bss) 360 struct ieee80211_bss *bss)
586{ 361{
362 struct cfg80211_bss *cbss =
363 container_of((void *)bss, struct cfg80211_bss, priv);
587 struct ieee80211_channel *new_ch; 364 struct ieee80211_channel *new_ch;
588 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 365 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
589 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); 366 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +394,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
617 mod_timer(&ifmgd->chswitch_timer, 394 mod_timer(&ifmgd->chswitch_timer,
618 jiffies + 395 jiffies +
619 msecs_to_jiffies(sw_elem->count * 396 msecs_to_jiffies(sw_elem->count *
620 bss->cbss.beacon_interval)); 397 cbss->beacon_interval));
621 } 398 }
622} 399}
623 400
@@ -661,6 +438,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
661 } else { 438 } else {
662 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 439 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
663 ieee80211_send_nullfunc(local, sdata, 1); 440 ieee80211_send_nullfunc(local, sdata, 1);
441
442 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
443 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
444 return;
445
664 conf->flags |= IEEE80211_CONF_PS; 446 conf->flags |= IEEE80211_CONF_PS;
665 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 447 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
666 } 448 }
@@ -691,8 +473,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
691 return; 473 return;
692 } 474 }
693 475
476 if (!list_empty(&local->work_list)) {
477 local->ps_sdata = NULL;
478 goto change;
479 }
480
694 list_for_each_entry(sdata, &local->interfaces, list) { 481 list_for_each_entry(sdata, &local->interfaces, list) {
695 if (!netif_running(sdata->dev)) 482 if (!ieee80211_sdata_running(sdata))
696 continue; 483 continue;
697 if (sdata->vif.type != NL80211_IFTYPE_STATION) 484 if (sdata->vif.type != NL80211_IFTYPE_STATION)
698 continue; 485 continue;
@@ -701,7 +488,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
701 } 488 }
702 489
703 if (count == 1 && found->u.mgd.powersave && 490 if (count == 1 && found->u.mgd.powersave &&
704 found->u.mgd.associated && list_empty(&found->u.mgd.work_list) && 491 found->u.mgd.associated &&
492 found->u.mgd.associated->beacon_ies &&
705 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | 493 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
706 IEEE80211_STA_CONNECTION_POLL))) { 494 IEEE80211_STA_CONNECTION_POLL))) {
707 s32 beaconint_us; 495 s32 beaconint_us;
@@ -715,20 +503,29 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
715 if (beaconint_us > latency) { 503 if (beaconint_us > latency) {
716 local->ps_sdata = NULL; 504 local->ps_sdata = NULL;
717 } else { 505 } else {
718 u8 dtimper = found->vif.bss_conf.dtim_period; 506 struct ieee80211_bss *bss;
719 int maxslp = 1; 507 int maxslp = 1;
508 u8 dtimper;
720 509
721 if (dtimper > 1) 510 bss = (void *)found->u.mgd.associated->priv;
511 dtimper = bss->dtim_period;
512
513 /* If the TIM IE is invalid, pretend the value is 1 */
514 if (!dtimper)
515 dtimper = 1;
516 else if (dtimper > 1)
722 maxslp = min_t(int, dtimper, 517 maxslp = min_t(int, dtimper,
723 latency / beaconint_us); 518 latency / beaconint_us);
724 519
725 local->hw.conf.max_sleep_period = maxslp; 520 local->hw.conf.max_sleep_period = maxslp;
521 local->hw.conf.ps_dtim_period = dtimper;
726 local->ps_sdata = found; 522 local->ps_sdata = found;
727 } 523 }
728 } else { 524 } else {
729 local->ps_sdata = NULL; 525 local->ps_sdata = NULL;
730 } 526 }
731 527
528 change:
732 ieee80211_change_ps(local); 529 ieee80211_change_ps(local);
733} 530}
734 531
@@ -753,6 +550,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
753 container_of(work, struct ieee80211_local, 550 container_of(work, struct ieee80211_local,
754 dynamic_ps_enable_work); 551 dynamic_ps_enable_work);
755 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 552 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
553 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
756 554
757 /* can only happen when PS was just disabled anyway */ 555 /* can only happen when PS was just disabled anyway */
758 if (!sdata) 556 if (!sdata)
@@ -761,11 +559,17 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
761 if (local->hw.conf.flags & IEEE80211_CONF_PS) 559 if (local->hw.conf.flags & IEEE80211_CONF_PS)
762 return; 560 return;
763 561
764 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 562 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
563 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
765 ieee80211_send_nullfunc(local, sdata, 1); 564 ieee80211_send_nullfunc(local, sdata, 1);
766 565
767 local->hw.conf.flags |= IEEE80211_CONF_PS; 566 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
768 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 567 (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
568 (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
569 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
570 local->hw.conf.flags |= IEEE80211_CONF_PS;
571 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
572 }
769} 573}
770 574
771void ieee80211_dynamic_ps_timer(unsigned long data) 575void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -786,9 +590,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
786 struct ieee80211_tx_queue_params params; 590 struct ieee80211_tx_queue_params params;
787 size_t left; 591 size_t left;
788 int count; 592 int count;
789 u8 *pos; 593 u8 *pos, uapsd_queues = 0;
790 594
791 if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) 595 if (local->hw.queues < 4)
792 return; 596 return;
793 597
794 if (!wmm_param) 598 if (!wmm_param)
@@ -796,6 +600,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
796 600
797 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 601 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
798 return; 602 return;
603
604 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
605 uapsd_queues = local->uapsd_queues;
606
799 count = wmm_param[6] & 0x0f; 607 count = wmm_param[6] & 0x0f;
800 if (count == ifmgd->wmm_last_param_set) 608 if (count == ifmgd->wmm_last_param_set)
801 return; 609 return;
@@ -810,6 +618,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
810 for (; left >= 4; left -= 4, pos += 4) { 618 for (; left >= 4; left -= 4, pos += 4) {
811 int aci = (pos[0] >> 5) & 0x03; 619 int aci = (pos[0] >> 5) & 0x03;
812 int acm = (pos[0] >> 4) & 0x01; 620 int acm = (pos[0] >> 4) & 0x01;
621 bool uapsd = false;
813 int queue; 622 int queue;
814 623
815 switch (aci) { 624 switch (aci) {
@@ -817,22 +626,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
817 queue = 3; 626 queue = 3;
818 if (acm) 627 if (acm)
819 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 628 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
629 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
630 uapsd = true;
820 break; 631 break;
821 case 2: /* AC_VI */ 632 case 2: /* AC_VI */
822 queue = 1; 633 queue = 1;
823 if (acm) 634 if (acm)
824 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 635 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
636 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
637 uapsd = true;
825 break; 638 break;
826 case 3: /* AC_VO */ 639 case 3: /* AC_VO */
827 queue = 0; 640 queue = 0;
828 if (acm) 641 if (acm)
829 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 642 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
643 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
644 uapsd = true;
830 break; 645 break;
831 case 0: /* AC_BE */ 646 case 0: /* AC_BE */
832 default: 647 default:
833 queue = 2; 648 queue = 2;
834 if (acm) 649 if (acm)
835 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 650 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
651 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
652 uapsd = true;
836 break; 653 break;
837 } 654 }
838 655
@@ -840,11 +657,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
840 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 657 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
841 params.cw_min = ecw2cw(pos[1] & 0x0f); 658 params.cw_min = ecw2cw(pos[1] & 0x0f);
842 params.txop = get_unaligned_le16(pos + 2); 659 params.txop = get_unaligned_le16(pos + 2);
660 params.uapsd = uapsd;
661
843#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 662#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
844 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 663 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
845 "cWmin=%d cWmax=%d txop=%d\n", 664 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
846 wiphy_name(local->hw.wiphy), queue, aci, acm, 665 wiphy_name(local->hw.wiphy), queue, aci, acm,
847 params.aifs, params.cw_min, params.cw_max, params.txop); 666 params.aifs, params.cw_min, params.cw_max, params.txop,
667 params.uapsd);
848#endif 668#endif
849 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 669 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
850 printk(KERN_DEBUG "%s: failed to set TX queue " 670 printk(KERN_DEBUG "%s: failed to set TX queue "
@@ -871,6 +691,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
871 } 691 }
872 692
873 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 693 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
694 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
695 use_short_slot = true;
874 696
875 if (use_protection != bss_conf->use_cts_prot) { 697 if (use_protection != bss_conf->use_cts_prot) {
876 bss_conf->use_cts_prot = use_protection; 698 bss_conf->use_cts_prot = use_protection;
@@ -891,25 +713,23 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
891} 713}
892 714
893static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, 715static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
894 struct ieee80211_mgd_work *wk, 716 struct cfg80211_bss *cbss,
895 u32 bss_info_changed) 717 u32 bss_info_changed)
896{ 718{
719 struct ieee80211_bss *bss = (void *)cbss->priv;
897 struct ieee80211_local *local = sdata->local; 720 struct ieee80211_local *local = sdata->local;
898 struct ieee80211_bss *bss = wk->bss;
899 721
900 bss_info_changed |= BSS_CHANGED_ASSOC; 722 bss_info_changed |= BSS_CHANGED_ASSOC;
901 /* set timing information */ 723 /* set timing information */
902 sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; 724 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
903 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 725 sdata->vif.bss_conf.timestamp = cbss->tsf;
904 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
905 726
906 bss_info_changed |= BSS_CHANGED_BEACON_INT; 727 bss_info_changed |= BSS_CHANGED_BEACON_INT;
907 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 728 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
908 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 729 cbss->capability, bss->has_erp_value, bss->erp_value);
909 730
910 sdata->u.mgd.associated = bss; 731 sdata->u.mgd.associated = cbss;
911 sdata->u.mgd.old_associate_work = wk; 732 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
912 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
913 733
914 /* just to be sure */ 734 /* just to be sure */
915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 735 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -940,99 +760,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
940 760
941 mutex_lock(&local->iflist_mtx); 761 mutex_lock(&local->iflist_mtx);
942 ieee80211_recalc_ps(local, -1); 762 ieee80211_recalc_ps(local, -1);
763 ieee80211_recalc_smps(local, sdata);
943 mutex_unlock(&local->iflist_mtx); 764 mutex_unlock(&local->iflist_mtx);
944 765
945 netif_tx_start_all_queues(sdata->dev); 766 netif_tx_start_all_queues(sdata->dev);
946 netif_carrier_on(sdata->dev); 767 netif_carrier_on(sdata->dev);
947} 768}
948 769
949static enum rx_mgmt_action __must_check 770static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
950ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
951 struct ieee80211_mgd_work *wk)
952{
953 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
954 struct ieee80211_local *local = sdata->local;
955
956 wk->tries++;
957 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
958 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
959 sdata->dev->name, wk->bss->cbss.bssid);
960
961 /*
962 * Most likely AP is not in the range so remove the
963 * bss struct for that AP.
964 */
965 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
966
967 /*
968 * We might have a pending scan which had no chance to run yet
969 * due to work needing to be done. Hence, queue the STAs work
970 * again for that.
971 */
972 ieee80211_queue_work(&local->hw, &ifmgd->work);
973 return RX_MGMT_CFG80211_AUTH_TO;
974 }
975
976 printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
977 sdata->dev->name, wk->bss->cbss.bssid,
978 wk->tries);
979
980 /*
981 * Direct probe is sent to broadcast address as some APs
982 * will not answer to direct packet in unassociated state.
983 */
984 ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
985
986 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
987 run_again(ifmgd, wk->timeout);
988
989 return RX_MGMT_NONE;
990}
991
992
993static enum rx_mgmt_action __must_check
994ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
995 struct ieee80211_mgd_work *wk)
996{
997 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
998 struct ieee80211_local *local = sdata->local;
999
1000 wk->tries++;
1001 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
1002 printk(KERN_DEBUG "%s: authentication with AP %pM"
1003 " timed out\n",
1004 sdata->dev->name, wk->bss->cbss.bssid);
1005
1006 /*
1007 * Most likely AP is not in the range so remove the
1008 * bss struct for that AP.
1009 */
1010 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1011
1012 /*
1013 * We might have a pending scan which had no chance to run yet
1014 * due to work needing to be done. Hence, queue the STAs work
1015 * again for that.
1016 */
1017 ieee80211_queue_work(&local->hw, &ifmgd->work);
1018 return RX_MGMT_CFG80211_AUTH_TO;
1019 }
1020
1021 printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
1022 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1023
1024 ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
1025 wk->bss->cbss.bssid, NULL, 0, 0);
1026 wk->auth_transaction = 2;
1027
1028 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
1029 run_again(ifmgd, wk->timeout);
1030
1031 return RX_MGMT_NONE;
1032}
1033
1034static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1035 bool deauth)
1036{ 771{
1037 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 772 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1038 struct ieee80211_local *local = sdata->local; 773 struct ieee80211_local *local = sdata->local;
@@ -1045,21 +780,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1045 if (WARN_ON(!ifmgd->associated)) 780 if (WARN_ON(!ifmgd->associated))
1046 return; 781 return;
1047 782
1048 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 783 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1049 784
1050 ifmgd->associated = NULL; 785 ifmgd->associated = NULL;
1051 memset(ifmgd->bssid, 0, ETH_ALEN); 786 memset(ifmgd->bssid, 0, ETH_ALEN);
1052 787
1053 if (deauth) {
1054 kfree(ifmgd->old_associate_work);
1055 ifmgd->old_associate_work = NULL;
1056 } else {
1057 struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
1058
1059 wk->state = IEEE80211_MGD_STATE_IDLE;
1060 list_add(&wk->list, &ifmgd->work_list);
1061 }
1062
1063 /* 788 /*
1064 * we need to commit the associated = NULL change because the 789 * we need to commit the associated = NULL change because the
1065 * scan code uses that to determine whether this iface should 790 * scan code uses that to determine whether this iface should
@@ -1078,9 +803,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1078 netif_carrier_off(sdata->dev); 803 netif_carrier_off(sdata->dev);
1079 804
1080 rcu_read_lock(); 805 rcu_read_lock();
1081 sta = sta_info_get(local, bssid); 806 sta = sta_info_get(sdata, bssid);
1082 if (sta) 807 if (sta) {
808 set_sta_flags(sta, WLAN_STA_DISASSOC);
1083 ieee80211_sta_tear_down_BA_sessions(sta); 809 ieee80211_sta_tear_down_BA_sessions(sta);
810 }
1084 rcu_read_unlock(); 811 rcu_read_unlock();
1085 812
1086 changed |= ieee80211_reset_erp_info(sdata); 813 changed |= ieee80211_reset_erp_info(sdata);
@@ -1113,57 +840,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1113 changed |= BSS_CHANGED_BSSID; 840 changed |= BSS_CHANGED_BSSID;
1114 ieee80211_bss_info_change_notify(sdata, changed); 841 ieee80211_bss_info_change_notify(sdata, changed);
1115 842
1116 rcu_read_lock(); 843 sta_info_destroy_addr(sdata, bssid);
1117
1118 sta = sta_info_get(local, bssid);
1119 if (!sta) {
1120 rcu_read_unlock();
1121 return;
1122 }
1123
1124 sta_info_unlink(&sta);
1125
1126 rcu_read_unlock();
1127
1128 sta_info_destroy(sta);
1129}
1130
1131static enum rx_mgmt_action __must_check
1132ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1133 struct ieee80211_mgd_work *wk)
1134{
1135 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1136 struct ieee80211_local *local = sdata->local;
1137
1138 wk->tries++;
1139 if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
1140 printk(KERN_DEBUG "%s: association with AP %pM"
1141 " timed out\n",
1142 sdata->dev->name, wk->bss->cbss.bssid);
1143
1144 /*
1145 * Most likely AP is not in the range so remove the
1146 * bss struct for that AP.
1147 */
1148 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1149
1150 /*
1151 * We might have a pending scan which had no chance to run yet
1152 * due to work needing to be done. Hence, queue the STAs work
1153 * again for that.
1154 */
1155 ieee80211_queue_work(&local->hw, &ifmgd->work);
1156 return RX_MGMT_CFG80211_ASSOC_TO;
1157 }
1158
1159 printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
1160 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1161 ieee80211_send_assoc(sdata, wk);
1162
1163 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
1164 run_again(ifmgd, wk->timeout);
1165
1166 return RX_MGMT_NONE;
1167} 844}
1168 845
1169void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 846void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1189,8 +866,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1189 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 866 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1190 const u8 *ssid; 867 const u8 *ssid;
1191 868
1192 ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID); 869 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1193 ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid, 870 ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
1194 ssid + 2, ssid[1], NULL, 0); 871 ssid + 2, ssid[1], NULL, 0);
1195 872
1196 ifmgd->probe_send_count++; 873 ifmgd->probe_send_count++;
@@ -1204,12 +881,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1204 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 881 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1205 bool already = false; 882 bool already = false;
1206 883
1207 if (!netif_running(sdata->dev)) 884 if (!ieee80211_sdata_running(sdata))
1208 return; 885 return;
1209 886
1210 if (sdata->local->scanning) 887 if (sdata->local->scanning)
1211 return; 888 return;
1212 889
890 if (sdata->local->tmp_channel)
891 return;
892
1213 mutex_lock(&ifmgd->mtx); 893 mutex_lock(&ifmgd->mtx);
1214 894
1215 if (!ifmgd->associated) 895 if (!ifmgd->associated)
@@ -1218,7 +898,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1218#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 898#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1219 if (beacon && net_ratelimit()) 899 if (beacon && net_ratelimit())
1220 printk(KERN_DEBUG "%s: detected beacon loss from AP " 900 printk(KERN_DEBUG "%s: detected beacon loss from AP "
1221 "- sending probe request\n", sdata->dev->name); 901 "- sending probe request\n", sdata->name);
1222#endif 902#endif
1223 903
1224 /* 904 /*
@@ -1271,88 +951,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1271} 951}
1272EXPORT_SYMBOL(ieee80211_beacon_loss); 952EXPORT_SYMBOL(ieee80211_beacon_loss);
1273 953
1274static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1275 struct ieee80211_mgd_work *wk)
1276{
1277 wk->state = IEEE80211_MGD_STATE_IDLE;
1278 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1279}
1280
1281
1282static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1283 struct ieee80211_mgd_work *wk,
1284 struct ieee80211_mgmt *mgmt,
1285 size_t len)
1286{
1287 u8 *pos;
1288 struct ieee802_11_elems elems;
1289
1290 pos = mgmt->u.auth.variable;
1291 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1292 if (!elems.challenge)
1293 return;
1294 ieee80211_send_auth(sdata, 3, wk->auth_alg,
1295 elems.challenge - 2, elems.challenge_len + 2,
1296 wk->bss->cbss.bssid,
1297 wk->key, wk->key_len, wk->key_idx);
1298 wk->auth_transaction = 4;
1299}
1300
1301static enum rx_mgmt_action __must_check
1302ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1303 struct ieee80211_mgd_work *wk,
1304 struct ieee80211_mgmt *mgmt, size_t len)
1305{
1306 u16 auth_alg, auth_transaction, status_code;
1307
1308 if (wk->state != IEEE80211_MGD_STATE_AUTH)
1309 return RX_MGMT_NONE;
1310
1311 if (len < 24 + 6)
1312 return RX_MGMT_NONE;
1313
1314 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1315 return RX_MGMT_NONE;
1316
1317 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1318 return RX_MGMT_NONE;
1319
1320 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1321 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1322 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1323
1324 if (auth_alg != wk->auth_alg ||
1325 auth_transaction != wk->auth_transaction)
1326 return RX_MGMT_NONE;
1327
1328 if (status_code != WLAN_STATUS_SUCCESS) {
1329 list_del(&wk->list);
1330 kfree(wk);
1331 return RX_MGMT_CFG80211_AUTH;
1332 }
1333
1334 switch (wk->auth_alg) {
1335 case WLAN_AUTH_OPEN:
1336 case WLAN_AUTH_LEAP:
1337 case WLAN_AUTH_FT:
1338 ieee80211_auth_completed(sdata, wk);
1339 return RX_MGMT_CFG80211_AUTH;
1340 case WLAN_AUTH_SHARED_KEY:
1341 if (wk->auth_transaction == 4) {
1342 ieee80211_auth_completed(sdata, wk);
1343 return RX_MGMT_CFG80211_AUTH;
1344 } else
1345 ieee80211_auth_challenge(sdata, wk, mgmt, len);
1346 break;
1347 }
1348
1349 return RX_MGMT_NONE;
1350}
1351
1352
1353static enum rx_mgmt_action __must_check 954static enum rx_mgmt_action __must_check
1354ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 955ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1355 struct ieee80211_mgd_work *wk,
1356 struct ieee80211_mgmt *mgmt, size_t len) 956 struct ieee80211_mgmt *mgmt, size_t len)
1357{ 957{
1358 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 958 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1364,23 +964,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1364 964
1365 ASSERT_MGD_MTX(ifmgd); 965 ASSERT_MGD_MTX(ifmgd);
1366 966
1367 if (wk) 967 bssid = ifmgd->associated->bssid;
1368 bssid = wk->bss->cbss.bssid;
1369 else
1370 bssid = ifmgd->associated->cbss.bssid;
1371 968
1372 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 969 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1373 970
1374 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 971 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1375 sdata->dev->name, bssid, reason_code); 972 sdata->name, bssid, reason_code);
1376 973
1377 if (!wk) { 974 ieee80211_set_disassoc(sdata);
1378 ieee80211_set_disassoc(sdata, true); 975 ieee80211_recalc_idle(sdata->local);
1379 ieee80211_recalc_idle(sdata->local);
1380 } else {
1381 list_del(&wk->list);
1382 kfree(wk);
1383 }
1384 976
1385 return RX_MGMT_CFG80211_DEAUTH; 977 return RX_MGMT_CFG80211_DEAUTH;
1386} 978}
@@ -1401,123 +993,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1401 if (WARN_ON(!ifmgd->associated)) 993 if (WARN_ON(!ifmgd->associated))
1402 return RX_MGMT_NONE; 994 return RX_MGMT_NONE;
1403 995
1404 if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN))) 996 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
1405 return RX_MGMT_NONE; 997 return RX_MGMT_NONE;
1406 998
1407 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 999 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1408 1000
1409 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 1001 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1410 sdata->dev->name, mgmt->sa, reason_code); 1002 sdata->name, mgmt->sa, reason_code);
1411 1003
1412 ieee80211_set_disassoc(sdata, false); 1004 ieee80211_set_disassoc(sdata);
1413 ieee80211_recalc_idle(sdata->local); 1005 ieee80211_recalc_idle(sdata->local);
1414 return RX_MGMT_CFG80211_DISASSOC; 1006 return RX_MGMT_CFG80211_DISASSOC;
1415} 1007}
1416 1008
1417 1009
1418static enum rx_mgmt_action __must_check 1010static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1419ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, 1011 struct ieee80211_mgmt *mgmt, size_t len)
1420 struct ieee80211_mgd_work *wk,
1421 struct ieee80211_mgmt *mgmt, size_t len,
1422 bool reassoc)
1423{ 1012{
1013 struct ieee80211_sub_if_data *sdata = wk->sdata;
1424 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1014 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1425 struct ieee80211_local *local = sdata->local; 1015 struct ieee80211_local *local = sdata->local;
1426 struct ieee80211_supported_band *sband; 1016 struct ieee80211_supported_band *sband;
1427 struct sta_info *sta; 1017 struct sta_info *sta;
1018 struct cfg80211_bss *cbss = wk->assoc.bss;
1019 u8 *pos;
1428 u32 rates, basic_rates; 1020 u32 rates, basic_rates;
1429 u16 capab_info, status_code, aid; 1021 u16 capab_info, aid;
1430 struct ieee802_11_elems elems; 1022 struct ieee802_11_elems elems;
1431 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1023 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1432 u8 *pos;
1433 u32 changed = 0; 1024 u32 changed = 0;
1434 int i, j; 1025 int i, j, err;
1435 bool have_higher_than_11mbit = false, newsta = false; 1026 bool have_higher_than_11mbit = false;
1436 u16 ap_ht_cap_flags; 1027 u16 ap_ht_cap_flags;
1437 1028
1438 /* 1029 /* AssocResp and ReassocResp have identical structure */
1439 * AssocResp and ReassocResp have identical structure, so process both
1440 * of them in this function.
1441 */
1442
1443 if (len < 24 + 6)
1444 return RX_MGMT_NONE;
1445 1030
1446 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1447 return RX_MGMT_NONE;
1448
1449 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1450 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
1451 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 1031 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
1452 1032 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1453 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
1454 "status=%d aid=%d)\n",
1455 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
1456 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1457
1458 pos = mgmt->u.assoc_resp.variable;
1459 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1460
1461 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
1462 elems.timeout_int && elems.timeout_int_len == 5 &&
1463 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
1464 u32 tu, ms;
1465 tu = get_unaligned_le32(elems.timeout_int + 1);
1466 ms = tu * 1024 / 1000;
1467 printk(KERN_DEBUG "%s: AP rejected association temporarily; "
1468 "comeback duration %u TU (%u ms)\n",
1469 sdata->dev->name, tu, ms);
1470 wk->timeout = jiffies + msecs_to_jiffies(ms);
1471 if (ms > IEEE80211_ASSOC_TIMEOUT)
1472 run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
1473 return RX_MGMT_NONE;
1474 }
1475
1476 if (status_code != WLAN_STATUS_SUCCESS) {
1477 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1478 sdata->dev->name, status_code);
1479 wk->state = IEEE80211_MGD_STATE_IDLE;
1480 return RX_MGMT_CFG80211_ASSOC;
1481 }
1482 1033
1483 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1034 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1484 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1035 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
1485 "set\n", sdata->dev->name, aid); 1036 "set\n", sdata->name, aid);
1486 aid &= ~(BIT(15) | BIT(14)); 1037 aid &= ~(BIT(15) | BIT(14));
1487 1038
1039 pos = mgmt->u.assoc_resp.variable;
1040 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1041
1488 if (!elems.supp_rates) { 1042 if (!elems.supp_rates) {
1489 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1043 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
1490 sdata->dev->name); 1044 sdata->name);
1491 return RX_MGMT_NONE; 1045 return false;
1492 } 1046 }
1493 1047
1494 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
1495 ifmgd->aid = aid; 1048 ifmgd->aid = aid;
1496 1049
1497 rcu_read_lock(); 1050 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
1498
1499 /* Add STA entry for the AP */
1500 sta = sta_info_get(local, wk->bss->cbss.bssid);
1501 if (!sta) { 1051 if (!sta) {
1502 newsta = true; 1052 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1503 1053 " the AP\n", sdata->name);
1504 rcu_read_unlock(); 1054 return false;
1505
1506 sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
1507 if (!sta) {
1508 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1509 " the AP\n", sdata->dev->name);
1510 return RX_MGMT_NONE;
1511 }
1512
1513 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1514 WLAN_STA_ASSOC_AP);
1515 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1516 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1517
1518 rcu_read_lock();
1519 } 1055 }
1520 1056
1057 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1058 WLAN_STA_ASSOC_AP);
1059 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1060 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1061
1521 rates = 0; 1062 rates = 0;
1522 basic_rates = 0; 1063 basic_rates = 0;
1523 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1064 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1580,40 +1121,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1580 if (elems.wmm_param) 1121 if (elems.wmm_param)
1581 set_sta_flags(sta, WLAN_STA_WME); 1122 set_sta_flags(sta, WLAN_STA_WME);
1582 1123
1583 if (newsta) { 1124 err = sta_info_insert(sta);
1584 int err = sta_info_insert(sta); 1125 sta = NULL;
1585 if (err) { 1126 if (err) {
1586 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1127 printk(KERN_DEBUG "%s: failed to insert STA entry for"
1587 " the AP (error %d)\n", sdata->dev->name, err); 1128 " the AP (error %d)\n", sdata->name, err);
1588 rcu_read_unlock(); 1129 return false;
1589 return RX_MGMT_NONE;
1590 }
1591 } 1130 }
1592 1131
1593 rcu_read_unlock();
1594
1595 if (elems.wmm_param) 1132 if (elems.wmm_param)
1596 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1133 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1597 elems.wmm_param_len); 1134 elems.wmm_param_len);
1598 else 1135 else
1599 ieee80211_set_wmm_default(sdata); 1136 ieee80211_set_wmm_default(sdata);
1600 1137
1138 local->oper_channel = wk->chan;
1139
1601 if (elems.ht_info_elem && elems.wmm_param && 1140 if (elems.ht_info_elem && elems.wmm_param &&
1602 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1141 (sdata->local->hw.queues >= 4) &&
1603 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1142 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1604 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1143 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1605 wk->bss->cbss.bssid, 1144 cbss->bssid, ap_ht_cap_flags);
1606 ap_ht_cap_flags);
1607
1608 /* delete work item -- must be before set_associated for PS */
1609 list_del(&wk->list);
1610 1145
1611 /* set AID and assoc capability, 1146 /* set AID and assoc capability,
1612 * ieee80211_set_associated() will tell the driver */ 1147 * ieee80211_set_associated() will tell the driver */
1613 bss_conf->aid = aid; 1148 bss_conf->aid = aid;
1614 bss_conf->assoc_capability = capab_info; 1149 bss_conf->assoc_capability = capab_info;
1615 /* this will take ownership of wk */ 1150 ieee80211_set_associated(sdata, cbss, changed);
1616 ieee80211_set_associated(sdata, wk, changed); 1151
1152 /*
1153 * If we're using 4-addr mode, let the AP know that we're
1154 * doing so, so that it can create the STA VLAN on its side
1155 */
1156 if (ifmgd->use_4addr)
1157 ieee80211_send_4addr_nullfunc(local, sdata);
1617 1158
1618 /* 1159 /*
1619 * Start timer to probe the connection to the AP now. 1160 * Start timer to probe the connection to the AP now.
@@ -1622,7 +1163,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1622 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1163 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1623 mod_beacon_timer(sdata); 1164 mod_beacon_timer(sdata);
1624 1165
1625 return RX_MGMT_CFG80211_ASSOC; 1166 return true;
1626} 1167}
1627 1168
1628 1169
@@ -1637,6 +1178,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1637 int freq; 1178 int freq;
1638 struct ieee80211_bss *bss; 1179 struct ieee80211_bss *bss;
1639 struct ieee80211_channel *channel; 1180 struct ieee80211_channel *channel;
1181 bool need_ps = false;
1182
1183 if (sdata->u.mgd.associated) {
1184 bss = (void *)sdata->u.mgd.associated->priv;
1185 /* not previously set so we may need to recalc */
1186 need_ps = !bss->dtim_period;
1187 }
1640 1188
1641 if (elems->ds_params && elems->ds_params_len == 1) 1189 if (elems->ds_params && elems->ds_params_len == 1)
1642 freq = ieee80211_channel_to_frequency(elems->ds_params[0]); 1190 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
@@ -1656,8 +1204,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1656 if (!sdata->u.mgd.associated) 1204 if (!sdata->u.mgd.associated)
1657 return; 1205 return;
1658 1206
1207 if (need_ps) {
1208 mutex_lock(&local->iflist_mtx);
1209 ieee80211_recalc_ps(local, -1);
1210 mutex_unlock(&local->iflist_mtx);
1211 }
1212
1659 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 1213 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
1660 (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid, 1214 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
1661 ETH_ALEN) == 0)) { 1215 ETH_ALEN) == 0)) {
1662 struct ieee80211_channel_sw_ie *sw_elem = 1216 struct ieee80211_channel_sw_ie *sw_elem =
1663 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1217 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1667,19 +1221,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1667 1221
1668 1222
1669static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 1223static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1670 struct ieee80211_mgd_work *wk, 1224 struct sk_buff *skb)
1671 struct ieee80211_mgmt *mgmt, size_t len,
1672 struct ieee80211_rx_status *rx_status)
1673{ 1225{
1226 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1674 struct ieee80211_if_managed *ifmgd; 1227 struct ieee80211_if_managed *ifmgd;
1675 size_t baselen; 1228 struct ieee80211_rx_status *rx_status = (void *) skb->cb;
1229 size_t baselen, len = skb->len;
1676 struct ieee802_11_elems elems; 1230 struct ieee802_11_elems elems;
1677 1231
1678 ifmgd = &sdata->u.mgd; 1232 ifmgd = &sdata->u.mgd;
1679 1233
1680 ASSERT_MGD_MTX(ifmgd); 1234 ASSERT_MGD_MTX(ifmgd);
1681 1235
1682 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1236 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
1683 return; /* ignore ProbeResp to foreign address */ 1237 return; /* ignore ProbeResp to foreign address */
1684 1238
1685 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1239 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1691,17 +1245,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1691 1245
1692 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1246 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1693 1247
1694 /* direct probe may be part of the association flow */
1695 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
1696 printk(KERN_DEBUG "%s: direct probe responded\n",
1697 sdata->dev->name);
1698 wk->tries = 0;
1699 wk->state = IEEE80211_MGD_STATE_AUTH;
1700 WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
1701 }
1702
1703 if (ifmgd->associated && 1248 if (ifmgd->associated &&
1704 memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 && 1249 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
1705 ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1250 ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1706 IEEE80211_STA_CONNECTION_POLL)) { 1251 IEEE80211_STA_CONNECTION_POLL)) {
1707 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1252 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1774,7 +1319,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1774 if (!ifmgd->associated) 1319 if (!ifmgd->associated)
1775 return; 1320 return;
1776 1321
1777 bssid = ifmgd->associated->cbss.bssid; 1322 bssid = ifmgd->associated->bssid;
1778 1323
1779 /* 1324 /*
1780 * And in theory even frames from a different AP we were just 1325 * And in theory even frames from a different AP we were just
@@ -1787,7 +1332,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1332#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1788 if (net_ratelimit()) { 1333 if (net_ratelimit()) {
1789 printk(KERN_DEBUG "%s: cancelling probereq poll due " 1334 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1790 "to a received beacon\n", sdata->dev->name); 1335 "to a received beacon\n", sdata->name);
1791 } 1336 }
1792#endif 1337#endif
1793 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 1338 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1865,7 +1410,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1865 1410
1866 rcu_read_lock(); 1411 rcu_read_lock();
1867 1412
1868 sta = sta_info_get(local, bssid); 1413 sta = sta_info_get(sdata, bssid);
1869 if (WARN_ON(!sta)) { 1414 if (WARN_ON(!sta)) {
1870 rcu_read_unlock(); 1415 rcu_read_unlock();
1871 return; 1416 return;
@@ -1913,9 +1458,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1913 switch (fc & IEEE80211_FCTL_STYPE) { 1458 switch (fc & IEEE80211_FCTL_STYPE) {
1914 case IEEE80211_STYPE_PROBE_RESP: 1459 case IEEE80211_STYPE_PROBE_RESP:
1915 case IEEE80211_STYPE_BEACON: 1460 case IEEE80211_STYPE_BEACON:
1916 case IEEE80211_STYPE_AUTH:
1917 case IEEE80211_STYPE_ASSOC_RESP:
1918 case IEEE80211_STYPE_REASSOC_RESP:
1919 case IEEE80211_STYPE_DEAUTH: 1461 case IEEE80211_STYPE_DEAUTH:
1920 case IEEE80211_STYPE_DISASSOC: 1462 case IEEE80211_STYPE_DISASSOC:
1921 case IEEE80211_STYPE_ACTION: 1463 case IEEE80211_STYPE_ACTION:
@@ -1933,7 +1475,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1933 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1475 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1934 struct ieee80211_rx_status *rx_status; 1476 struct ieee80211_rx_status *rx_status;
1935 struct ieee80211_mgmt *mgmt; 1477 struct ieee80211_mgmt *mgmt;
1936 struct ieee80211_mgd_work *wk;
1937 enum rx_mgmt_action rma = RX_MGMT_NONE; 1478 enum rx_mgmt_action rma = RX_MGMT_NONE;
1938 u16 fc; 1479 u16 fc;
1939 1480
@@ -1944,20 +1485,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1944 mutex_lock(&ifmgd->mtx); 1485 mutex_lock(&ifmgd->mtx);
1945 1486
1946 if (ifmgd->associated && 1487 if (ifmgd->associated &&
1947 memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid, 1488 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
1948 ETH_ALEN) == 0) {
1949 switch (fc & IEEE80211_FCTL_STYPE) { 1489 switch (fc & IEEE80211_FCTL_STYPE) {
1950 case IEEE80211_STYPE_BEACON: 1490 case IEEE80211_STYPE_BEACON:
1951 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 1491 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
1952 rx_status); 1492 rx_status);
1953 break; 1493 break;
1954 case IEEE80211_STYPE_PROBE_RESP: 1494 case IEEE80211_STYPE_PROBE_RESP:
1955 ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt, 1495 ieee80211_rx_mgmt_probe_resp(sdata, skb);
1956 skb->len, rx_status);
1957 break; 1496 break;
1958 case IEEE80211_STYPE_DEAUTH: 1497 case IEEE80211_STYPE_DEAUTH:
1959 rma = ieee80211_rx_mgmt_deauth(sdata, NULL, 1498 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
1960 mgmt, skb->len);
1961 break; 1499 break;
1962 case IEEE80211_STYPE_DISASSOC: 1500 case IEEE80211_STYPE_DISASSOC:
1963 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 1501 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
@@ -1968,7 +1506,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1968 1506
1969 ieee80211_sta_process_chanswitch(sdata, 1507 ieee80211_sta_process_chanswitch(sdata,
1970 &mgmt->u.action.u.chan_switch.sw_elem, 1508 &mgmt->u.action.u.chan_switch.sw_elem,
1971 ifmgd->associated); 1509 (void *)ifmgd->associated->priv);
1972 break; 1510 break;
1973 } 1511 }
1974 mutex_unlock(&ifmgd->mtx); 1512 mutex_unlock(&ifmgd->mtx);
@@ -1989,58 +1527,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1989 goto out; 1527 goto out;
1990 } 1528 }
1991 1529
1992 list_for_each_entry(wk, &ifmgd->work_list, list) {
1993 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1994 continue;
1995
1996 switch (fc & IEEE80211_FCTL_STYPE) {
1997 case IEEE80211_STYPE_PROBE_RESP:
1998 ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
1999 rx_status);
2000 break;
2001 case IEEE80211_STYPE_AUTH:
2002 rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
2003 break;
2004 case IEEE80211_STYPE_ASSOC_RESP:
2005 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2006 skb->len, false);
2007 break;
2008 case IEEE80211_STYPE_REASSOC_RESP:
2009 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2010 skb->len, true);
2011 break;
2012 case IEEE80211_STYPE_DEAUTH:
2013 rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
2014 skb->len);
2015 break;
2016 }
2017 /*
2018 * We've processed this frame for that work, so it can't
2019 * belong to another work struct.
2020 * NB: this is also required for correctness because the
2021 * called functions can free 'wk', and for 'rma'!
2022 */
2023 break;
2024 }
2025
2026 mutex_unlock(&ifmgd->mtx); 1530 mutex_unlock(&ifmgd->mtx);
2027 1531
2028 switch (rma) { 1532 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
2029 case RX_MGMT_NONE: 1533 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
2030 /* no action */
2031 break;
2032 case RX_MGMT_CFG80211_AUTH:
2033 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
2034 break;
2035 case RX_MGMT_CFG80211_ASSOC:
2036 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
2037 break;
2038 case RX_MGMT_CFG80211_DEAUTH:
2039 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1534 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2040 break;
2041 default:
2042 WARN(1, "unexpected: %d", rma);
2043 }
2044 1535
2045 out: 1536 out:
2046 kfree_skb(skb); 1537 kfree_skb(skb);
@@ -2068,12 +1559,8 @@ static void ieee80211_sta_work(struct work_struct *work)
2068 struct ieee80211_local *local = sdata->local; 1559 struct ieee80211_local *local = sdata->local;
2069 struct ieee80211_if_managed *ifmgd; 1560 struct ieee80211_if_managed *ifmgd;
2070 struct sk_buff *skb; 1561 struct sk_buff *skb;
2071 struct ieee80211_mgd_work *wk, *tmp;
2072 LIST_HEAD(free_work);
2073 enum rx_mgmt_action rma;
2074 bool anybusy = false;
2075 1562
2076 if (!netif_running(sdata->dev)) 1563 if (!ieee80211_sdata_running(sdata))
2077 return; 1564 return;
2078 1565
2079 if (local->scanning) 1566 if (local->scanning)
@@ -2104,7 +1591,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2104 ifmgd->associated) { 1591 ifmgd->associated) {
2105 u8 bssid[ETH_ALEN]; 1592 u8 bssid[ETH_ALEN];
2106 1593
2107 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 1594 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
2108 if (time_is_after_jiffies(ifmgd->probe_timeout)) 1595 if (time_is_after_jiffies(ifmgd->probe_timeout))
2109 run_again(ifmgd, ifmgd->probe_timeout); 1596 run_again(ifmgd, ifmgd->probe_timeout);
2110 1597
@@ -2126,7 +1613,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2126 printk(KERN_DEBUG "No probe response from AP %pM" 1613 printk(KERN_DEBUG "No probe response from AP %pM"
2127 " after %dms, disconnecting.\n", 1614 " after %dms, disconnecting.\n",
2128 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1615 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2129 ieee80211_set_disassoc(sdata, true); 1616 ieee80211_set_disassoc(sdata);
2130 ieee80211_recalc_idle(local); 1617 ieee80211_recalc_idle(local);
2131 mutex_unlock(&ifmgd->mtx); 1618 mutex_unlock(&ifmgd->mtx);
2132 /* 1619 /*
@@ -2141,87 +1628,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2141 } 1628 }
2142 } 1629 }
2143 1630
2144
2145 ieee80211_recalc_idle(local);
2146
2147 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
2148 if (time_is_after_jiffies(wk->timeout)) {
2149 /*
2150 * This work item isn't supposed to be worked on
2151 * right now, but take care to adjust the timer
2152 * properly.
2153 */
2154 run_again(ifmgd, wk->timeout);
2155 continue;
2156 }
2157
2158 switch (wk->state) {
2159 default:
2160 WARN_ON(1);
2161 /* fall through */
2162 case IEEE80211_MGD_STATE_IDLE:
2163 /* nothing */
2164 rma = RX_MGMT_NONE;
2165 break;
2166 case IEEE80211_MGD_STATE_PROBE:
2167 rma = ieee80211_direct_probe(sdata, wk);
2168 break;
2169 case IEEE80211_MGD_STATE_AUTH:
2170 rma = ieee80211_authenticate(sdata, wk);
2171 break;
2172 case IEEE80211_MGD_STATE_ASSOC:
2173 rma = ieee80211_associate(sdata, wk);
2174 break;
2175 }
2176
2177 switch (rma) {
2178 case RX_MGMT_NONE:
2179 /* no action required */
2180 break;
2181 case RX_MGMT_CFG80211_AUTH_TO:
2182 case RX_MGMT_CFG80211_ASSOC_TO:
2183 list_del(&wk->list);
2184 list_add(&wk->list, &free_work);
2185 wk->tries = rma; /* small abuse but only local */
2186 break;
2187 default:
2188 WARN(1, "unexpected: %d", rma);
2189 }
2190 }
2191
2192 list_for_each_entry(wk, &ifmgd->work_list, list) {
2193 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2194 anybusy = true;
2195 break;
2196 }
2197 }
2198 if (!anybusy &&
2199 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2200 ieee80211_queue_delayed_work(&local->hw,
2201 &local->scan_work,
2202 round_jiffies_relative(0));
2203
2204 mutex_unlock(&ifmgd->mtx); 1631 mutex_unlock(&ifmgd->mtx);
2205
2206 list_for_each_entry_safe(wk, tmp, &free_work, list) {
2207 switch (wk->tries) {
2208 case RX_MGMT_CFG80211_AUTH_TO:
2209 cfg80211_send_auth_timeout(sdata->dev,
2210 wk->bss->cbss.bssid);
2211 break;
2212 case RX_MGMT_CFG80211_ASSOC_TO:
2213 cfg80211_send_assoc_timeout(sdata->dev,
2214 wk->bss->cbss.bssid);
2215 break;
2216 default:
2217 WARN(1, "unexpected: %d", wk->tries);
2218 }
2219
2220 list_del(&wk->list);
2221 kfree(wk);
2222 }
2223
2224 ieee80211_recalc_idle(local);
2225} 1632}
2226 1633
2227static void ieee80211_sta_bcn_mon_timer(unsigned long data) 1634static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2330,14 +1737,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2330 (unsigned long) sdata); 1737 (unsigned long) sdata);
2331 skb_queue_head_init(&ifmgd->skb_queue); 1738 skb_queue_head_init(&ifmgd->skb_queue);
2332 1739
2333 INIT_LIST_HEAD(&ifmgd->work_list);
2334
2335 ifmgd->capab = WLAN_CAPABILITY_ESS;
2336 ifmgd->flags = 0; 1740 ifmgd->flags = 0;
2337 if (sdata->local->hw.queues >= 4)
2338 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
2339 1741
2340 mutex_init(&ifmgd->mtx); 1742 mutex_init(&ifmgd->mtx);
1743
1744 if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
1745 ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
1746 else
1747 ifmgd->req_smps = IEEE80211_SMPS_OFF;
2341} 1748}
2342 1749
2343/* scan finished notification */ 1750/* scan finished notification */
@@ -2368,12 +1775,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2368} 1775}
2369 1776
2370/* config hooks */ 1777/* config hooks */
1778static enum work_done_result
1779ieee80211_probe_auth_done(struct ieee80211_work *wk,
1780 struct sk_buff *skb)
1781{
1782 if (!skb) {
1783 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
1784 return WORK_DONE_DESTROY;
1785 }
1786
1787 if (wk->type == IEEE80211_WORK_AUTH) {
1788 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
1789 return WORK_DONE_DESTROY;
1790 }
1791
1792 mutex_lock(&wk->sdata->u.mgd.mtx);
1793 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
1794 mutex_unlock(&wk->sdata->u.mgd.mtx);
1795
1796 wk->type = IEEE80211_WORK_AUTH;
1797 wk->probe_auth.tries = 0;
1798 return WORK_DONE_REQUEUE;
1799}
1800
2371int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 1801int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2372 struct cfg80211_auth_request *req) 1802 struct cfg80211_auth_request *req)
2373{ 1803{
2374 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2375 const u8 *ssid; 1804 const u8 *ssid;
2376 struct ieee80211_mgd_work *wk; 1805 struct ieee80211_work *wk;
2377 u16 auth_alg; 1806 u16 auth_alg;
2378 1807
2379 switch (req->auth_type) { 1808 switch (req->auth_type) {
@@ -2397,7 +1826,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2397 if (!wk) 1826 if (!wk)
2398 return -ENOMEM; 1827 return -ENOMEM;
2399 1828
2400 wk->bss = (void *)req->bss; 1829 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
2401 1830
2402 if (req->ie && req->ie_len) { 1831 if (req->ie && req->ie_len) {
2403 memcpy(wk->ie, req->ie, req->ie_len); 1832 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2405,68 +1834,95 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2405 } 1834 }
2406 1835
2407 if (req->key && req->key_len) { 1836 if (req->key && req->key_len) {
2408 wk->key_len = req->key_len; 1837 wk->probe_auth.key_len = req->key_len;
2409 wk->key_idx = req->key_idx; 1838 wk->probe_auth.key_idx = req->key_idx;
2410 memcpy(wk->key, req->key, req->key_len); 1839 memcpy(wk->probe_auth.key, req->key, req->key_len);
2411 } 1840 }
2412 1841
2413 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 1842 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
2414 memcpy(wk->ssid, ssid + 2, ssid[1]); 1843 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2415 wk->ssid_len = ssid[1]; 1844 wk->probe_auth.ssid_len = ssid[1];
2416 1845
2417 wk->state = IEEE80211_MGD_STATE_PROBE; 1846 wk->probe_auth.algorithm = auth_alg;
2418 wk->auth_alg = auth_alg; 1847 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2419 wk->timeout = jiffies; /* run right away */
2420 1848
2421 /* 1849 /* if we already have a probe, don't probe again */
2422 * XXX: if still associated need to tell AP that we're going 1850 if (req->bss->proberesp_ies)
2423 * to sleep and then change channel etc. 1851 wk->type = IEEE80211_WORK_AUTH;
2424 */ 1852 else
2425 sdata->local->oper_channel = req->bss->channel; 1853 wk->type = IEEE80211_WORK_DIRECT_PROBE;
2426 ieee80211_hw_config(sdata->local, 0); 1854 wk->chan = req->bss->channel;
2427 1855 wk->sdata = sdata;
2428 mutex_lock(&ifmgd->mtx); 1856 wk->done = ieee80211_probe_auth_done;
2429 list_add(&wk->list, &sdata->u.mgd.work_list);
2430 mutex_unlock(&ifmgd->mtx);
2431 1857
2432 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1858 ieee80211_add_work(wk);
2433 return 0; 1859 return 0;
2434} 1860}
2435 1861
2436int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1862static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2437 struct cfg80211_assoc_request *req) 1863 struct sk_buff *skb)
2438{ 1864{
2439 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1865 struct ieee80211_mgmt *mgmt;
2440 struct ieee80211_mgd_work *wk, *found = NULL; 1866 u16 status;
2441 int i, err;
2442 1867
2443 mutex_lock(&ifmgd->mtx); 1868 if (!skb) {
1869 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
1870 return WORK_DONE_DESTROY;
1871 }
2444 1872
2445 list_for_each_entry(wk, &ifmgd->work_list, list) { 1873 mgmt = (void *)skb->data;
2446 if (&wk->bss->cbss == req->bss && 1874 status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2447 wk->state == IEEE80211_MGD_STATE_IDLE) { 1875
2448 found = wk; 1876 if (status == WLAN_STATUS_SUCCESS) {
2449 break; 1877 mutex_lock(&wk->sdata->u.mgd.mtx);
1878 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
1879 mutex_unlock(&wk->sdata->u.mgd.mtx);
1880 /* oops -- internal error -- send timeout for now */
1881 cfg80211_send_assoc_timeout(wk->sdata->dev,
1882 wk->filter_ta);
1883 return WORK_DONE_DESTROY;
2450 } 1884 }
1885 mutex_unlock(&wk->sdata->u.mgd.mtx);
2451 } 1886 }
2452 1887
2453 if (!found) { 1888 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
2454 err = -ENOLINK; 1889 return WORK_DONE_DESTROY;
2455 goto out; 1890}
2456 }
2457 1891
2458 list_del(&found->list); 1892int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1893 struct cfg80211_assoc_request *req)
1894{
1895 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1896 struct ieee80211_bss *bss = (void *)req->bss->priv;
1897 struct ieee80211_work *wk;
1898 const u8 *ssid;
1899 int i;
2459 1900
2460 wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL); 1901 mutex_lock(&ifmgd->mtx);
2461 if (!wk) { 1902 if (ifmgd->associated) {
2462 list_add(&found->list, &ifmgd->work_list); 1903 if (!req->prev_bssid ||
2463 err = -ENOMEM; 1904 memcmp(req->prev_bssid, ifmgd->associated->bssid,
2464 goto out; 1905 ETH_ALEN)) {
1906 /*
1907 * We are already associated and the request was not a
1908 * reassociation request from the current BSS, so
1909 * reject it.
1910 */
1911 mutex_unlock(&ifmgd->mtx);
1912 return -EALREADY;
1913 }
1914
1915 /* Trying to reassociate - clear previous association state */
1916 ieee80211_set_disassoc(sdata);
2465 } 1917 }
1918 mutex_unlock(&ifmgd->mtx);
2466 1919
2467 list_add(&wk->list, &ifmgd->work_list); 1920 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
1921 if (!wk)
1922 return -ENOMEM;
2468 1923
2469 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 1924 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
1925 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2470 1926
2471 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 1927 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
2472 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 1928 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
@@ -2474,8 +1930,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2474 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 1930 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2475 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 1931 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2476 1932
2477 sdata->local->oper_channel = req->bss->channel;
2478 ieee80211_hw_config(sdata->local, 0);
2479 1933
2480 if (req->ie && req->ie_len) { 1934 if (req->ie && req->ie_len) {
2481 memcpy(wk->ie, req->ie, req->ie_len); 1935 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2483,12 +1937,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2483 } else 1937 } else
2484 wk->ie_len = 0; 1938 wk->ie_len = 0;
2485 1939
1940 wk->assoc.bss = req->bss;
1941
1942 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
1943
1944 /* new association always uses requested smps mode */
1945 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
1946 if (ifmgd->powersave)
1947 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
1948 else
1949 ifmgd->ap_smps = IEEE80211_SMPS_OFF;
1950 } else
1951 ifmgd->ap_smps = ifmgd->req_smps;
1952
1953 wk->assoc.smps = ifmgd->ap_smps;
1954 /*
1955 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
1956 * We still associate in non-HT mode (11a/b/g) if any one of these
1957 * ciphers is configured as pairwise.
1958 * We can set this to true for non-11n hardware, that'll be checked
1959 * separately along with the peer capabilities.
1960 */
1961 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
1962 wk->assoc.capability = req->bss->capability;
1963 wk->assoc.wmm_used = bss->wmm_used;
1964 wk->assoc.supp_rates = bss->supp_rates;
1965 wk->assoc.supp_rates_len = bss->supp_rates_len;
1966 wk->assoc.ht_information_ie =
1967 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
1968
1969 if (bss->wmm_used && bss->uapsd_supported &&
1970 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
1971 wk->assoc.uapsd_used = true;
1972 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
1973 } else {
1974 wk->assoc.uapsd_used = false;
1975 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
1976 }
1977
1978 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
1979 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
1980 wk->assoc.ssid_len = ssid[1];
1981
2486 if (req->prev_bssid) 1982 if (req->prev_bssid)
2487 memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN); 1983 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
2488 1984
2489 wk->state = IEEE80211_MGD_STATE_ASSOC; 1985 wk->type = IEEE80211_WORK_ASSOC;
2490 wk->tries = 0; 1986 wk->chan = req->bss->channel;
2491 wk->timeout = jiffies; /* run right away */ 1987 wk->sdata = sdata;
1988 wk->done = ieee80211_assoc_done;
2492 1989
2493 if (req->use_mfp) { 1990 if (req->use_mfp) {
2494 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 1991 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2503,69 +2000,65 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2503 else 2000 else
2504 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 2001 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2505 2002
2506 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 2003 ieee80211_add_work(wk);
2507 2004 return 0;
2508 err = 0;
2509
2510 out:
2511 mutex_unlock(&ifmgd->mtx);
2512 return err;
2513} 2005}
2514 2006
2515int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 2007int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2516 struct cfg80211_deauth_request *req, 2008 struct cfg80211_deauth_request *req,
2517 void *cookie) 2009 void *cookie)
2518{ 2010{
2011 struct ieee80211_local *local = sdata->local;
2519 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2012 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2520 struct ieee80211_mgd_work *wk; 2013 struct ieee80211_work *wk;
2521 const u8 *bssid = NULL; 2014 const u8 *bssid = req->bss->bssid;
2522 bool not_auth_yet = false;
2523 2015
2524 mutex_lock(&ifmgd->mtx); 2016 mutex_lock(&ifmgd->mtx);
2525 2017
2526 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 2018 if (ifmgd->associated == req->bss) {
2527 bssid = req->bss->bssid; 2019 bssid = req->bss->bssid;
2528 ieee80211_set_disassoc(sdata, true); 2020 ieee80211_set_disassoc(sdata);
2529 } else list_for_each_entry(wk, &ifmgd->work_list, list) {
2530 if (&wk->bss->cbss == req->bss) {
2531 bssid = req->bss->bssid;
2532 if (wk->state == IEEE80211_MGD_STATE_PROBE)
2533 not_auth_yet = true;
2534 list_del(&wk->list);
2535 kfree(wk);
2536 break;
2537 }
2538 }
2539
2540 /*
2541 * If somebody requests authentication and we haven't
2542 * sent out an auth frame yet there's no need to send
2543 * out a deauth frame either. If the state was PROBE,
2544 * then this is the case. If it's AUTH we have sent a
2545 * frame, and if it's IDLE we have completed the auth
2546 * process already.
2547 */
2548 if (not_auth_yet) {
2549 mutex_unlock(&ifmgd->mtx); 2021 mutex_unlock(&ifmgd->mtx);
2550 __cfg80211_auth_canceled(sdata->dev, bssid); 2022 } else {
2551 return 0; 2023 bool not_auth_yet = false;
2552 }
2553 2024
2554 /*
2555 * cfg80211 should catch this ... but it's racy since
2556 * we can receive a deauth frame, process it, hand it
2557 * to cfg80211 while that's in a locked section already
2558 * trying to tell us that the user wants to disconnect.
2559 */
2560 if (!bssid) {
2561 mutex_unlock(&ifmgd->mtx); 2025 mutex_unlock(&ifmgd->mtx);
2562 return -ENOLINK;
2563 }
2564 2026
2565 mutex_unlock(&ifmgd->mtx); 2027 mutex_lock(&local->work_mtx);
2028 list_for_each_entry(wk, &local->work_list, list) {
2029 if (wk->sdata != sdata)
2030 continue;
2031
2032 if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
2033 wk->type != IEEE80211_WORK_AUTH)
2034 continue;
2035
2036 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
2037 continue;
2038
2039 not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
2040 list_del_rcu(&wk->list);
2041 free_work(wk);
2042 break;
2043 }
2044 mutex_unlock(&local->work_mtx);
2045
2046 /*
2047 * If somebody requests authentication and we haven't
2048 * sent out an auth frame yet there's no need to send
2049 * out a deauth frame either. If the state was PROBE,
2050 * then this is the case. If it's AUTH we have sent a
2051 * frame, and if it's IDLE we have completed the auth
2052 * process already.
2053 */
2054 if (not_auth_yet) {
2055 __cfg80211_auth_canceled(sdata->dev, bssid);
2056 return 0;
2057 }
2058 }
2566 2059
2567 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 2060 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2568 sdata->dev->name, bssid, req->reason_code); 2061 sdata->name, bssid, req->reason_code);
2569 2062
2570 ieee80211_send_deauth_disassoc(sdata, bssid, 2063 ieee80211_send_deauth_disassoc(sdata, bssid,
2571 IEEE80211_STYPE_DEAUTH, req->reason_code, 2064 IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2590,15 +2083,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2590 * to cfg80211 while that's in a locked section already 2083 * to cfg80211 while that's in a locked section already
2591 * trying to tell us that the user wants to disconnect. 2084 * trying to tell us that the user wants to disconnect.
2592 */ 2085 */
2593 if (&ifmgd->associated->cbss != req->bss) { 2086 if (ifmgd->associated != req->bss) {
2594 mutex_unlock(&ifmgd->mtx); 2087 mutex_unlock(&ifmgd->mtx);
2595 return -ENOLINK; 2088 return -ENOLINK;
2596 } 2089 }
2597 2090
2598 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 2091 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2599 sdata->dev->name, req->bss->bssid, req->reason_code); 2092 sdata->name, req->bss->bssid, req->reason_code);
2600 2093
2601 ieee80211_set_disassoc(sdata, false); 2094 ieee80211_set_disassoc(sdata);
2602 2095
2603 mutex_unlock(&ifmgd->mtx); 2096 mutex_unlock(&ifmgd->mtx);
2604 2097
@@ -2610,3 +2103,38 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2610 2103
2611 return 0; 2104 return 0;
2612} 2105}
2106
2107int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
2108 struct ieee80211_channel *chan,
2109 enum nl80211_channel_type channel_type,
2110 const u8 *buf, size_t len, u64 *cookie)
2111{
2112 struct ieee80211_local *local = sdata->local;
2113 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2114 struct sk_buff *skb;
2115
2116 /* Check that we are on the requested channel for transmission */
2117 if ((chan != local->tmp_channel ||
2118 channel_type != local->tmp_channel_type) &&
2119 (chan != local->oper_channel ||
2120 channel_type != local->oper_channel_type))
2121 return -EBUSY;
2122
2123 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
2124 if (!skb)
2125 return -ENOMEM;
2126 skb_reserve(skb, local->hw.extra_tx_headroom);
2127
2128 memcpy(skb_put(skb, len), buf, len);
2129
2130 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
2131 IEEE80211_SKB_CB(skb)->flags |=
2132 IEEE80211_TX_INTFL_DONT_ENCRYPT;
2133 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
2134 IEEE80211_TX_CTL_REQ_TX_STATUS;
2135 skb->dev = sdata->dev;
2136 ieee80211_tx_skb(sdata, skb);
2137
2138 *cookie = (unsigned long) skb;
2139 return 0;
2140}
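
[Editorial note] The mlme.c changes above replace the per-interface MLME state machine (ieee80211_mgd_work, work_list, IEEE80211_MGD_STATE_*) with generic work items that carry a type and a done() callback; the callback decides whether the item is destroyed or requeued (a probe response, for instance, is requeued as an auth attempt in ieee80211_probe_auth_done). Below is a minimal, stand-alone sketch of that callback-driven pattern only — it is not the mac80211 code, and every name in it is illustrative.

/* Illustrative sketch of the "work item + done() callback" pattern:
 * the callback returns whether the item is destroyed or requeued
 * (e.g. a successful probe is converted into an auth attempt). */
#include <stdio.h>
#include <stdlib.h>

enum work_type { WORK_DIRECT_PROBE, WORK_AUTH, WORK_ASSOC };
enum work_done_result { WORK_DONE_DESTROY, WORK_DONE_REQUEUE };

struct work_item {
	enum work_type type;
	int tries;
	/* done() gets the (possibly NULL) response frame */
	enum work_done_result (*done)(struct work_item *wk, const char *resp);
};

/* Example callback: a probe response upgrades the item to an auth
 * attempt and requeues it; an auth response (or a timeout) ends it. */
static enum work_done_result probe_auth_done(struct work_item *wk,
					     const char *resp)
{
	if (!resp) {
		printf("timeout -> destroy\n");
		return WORK_DONE_DESTROY;
	}
	if (wk->type == WORK_AUTH) {
		printf("authenticated -> destroy\n");
		return WORK_DONE_DESTROY;
	}
	printf("probe answered -> requeue as auth\n");
	wk->type = WORK_AUTH;
	wk->tries = 0;
	return WORK_DONE_REQUEUE;
}

int main(void)
{
	struct work_item *wk = calloc(1, sizeof(*wk));
	const char *responses[] = { "probe-resp", "auth-resp" };

	wk->type = WORK_DIRECT_PROBE;
	wk->done = probe_auth_done;

	/* drive the item until its callback asks for destruction */
	for (int i = 0; wk && i < 2; i++) {
		if (wk->done(wk, responses[i]) == WORK_DONE_DESTROY) {
			free(wk);
			wk = NULL;
		}
	}
	return 0;
}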
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 000000000000..c36b1911987a
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,170 @@
1/*
2 * Off-channel operation helpers
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <net/mac80211.h>
16#include "ieee80211_i.h"
17
18/*
19 * inform AP that we will go to sleep so that it will buffer the frames
20 * while we scan
21 */
22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
23{
24 struct ieee80211_local *local = sdata->local;
25
26 local->offchannel_ps_enabled = false;
27
28 /* FIXME: what to do when local->pspolling is true? */
29
30 del_timer_sync(&local->dynamic_ps_timer);
31 cancel_work_sync(&local->dynamic_ps_enable_work);
32
33 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
34 local->offchannel_ps_enabled = true;
35 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
36 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37 }
38
39 if (!(local->offchannel_ps_enabled) ||
40 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
41 /*
42 * If power save was enabled, no need to send a nullfunc
43 * frame because AP knows that we are sleeping. But if the
44 * hardware is creating the nullfunc frame for power save
45 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
46 * enabled) and power save was enabled, the firmware just
47 * sent a null frame with power save disabled. So we need
48 * to send a new nullfunc frame to inform the AP that we
49 * are again sleeping.
50 */
51 ieee80211_send_nullfunc(local, sdata, 1);
52}
53
54/* inform AP that we are awake again, unless power save is enabled */
55static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
56{
57 struct ieee80211_local *local = sdata->local;
58
59 if (!local->ps_sdata)
60 ieee80211_send_nullfunc(local, sdata, 0);
61 else if (local->offchannel_ps_enabled) {
62 /*
63 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
64 * will send a nullfunc frame with the powersave bit set
65 * even though the AP already knows that we are sleeping.
66 * This could be avoided by sending a null frame with power
67 * save bit disabled before enabling the power save, but
68 * this doesn't gain anything.
69 *
70 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
71 * to send a nullfunc frame because AP already knows that
72 * we are sleeping, let's just enable power save mode in
73 * hardware.
74 */
75 local->hw.conf.flags |= IEEE80211_CONF_PS;
76 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
77 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
78 /*
79 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
80 * had been running before leaving the operating channel,
81 * restart the timer now and send a nullfunc frame to inform
82 * the AP that we are awake.
83 */
84 ieee80211_send_nullfunc(local, sdata, 0);
85 mod_timer(&local->dynamic_ps_timer, jiffies +
86 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
87 }
88}
89
90void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
91{
92 struct ieee80211_sub_if_data *sdata;
93
94 mutex_lock(&local->iflist_mtx);
95 list_for_each_entry(sdata, &local->interfaces, list) {
96 if (!ieee80211_sdata_running(sdata))
97 continue;
98
99 /* disable beaconing */
100 if (sdata->vif.type == NL80211_IFTYPE_AP ||
101 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
102 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
103 ieee80211_bss_info_change_notify(
104 sdata, BSS_CHANGED_BEACON_ENABLED);
105
106 /*
107 * only handle non-STA interfaces here, STA interfaces
108 * are handled in ieee80211_offchannel_stop_station(),
109 * e.g., from the background scan state machine.
110 *
111 * In addition, do not stop monitor interface to allow it to be
112 * used from user space controlled off-channel operations.
113 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR)
116 netif_tx_stop_all_queues(sdata->dev);
117 }
118 mutex_unlock(&local->iflist_mtx);
119}
120
121void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
122{
123 struct ieee80211_sub_if_data *sdata;
124
125 /*
126 * notify the AP about us leaving the channel and stop all STA interfaces
127 */
128 mutex_lock(&local->iflist_mtx);
129 list_for_each_entry(sdata, &local->interfaces, list) {
130 if (!ieee80211_sdata_running(sdata))
131 continue;
132
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
134 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata);
137 }
138 }
139 mutex_unlock(&local->iflist_mtx);
140}
141
142void ieee80211_offchannel_return(struct ieee80211_local *local,
143 bool enable_beaconing)
144{
145 struct ieee80211_sub_if_data *sdata;
146
147 mutex_lock(&local->iflist_mtx);
148 list_for_each_entry(sdata, &local->interfaces, list) {
149 if (!ieee80211_sdata_running(sdata))
150 continue;
151
152 /* Tell AP we're back */
153 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
154 if (sdata->u.mgd.associated)
155 ieee80211_offchannel_ps_disable(sdata);
156 }
157
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
159 netif_tx_wake_all_queues(sdata->dev);
160
161 /* re-enable beaconing */
162 if (enable_beaconing &&
163 (sdata->vif.type == NL80211_IFTYPE_AP ||
164 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
165 sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
166 ieee80211_bss_info_change_notify(
167 sdata, BSS_CHANGED_BEACON_ENABLED);
168 }
169 mutex_unlock(&local->iflist_mtx);
170}
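
[Editorial note] ieee80211_offchannel_ps_enable() above only transmits a nullfunc frame when the AP does not already believe the station is asleep: if power save was active and the stack (rather than the firmware) builds the nullfunc frames, nothing needs to be sent. A self-contained sketch of that decision follows; the struct and field names are stand-ins, not the real ieee80211_local fields.

/* Sketch of the off-channel PS decision; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_local {
	bool conf_ps;           /* power save currently enabled        */
	bool ps_nullfunc_stack; /* stack (not firmware) sends nullfunc */
	bool offchannel_ps_enabled;
};

/* Returns true when a nullfunc frame (PS bit set) must be sent
 * before leaving the operating channel. */
static bool offchannel_ps_enable(struct fake_local *l)
{
	l->offchannel_ps_enabled = false;

	if (l->conf_ps) {
		/* PS was on: drop it for the off-channel period,
		 * but remember that it was on */
		l->offchannel_ps_enabled = true;
		l->conf_ps = false;
	}

	/* If PS was already on and the stack built the nullfunc
	 * frames, the AP already thinks we are asleep; in every
	 * other case it has to be told explicitly. */
	return !(l->offchannel_ps_enabled && l->ps_nullfunc_stack);
}

int main(void)
{
	struct fake_local l = { .conf_ps = true, .ps_nullfunc_stack = true };

	printf("send nullfunc: %s\n",
	       offchannel_ps_enable(&l) ? "yes" : "no");
	return 0;
}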
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988fe..0e64484e861c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,9 +10,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct ieee80211_if_init_conf conf;
14 struct sta_info *sta; 13 struct sta_info *sta;
15 unsigned long flags;
16 14
17 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
18 16
@@ -56,22 +54,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
56 rcu_read_unlock(); 54 rcu_read_unlock();
57 55
58 /* remove STAs */ 56 /* remove STAs */
59 spin_lock_irqsave(&local->sta_lock, flags); 57 mutex_lock(&local->sta_mtx);
60 list_for_each_entry(sta, &local->sta_list, list) { 58 list_for_each_entry(sta, &local->sta_list, list) {
61 if (local->ops->sta_notify) { 59 if (sta->uploaded) {
62 sdata = sta->sdata; 60 sdata = sta->sdata;
63 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 61 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
64 sdata = container_of(sdata->bss, 62 sdata = container_of(sdata->bss,
65 struct ieee80211_sub_if_data, 63 struct ieee80211_sub_if_data,
66 u.ap); 64 u.ap);
67 65
68 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 66 drv_sta_remove(local, sdata, &sta->sta);
69 &sta->sta);
70 } 67 }
71 68
72 mesh_plink_quiesce(sta); 69 mesh_plink_quiesce(sta);
73 } 70 }
74 spin_unlock_irqrestore(&local->sta_lock, flags); 71 mutex_unlock(&local->sta_mtx);
75 72
76 /* remove all interfaces */ 73 /* remove all interfaces */
77 list_for_each_entry(sdata, &local->interfaces, list) { 74 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -93,17 +90,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
93 break; 90 break;
94 } 91 }
95 92
96 if (!netif_running(sdata->dev)) 93 if (!ieee80211_sdata_running(sdata))
97 continue; 94 continue;
98 95
99 /* disable beaconing */ 96 /* disable beaconing */
100 ieee80211_bss_info_change_notify(sdata, 97 ieee80211_bss_info_change_notify(sdata,
101 BSS_CHANGED_BEACON_ENABLED); 98 BSS_CHANGED_BEACON_ENABLED);
102 99
103 conf.vif = &sdata->vif; 100 drv_remove_interface(local, &sdata->vif);
104 conf.type = sdata->vif.type;
105 conf.mac_addr = sdata->dev->dev_addr;
106 drv_remove_interface(local, &conf);
107 } 101 }
108 102
109 /* stop hardware - this must stop RX */ 103 /* stop hardware - this must stop RX */
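
[Editorial note] The pm.c hunk above swaps the irq-disabled spinlock around the suspend-time station walk for sta_mtx and calls drv_sta_remove() only for uploaded stations, presumably so the per-station driver callback may sleep. A rough stand-alone sketch of that "walk the list under a mutex, call a sleepable remove hook per uploaded entry" pattern, with pthreads standing in for the kernel primitives and all names illustrative:

/* Sketch only -- not the mac80211 suspend path. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool uploaded;
	struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void driver_remove(struct entry *e)
{
	/* may sleep in a real driver; holding a mutex makes that legal */
	printf("removing entry %p\n", (void *)e);
}

static void suspend_remove_all(struct entry *head)
{
	pthread_mutex_lock(&list_lock);
	for (struct entry *e = head; e; e = e->next)
		if (e->uploaded)
			driver_remove(e);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct entry b = { .uploaded = false, .next = NULL };
	struct entry a = { .uploaded = true,  .next = &b };

	suspend_remove_all(&a);
	return 0;
}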
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 12a2bff7dcdb..6d0bd198af19 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/slab.h>
13#include "rate.h" 14#include "rate.h"
14#include "ieee80211_i.h" 15#include "ieee80211_i.h"
15#include "debugfs.h" 16#include "debugfs.h"
@@ -145,7 +146,7 @@ static const struct file_operations rcname_ops = {
145}; 146};
146#endif 147#endif
147 148
148struct rate_control_ref *rate_control_alloc(const char *name, 149static struct rate_control_ref *rate_control_alloc(const char *name,
149 struct ieee80211_local *local) 150 struct ieee80211_local *local)
150{ 151{
151 struct dentry *debugfsdir = NULL; 152 struct dentry *debugfsdir = NULL;
@@ -207,6 +208,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); 208 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
208} 209}
209 210
211static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
212{
213 u8 i;
214
215 if (basic_rates == 0)
216 return; /* assume basic rates unknown and accept rate */
217 if (*idx < 0)
218 return;
219 if (basic_rates & (1 << *idx))
220 return; /* selected rate is a basic rate */
221
222 for (i = *idx + 1; i <= max_rate_idx; i++) {
223 if (basic_rates & (1 << i)) {
224 *idx = i;
225 return;
226 }
227 }
228
229 /* could not find a basic rate; use original selection */
230}
231
210bool rate_control_send_low(struct ieee80211_sta *sta, 232bool rate_control_send_low(struct ieee80211_sta *sta,
211 void *priv_sta, 233 void *priv_sta,
212 struct ieee80211_tx_rate_control *txrc) 234 struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +240,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
218 info->control.rates[0].count = 240 info->control.rates[0].count =
219 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 241 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
220 1 : txrc->hw->max_rate_tries; 242 1 : txrc->hw->max_rate_tries;
243 if (!sta && txrc->ap)
244 rc_send_low_broadcast(&info->control.rates[0].idx,
245 txrc->bss_conf->basic_rates,
246 txrc->sband->n_bitrates);
221 return true; 247 return true;
222 } 248 }
223 return false; 249 return false;
224} 250}
225EXPORT_SYMBOL(rate_control_send_low); 251EXPORT_SYMBOL(rate_control_send_low);
226 252
253static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
254 int n_bitrates, u32 mask)
255{
256 int j;
257
258 /* See whether the selected rate or anything below it is allowed. */
259 for (j = rate->idx; j >= 0; j--) {
260 if (mask & (1 << j)) {
261 /* Okay, found a suitable rate. Use it. */
262 rate->idx = j;
263 return;
264 }
265 }
266
267 /* Try to find a higher rate that would be allowed */
268 for (j = rate->idx + 1; j < n_bitrates; j++) {
269 if (mask & (1 << j)) {
270 /* Okay, found a suitable rate. Use it. */
271 rate->idx = j;
272 return;
273 }
274 }
275
276 /*
277 * Uh.. No suitable rate exists. This should not really happen with
278 * sane TX rate mask configurations. However, should someone manage to
279 * configure supported rates and TX rate mask in incompatible way,
280 * allow the frame to be transmitted with whatever the rate control
281 * selected.
282 */
283}
284
227void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 285void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
228 struct sta_info *sta, 286 struct sta_info *sta,
229 struct ieee80211_tx_rate_control *txrc) 287 struct ieee80211_tx_rate_control *txrc)
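The rc_send_low_broadcast() helper added above bumps the lowest-rate selection for broadcast frames (no station entry) up to the next rate flagged in the BSS basic-rate bitmap, keeping the original pick if no basic rate exists at or above it. A minimal userspace sketch of that walk, not part of the patch; the bitmap, band size and starting index below are invented for illustration:

#include <stdio.h>

/* same walk as rc_send_low_broadcast(): bump *idx to the next basic rate */
static void send_low_broadcast(signed char *idx, unsigned int basic_rates,
                               unsigned char max_rate_idx)
{
        unsigned char i;

        if (basic_rates == 0 || *idx < 0)
                return;                 /* basic rates unknown, or no rate picked */
        if (basic_rates & (1u << *idx))
                return;                 /* selection is already a basic rate */

        for (i = *idx + 1; i <= max_rate_idx; i++) {
                if (basic_rates & (1u << i)) {
                        *idx = i;       /* first basic rate above the selection */
                        return;
                }
        }
        /* no basic rate above the selection: keep the original pick */
}

int main(void)
{
        /* hypothetical band: basic rates at indexes 0, 2 and 4 */
        unsigned int basic_rates = (1u << 0) | (1u << 2) | (1u << 4);
        signed char idx = 1;            /* rate control picked a non-basic rate */

        send_low_broadcast(&idx, basic_rates, 11);
        printf("broadcast rate index: %d\n", idx);      /* prints 2 */
        return 0;
}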
@@ -233,6 +291,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
233 struct ieee80211_sta *ista = NULL; 291 struct ieee80211_sta *ista = NULL;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 292 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
235 int i; 293 int i;
294 u32 mask;
236 295
237 if (sta) { 296 if (sta) {
238 ista = &sta->sta; 297 ista = &sta->sta;
@@ -248,23 +307,31 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
248 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 307 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
249 return; 308 return;
250 309
251 if (sta && sdata->force_unicast_rateidx > -1) { 310 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
252 info->control.rates[0].idx = sdata->force_unicast_rateidx;
253 } else {
254 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
255 info->flags |= IEEE80211_TX_INTFL_RCALGO;
256 }
257 311
258 /* 312 /*
259 * try to enforce the maximum rate the user wanted 313 * Try to enforce the rateidx mask the user wanted. skip this if the
314 * default mask (allow all rates) is used to save some processing for
315 * the common case.
260 */ 316 */
261 if (sdata->max_ratectrl_rateidx > -1) 317 mask = sdata->rc_rateidx_mask[info->band];
318 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
319 if (sta) {
320 /* Filter out rates that the STA does not support */
321 mask &= sta->sta.supp_rates[info->band];
322 }
323 /*
324 * Make sure the rate index selected for each TX rate is
325 * included in the configured mask and change the rate indexes
326 * if needed.
327 */
262 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 328 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
329 /* Rate masking supports only legacy rates for now */
263 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 330 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
264 continue; 331 continue;
265 info->control.rates[i].idx = 332 rate_idx_match_mask(&info->control.rates[i],
266 min_t(s8, info->control.rates[i].idx, 333 txrc->sband->n_bitrates, mask);
267 sdata->max_ratectrl_rateidx); 334 }
268 } 335 }
269 336
270 BUG_ON(info->control.rates[0].idx < 0); 337 BUG_ON(info->control.rates[0].idx < 0);
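The mask handling added to rate_control_get_rate() above first checks whether the configured rc_rateidx_mask differs from the all-ones default before doing any work, then uses rate_idx_match_mask() to move each selected legacy rate onto an allowed index, preferring the closest allowed rate at or below the selection. A self-contained sketch of that matching; the band size, mask and starting index are hypothetical, and this is an illustration rather than the kernel code path:

#include <stdio.h>

struct tx_rate {
        int idx;
};

/* same search order as rate_idx_match_mask(): the selected rate or the
 * closest allowed rate below it, otherwise the closest allowed rate above */
static void match_mask(struct tx_rate *rate, int n_bitrates, unsigned int mask)
{
        int j;

        for (j = rate->idx; j >= 0; j--) {
                if (mask & (1u << j)) {
                        rate->idx = j;
                        return;
                }
        }
        for (j = rate->idx + 1; j < n_bitrates; j++) {
                if (mask & (1u << j)) {
                        rate->idx = j;
                        return;
                }
        }
        /* mask and supported rates are incompatible: leave the selection alone */
}

int main(void)
{
        int n_bitrates = 12;                            /* hypothetical band */
        unsigned int def = (1u << n_bitrates) - 1;      /* default "all rates" mask */
        unsigned int mask = (1u << 4) | (1u << 8);      /* user-configured mask */
        struct tx_rate rate = { .idx = 6 };             /* rate control's pick */

        if (mask != def) {              /* skip the work for the common default */
                match_mask(&rate, n_bitrates, mask);
                printf("masked rate index: %d\n", rate.idx);    /* prints 4 */
        }
        return 0;
}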
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e27..065a96190e32 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -26,10 +26,6 @@ struct rate_control_ref {
26 struct kref kref; 26 struct kref kref;
27}; 27};
28 28
29/* Get a reference to the rate control algorithm. If `name' is NULL, get the
30 * first available algorithm. */
31struct rate_control_ref *rate_control_alloc(const char *name,
32 struct ieee80211_local *local);
33void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 29void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
34 struct sta_info *sta, 30 struct sta_info *sta,
35 struct ieee80211_tx_rate_control *txrc); 31 struct ieee80211_tx_rate_control *txrc);
@@ -44,10 +40,11 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 40 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 41 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 42 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
48 43
49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO)) 44 if (!ref)
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 45 return;
46
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
51} 48}
52 49
53 50
@@ -69,7 +66,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
69 66
70static inline void rate_control_rate_update(struct ieee80211_local *local, 67static inline void rate_control_rate_update(struct ieee80211_local *local,
71 struct ieee80211_supported_band *sband, 68 struct ieee80211_supported_band *sband,
72 struct sta_info *sta, u32 changed) 69 struct sta_info *sta, u32 changed,
70 enum nl80211_channel_type oper_chan_type)
73{ 71{
74 struct rate_control_ref *ref = local->rate_ctrl; 72 struct rate_control_ref *ref = local->rate_ctrl;
75 struct ieee80211_sta *ista = &sta->sta; 73 struct ieee80211_sta *ista = &sta->sta;
@@ -77,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
77 75
78 if (ref && ref->ops->rate_update) 76 if (ref && ref->ops->rate_update)
79 ref->ops->rate_update(ref->priv, sband, ista, 77 ref->ops->rate_update(ref->priv, sband, ista,
80 priv_sta, changed); 78 priv_sta, changed, oper_chan_type);
81} 79}
82 80
83static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, 81static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
@@ -115,7 +113,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
115#endif 113#endif
116} 114}
117 115
118/* functions for rate control related to a device */ 116/* Get a reference to the rate control algorithm. If `name' is NULL, get the
117 * first available algorithm. */
119int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 118int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
120 const char *name); 119 const char *name);
121void rate_control_deinitialize(struct ieee80211_local *local); 120void rate_control_deinitialize(struct ieee80211_local *local);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 6e5d68b4e427..818abfae9007 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -50,6 +50,7 @@
50#include <linux/debugfs.h> 50#include <linux/debugfs.h>
51#include <linux/random.h> 51#include <linux/random.h>
52#include <linux/ieee80211.h> 52#include <linux/ieee80211.h>
53#include <linux/slab.h>
53#include <net/mac80211.h> 54#include <net/mac80211.h>
54#include "rate.h" 55#include "rate.h"
55#include "rc80211_minstrel.h" 56#include "rc80211_minstrel.h"
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index a715d9454f64..0e1f12b1b6dd 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -49,6 +49,7 @@
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/debugfs.h> 50#include <linux/debugfs.h>
51#include <linux/ieee80211.h> 51#include <linux/ieee80211.h>
52#include <linux/slab.h>
52#include <net/mac80211.h> 53#include <net/mac80211.h>
53#include "rc80211_minstrel.h" 54#include "rc80211_minstrel.h"
54 55
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 29bc4c516238..aeda65466f3e 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/debugfs.h> 15#include <linux/debugfs.h>
16#include <linux/slab.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "rate.h" 18#include "rate.h"
18#include "mesh.h" 19#include "mesh.h"
@@ -157,9 +158,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
157 158
158 /* In case nothing happened during the previous control interval, turn 159 /* In case nothing happened during the previous control interval, turn
159 * the sharpening factor on. */ 160 * the sharpening factor on. */
160 period = (HZ * pinfo->sampling_period + 500) / 1000; 161 period = msecs_to_jiffies(pinfo->sampling_period);
161 if (!period)
162 period = 1;
163 if (jiffies - spinfo->last_sample > 2 * period) 162 if (jiffies - spinfo->last_sample > 2 * period)
164 spinfo->sharp_cnt = pinfo->sharpen_duration; 163 spinfo->sharp_cnt = pinfo->sharpen_duration;
165 164
@@ -252,9 +251,7 @@ static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_ba
252 } 251 }
253 252
254 /* Update PID controller state. */ 253 /* Update PID controller state. */
255 period = (HZ * pinfo->sampling_period + 500) / 1000; 254 period = msecs_to_jiffies(pinfo->sampling_period);
256 if (!period)
257 period = 1;
258 if (time_after(jiffies, spinfo->last_sample + period)) 255 if (time_after(jiffies, spinfo->last_sample + period))
259 rate_control_pid_sample(pinfo, sband, sta, spinfo); 256 rate_control_pid_sample(pinfo, sband, sta, spinfo);
260} 257}
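The two rc80211_pid hunks above drop the open-coded millisecond-to-jiffy conversion (round to nearest, then clamp to one jiffy) in favour of msecs_to_jiffies(), which rounds up and so never yields zero for a non-zero sampling period. A small userspace sketch of the arithmetic, with HZ and the sample values picked arbitrarily for illustration:

#include <stdio.h>

#define HZ 100          /* hypothetical tick rate: 10 ms per jiffy */

/* removed conversion: round to the nearest jiffy, then force a minimum of 1 */
static unsigned long old_ms_to_jiffies(unsigned int ms)
{
        unsigned long period = (HZ * ms + 500) / 1000;

        return period ? period : 1;
}

/* round-up conversion, as msecs_to_jiffies() behaves when HZ divides 1000 */
static unsigned long new_ms_to_jiffies(unsigned int ms)
{
        return (ms + (1000 / HZ) - 1) / (1000 / HZ);
}

int main(void)
{
        unsigned int samples[] = { 3, 5, 10, 125 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%3u ms -> old %lu, new %lu jiffies\n", samples[i],
                       old_ms_to_jiffies(samples[i]),
                       new_ms_to_jiffies(samples[i]));
        return 0;
}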
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 45667054a5f3..47438b4a9af5 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -12,6 +12,7 @@
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15 16
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "rate.h" 18#include "rate.h"
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 82a30c1bf3ab..04ea07f0e78a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/jiffies.h> 12#include <linux/jiffies.h>
13#include <linux/slab.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
@@ -283,15 +284,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
283 skb->protocol = htons(ETH_P_802_2); 284 skb->protocol = htons(ETH_P_802_2);
284 285
285 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 286 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
286 if (!netif_running(sdata->dev))
287 continue;
288
289 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 287 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
290 continue; 288 continue;
291 289
292 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 290 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
293 continue; 291 continue;
294 292
293 if (!ieee80211_sdata_running(sdata))
294 continue;
295
295 if (prev_dev) { 296 if (prev_dev) {
296 skb2 = skb_clone(skb, GFP_ATOMIC); 297 skb2 = skb_clone(skb, GFP_ATOMIC);
297 if (skb2) { 298 if (skb2) {
@@ -361,7 +362,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
361 * boundary. In the case of regular frames, this simply means aligning the 362 * boundary. In the case of regular frames, this simply means aligning the
362 * payload to a four-byte boundary (because either the IP header is directly 363 * payload to a four-byte boundary (because either the IP header is directly
363 * contained, or IV/RFC1042 headers that have a length divisible by four are 364 * contained, or IV/RFC1042 headers that have a length divisible by four are
364 * in front of it). 365 * in front of it). If the payload data is not properly aligned and the
366 * architecture doesn't support efficient unaligned operations, mac80211
367 * will align the data.
365 * 368 *
366 * With A-MSDU frames, however, the payload data address must yield two modulo 369 * With A-MSDU frames, however, the payload data address must yield two modulo
367 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 370 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +378,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
375 */ 378 */
376static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 379static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
377{ 380{
378 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 381#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
379 int hdrlen; 382 WARN_ONCE((unsigned long)rx->skb->data & 1,
380 383 "unaligned packet at 0x%p\n", rx->skb->data);
381#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
382 return;
383#endif 384#endif
384
385 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
386 "unaligned packet at 0x%p\n", rx->skb->data))
387 return;
388
389 if (!ieee80211_is_data_present(hdr->frame_control))
390 return;
391
392 hdrlen = ieee80211_hdrlen(hdr->frame_control);
393 if (rx->flags & IEEE80211_RX_AMSDU)
394 hdrlen += ETH_HLEN;
395 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
396 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
397} 385}
398 386
399 387
@@ -476,7 +464,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
476{ 464{
477 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
478 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 466 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
479 char *dev_addr = rx->sdata->dev->dev_addr; 467 char *dev_addr = rx->sdata->vif.addr;
480 468
481 if (ieee80211_is_data(hdr->frame_control)) { 469 if (ieee80211_is_data(hdr->frame_control)) {
482 if (is_multicast_ether_addr(hdr->addr1)) { 470 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1009,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
1021 1009
1022 atomic_inc(&sdata->bss->num_sta_ps); 1010 atomic_inc(&sdata->bss->num_sta_ps);
1023 set_sta_flags(sta, WLAN_STA_PS_STA); 1011 set_sta_flags(sta, WLAN_STA_PS_STA);
1024 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); 1012 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1025#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1013#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1014 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1027 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1015 sdata->name, sta->sta.addr, sta->sta.aid);
1028#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1016#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1029} 1017}
1030 1018
@@ -1038,13 +1026,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
1038 1026
1039#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1027#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1040 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1028 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1041 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1029 sdata->name, sta->sta.addr, sta->sta.aid);
1042#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1030#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1043 1031
1044 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1032 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1045#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1033#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1046 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1034 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1047 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1035 sdata->name, sta->sta.addr, sta->sta.aid);
1048#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1036#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1049 return; 1037 return;
1050 } 1038 }
@@ -1124,6 +1112,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1124 if (ieee80211_is_nullfunc(hdr->frame_control) || 1112 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1125 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1113 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1126 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1114 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1115
1116 /*
1117 * If we receive a 4-addr nullfunc frame from a STA
1118 * that was not moved to a 4-addr STA vlan yet, drop
1119 * the frame to the monitor interface, to make sure
1120 * that hostapd sees it
1121 */
1122 if (ieee80211_has_a4(hdr->frame_control) &&
1123 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1124 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1125 !rx->sdata->u.vlan.sta)))
1126 return RX_DROP_MONITOR;
1127 /* 1127 /*
1128 * Update counter and free packet here to avoid 1128 * Update counter and free packet here to avoid
1129 * counting this as a dropped packet. 1129 * counting this as a dropped packet.
@@ -1156,7 +1156,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1156 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1156 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1157 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1157 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1158 "addr1=%pM addr2=%pM\n", 1158 "addr1=%pM addr2=%pM\n",
1159 sdata->dev->name, idx, 1159 sdata->name, idx,
1160 jiffies - entry->first_frag_time, entry->seq, 1160 jiffies - entry->first_frag_time, entry->seq,
1161 entry->last_frag, hdr->addr1, hdr->addr2); 1161 entry->last_frag, hdr->addr1, hdr->addr2);
1162#endif 1162#endif
@@ -1398,6 +1398,21 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1398 ieee80211_is_data(fc) && 1398 ieee80211_is_data(fc) &&
1399 (rx->key || rx->sdata->drop_unencrypted))) 1399 (rx->key || rx->sdata->drop_unencrypted)))
1400 return -EACCES; 1400 return -EACCES;
1401
1402 return 0;
1403}
1404
1405static int
1406ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1407{
1408 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1409 __le16 fc = hdr->frame_control;
1410 int res;
1411
1412 res = ieee80211_drop_unencrypted(rx, fc);
1413 if (unlikely(res))
1414 return res;
1415
1401 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { 1416 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1402 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1417 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1403 rx->key)) 1418 rx->key))
@@ -1424,7 +1439,6 @@ static int
1424__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1439__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1425{ 1440{
1426 struct ieee80211_sub_if_data *sdata = rx->sdata; 1441 struct ieee80211_sub_if_data *sdata = rx->sdata;
1427 struct net_device *dev = sdata->dev;
1428 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1429 1443
1430 if (ieee80211_has_a4(hdr->frame_control) && 1444 if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1450,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1436 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) 1450 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1437 return -1; 1451 return -1;
1438 1452
1439 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); 1453 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1440} 1454}
1441 1455
1442/* 1456/*
@@ -1453,7 +1467,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1453 * of whether the frame was encrypted or not. 1467 * of whether the frame was encrypted or not.
1454 */ 1468 */
1455 if (ehdr->h_proto == htons(ETH_P_PAE) && 1469 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1456 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 || 1470 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1457 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1471 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1458 return true; 1472 return true;
1459 1473
@@ -1472,7 +1486,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1472{ 1486{
1473 struct ieee80211_sub_if_data *sdata = rx->sdata; 1487 struct ieee80211_sub_if_data *sdata = rx->sdata;
1474 struct net_device *dev = sdata->dev; 1488 struct net_device *dev = sdata->dev;
1475 struct ieee80211_local *local = rx->local;
1476 struct sk_buff *skb, *xmit_skb; 1489 struct sk_buff *skb, *xmit_skb;
1477 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1490 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1478 struct sta_info *dsta; 1491 struct sta_info *dsta;
@@ -1495,8 +1508,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1495 printk(KERN_DEBUG "%s: failed to clone " 1508 printk(KERN_DEBUG "%s: failed to clone "
1496 "multicast frame\n", dev->name); 1509 "multicast frame\n", dev->name);
1497 } else { 1510 } else {
1498 dsta = sta_info_get(local, skb->data); 1511 dsta = sta_info_get(sdata, skb->data);
1499 if (dsta && dsta->sdata->dev == dev) { 1512 if (dsta) {
1500 /* 1513 /*
1501 * The destination station is associated to 1514 * The destination station is associated to
1502 * this AP (in this VLAN), so send the frame 1515 * this AP (in this VLAN), so send the frame
@@ -1512,7 +1525,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1512 if (skb) { 1525 if (skb) {
1513 int align __maybe_unused; 1526 int align __maybe_unused;
1514 1527
1515#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 1528#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1516 /* 1529 /*
1517 * 'align' will only take the values 0 or 2 here 1530 * 'align' will only take the values 0 or 2 here
1518 * since all frames are required to be aligned 1531 * since all frames are required to be aligned
@@ -1556,16 +1569,10 @@ static ieee80211_rx_result debug_noinline
1556ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1569ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1557{ 1570{
1558 struct net_device *dev = rx->sdata->dev; 1571 struct net_device *dev = rx->sdata->dev;
1559 struct ieee80211_local *local = rx->local; 1572 struct sk_buff *skb = rx->skb;
1560 u16 ethertype;
1561 u8 *payload;
1562 struct sk_buff *skb = rx->skb, *frame = NULL;
1563 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1573 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1564 __le16 fc = hdr->frame_control; 1574 __le16 fc = hdr->frame_control;
1565 const struct ethhdr *eth; 1575 struct sk_buff_head frame_list;
1566 int remaining, err;
1567 u8 dst[ETH_ALEN];
1568 u8 src[ETH_ALEN];
1569 1576
1570 if (unlikely(!ieee80211_is_data(fc))) 1577 if (unlikely(!ieee80211_is_data(fc)))
1571 return RX_CONTINUE; 1578 return RX_CONTINUE;
@@ -1576,94 +1583,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1576 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1583 if (!(rx->flags & IEEE80211_RX_AMSDU))
1577 return RX_CONTINUE; 1584 return RX_CONTINUE;
1578 1585
1579 err = __ieee80211_data_to_8023(rx); 1586 if (ieee80211_has_a4(hdr->frame_control) &&
1580 if (unlikely(err)) 1587 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1588 !rx->sdata->u.vlan.sta)
1581 return RX_DROP_UNUSABLE; 1589 return RX_DROP_UNUSABLE;
1582 1590
1583 skb->dev = dev; 1591 if (is_multicast_ether_addr(hdr->addr1) &&
1584 1592 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1585 dev->stats.rx_packets++; 1593 rx->sdata->u.vlan.sta) ||
1586 dev->stats.rx_bytes += skb->len; 1594 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1587 1595 rx->sdata->u.mgd.use_4addr)))
1588 /* skip the wrapping header */
1589 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1590 if (!eth)
1591 return RX_DROP_UNUSABLE; 1596 return RX_DROP_UNUSABLE;
1592 1597
1593 while (skb != frame) { 1598 skb->dev = dev;
1594 u8 padding; 1599 __skb_queue_head_init(&frame_list);
1595 __be16 len = eth->h_proto;
1596 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1597
1598 remaining = skb->len;
1599 memcpy(dst, eth->h_dest, ETH_ALEN);
1600 memcpy(src, eth->h_source, ETH_ALEN);
1601
1602 padding = ((4 - subframe_len) & 0x3);
1603 /* the last MSDU has no padding */
1604 if (subframe_len > remaining)
1605 return RX_DROP_UNUSABLE;
1606 1600
1607 skb_pull(skb, sizeof(struct ethhdr)); 1601 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1608 /* if last subframe reuse skb */ 1602 rx->sdata->vif.type,
1609 if (remaining <= subframe_len + padding) 1603 rx->local->hw.extra_tx_headroom);
1610 frame = skb;
1611 else {
1612 /*
1613 * Allocate and reserve two bytes more for payload
1614 * alignment since sizeof(struct ethhdr) is 14.
1615 */
1616 frame = dev_alloc_skb(
1617 ALIGN(local->hw.extra_tx_headroom, 4) +
1618 subframe_len + 2);
1619
1620 if (frame == NULL)
1621 return RX_DROP_UNUSABLE;
1622
1623 skb_reserve(frame,
1624 ALIGN(local->hw.extra_tx_headroom, 4) +
1625 sizeof(struct ethhdr) + 2);
1626 memcpy(skb_put(frame, ntohs(len)), skb->data,
1627 ntohs(len));
1628
1629 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1630 padding);
1631 if (!eth) {
1632 dev_kfree_skb(frame);
1633 return RX_DROP_UNUSABLE;
1634 }
1635 }
1636 1604
1637 skb_reset_network_header(frame); 1605 while (!skb_queue_empty(&frame_list)) {
1638 frame->dev = dev; 1606 rx->skb = __skb_dequeue(&frame_list);
1639 frame->priority = skb->priority;
1640 rx->skb = frame;
1641
1642 payload = frame->data;
1643 ethertype = (payload[6] << 8) | payload[7];
1644
1645 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1646 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1647 compare_ether_addr(payload,
1648 bridge_tunnel_header) == 0)) {
1649 /* remove RFC1042 or Bridge-Tunnel
1650 * encapsulation and replace EtherType */
1651 skb_pull(frame, 6);
1652 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1653 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1654 } else {
1655 memcpy(skb_push(frame, sizeof(__be16)),
1656 &len, sizeof(__be16));
1657 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1658 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1659 }
1660 1607
1661 if (!ieee80211_frame_allowed(rx, fc)) { 1608 if (!ieee80211_frame_allowed(rx, fc)) {
1662 if (skb == frame) /* last frame */ 1609 dev_kfree_skb(rx->skb);
1663 return RX_DROP_UNUSABLE;
1664 dev_kfree_skb(frame);
1665 continue; 1610 continue;
1666 } 1611 }
1612 dev->stats.rx_packets++;
1613 dev->stats.rx_bytes += rx->skb->len;
1667 1614
1668 ieee80211_deliver_skb(rx); 1615 ieee80211_deliver_skb(rx);
1669 } 1616 }
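The open-coded A-MSDU splitting removed above is replaced by the shared ieee80211_amsdu_to_8023s() helper, but the framing being parsed is unchanged: each subframe starts with a 14-byte 802.3 header whose length field gives the payload size, and every subframe except the last is padded out to a 4-byte boundary. A standalone sketch of that walk over a made-up buffer; the helper and the sample data are illustrative only and not taken from the patch:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */

#define ETH_ALEN 6
#define ETH_HLEN 14

/* walk the A-MSDU subframes: 802.3 header, payload, pad to 4 bytes */
static void walk_amsdu(const unsigned char *buf, size_t len)
{
        size_t off = 0;

        while (off + ETH_HLEN <= len) {
                unsigned short plen;
                size_t subframe, pad;

                memcpy(&plen, buf + off + 2 * ETH_ALEN, sizeof(plen));
                plen = ntohs(plen);
                subframe = ETH_HLEN + plen;
                pad = (4 - subframe % 4) % 4;   /* the last subframe has none */

                if (off + subframe > len)
                        break;                  /* truncated subframe: stop */
                printf("subframe at offset %zu: %u payload bytes\n",
                       off, (unsigned int)plen);
                off += subframe + pad;
        }
}

int main(void)
{
        /* two hypothetical subframes with 3-byte and 5-byte payloads */
        unsigned char buf[64] = { 0 };
        size_t off = 0;

        buf[off + 12] = 0; buf[off + 13] = 3;   /* first 802.3 length field */
        off += ETH_HLEN + 3 + 3;                /* 17 data bytes + 3 pad bytes */
        buf[off + 12] = 0; buf[off + 13] = 5;   /* second 802.3 length field */
        off += ETH_HLEN + 5;                    /* last subframe, no padding */

        walk_amsdu(buf, off);
        return 0;
}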
@@ -1721,7 +1668,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1721 1668
1722 /* Frame has reached destination. Don't forward */ 1669 /* Frame has reached destination. Don't forward */
1723 if (!is_multicast_ether_addr(hdr->addr1) && 1670 if (!is_multicast_ether_addr(hdr->addr1) &&
1724 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0) 1671 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1725 return RX_CONTINUE; 1672 return RX_CONTINUE;
1726 1673
1727 mesh_hdr->ttl--; 1674 mesh_hdr->ttl--;
@@ -1738,10 +1685,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1738 1685
1739 if (!fwd_skb && net_ratelimit()) 1686 if (!fwd_skb && net_ratelimit())
1740 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1687 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1741 sdata->dev->name); 1688 sdata->name);
1742 1689
1743 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1690 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1744 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN); 1691 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1745 info = IEEE80211_SKB_CB(fwd_skb); 1692 info = IEEE80211_SKB_CB(fwd_skb);
1746 memset(info, 0, sizeof(*info)); 1693 memset(info, 0, sizeof(*info));
1747 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1694 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1788,6 +1735,7 @@ static ieee80211_rx_result debug_noinline
1788ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1735ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1789{ 1736{
1790 struct ieee80211_sub_if_data *sdata = rx->sdata; 1737 struct ieee80211_sub_if_data *sdata = rx->sdata;
1738 struct ieee80211_local *local = rx->local;
1791 struct net_device *dev = sdata->dev; 1739 struct net_device *dev = sdata->dev;
1792 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1740 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1793 __le16 fc = hdr->frame_control; 1741 __le16 fc = hdr->frame_control;
@@ -1819,6 +1767,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1819 dev->stats.rx_packets++; 1767 dev->stats.rx_packets++;
1820 dev->stats.rx_bytes += rx->skb->len; 1768 dev->stats.rx_bytes += rx->skb->len;
1821 1769
1770 if (ieee80211_is_data(hdr->frame_control) &&
1771 !is_multicast_ether_addr(hdr->addr1) &&
1772 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1773 mod_timer(&local->dynamic_ps_timer, jiffies +
1774 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1775 }
1776
1822 ieee80211_deliver_skb(rx); 1777 ieee80211_deliver_skb(rx);
1823 1778
1824 return RX_QUEUED; 1779 return RX_QUEUED;
@@ -1872,7 +1827,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1872 struct sk_buff *skb; 1827 struct sk_buff *skb;
1873 struct ieee80211_mgmt *resp; 1828 struct ieee80211_mgmt *resp;
1874 1829
1875 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) { 1830 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1876 /* Not to own unicast address */ 1831 /* Not to own unicast address */
1877 return; 1832 return;
1878 } 1833 }
@@ -1896,7 +1851,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1896 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1851 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1897 memset(resp, 0, 24); 1852 memset(resp, 0, 24);
1898 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1853 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1899 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN); 1854 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1900 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1855 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1901 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1856 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1902 IEEE80211_STYPE_ACTION); 1857 IEEE80211_STYPE_ACTION);
@@ -1916,23 +1871,25 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1916 struct ieee80211_local *local = rx->local; 1871 struct ieee80211_local *local = rx->local;
1917 struct ieee80211_sub_if_data *sdata = rx->sdata; 1872 struct ieee80211_sub_if_data *sdata = rx->sdata;
1918 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1873 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1874 struct sk_buff *nskb;
1875 struct ieee80211_rx_status *status;
1919 int len = rx->skb->len; 1876 int len = rx->skb->len;
1920 1877
1921 if (!ieee80211_is_action(mgmt->frame_control)) 1878 if (!ieee80211_is_action(mgmt->frame_control))
1922 return RX_CONTINUE; 1879 return RX_CONTINUE;
1923 1880
1924 if (!rx->sta) 1881 /* drop too small frames */
1925 return RX_DROP_MONITOR; 1882 if (len < IEEE80211_MIN_ACTION_SIZE)
1883 return RX_DROP_UNUSABLE;
1926 1884
1927 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1885 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
1928 return RX_DROP_MONITOR; 1886 return RX_DROP_UNUSABLE;
1929 1887
1930 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 1888 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1931 return RX_DROP_MONITOR; 1889 return RX_DROP_UNUSABLE;
1932 1890
1933 /* all categories we currently handle have action_code */ 1891 if (ieee80211_drop_unencrypted_mgmt(rx))
1934 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1892 return RX_DROP_UNUSABLE;
1935 return RX_DROP_MONITOR;
1936 1893
1937 switch (mgmt->u.action.category) { 1894 switch (mgmt->u.action.category) {
1938 case WLAN_CATEGORY_BACK: 1895 case WLAN_CATEGORY_BACK:
@@ -1945,7 +1902,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1945 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1902 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1946 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1903 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1947 sdata->vif.type != NL80211_IFTYPE_AP) 1904 sdata->vif.type != NL80211_IFTYPE_AP)
1948 return RX_DROP_MONITOR; 1905 break;
1906
1907 /* verify action_code is present */
1908 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1909 break;
1949 1910
1950 switch (mgmt->u.action.u.addba_req.action_code) { 1911 switch (mgmt->u.action.u.addba_req.action_code) {
1951 case WLAN_ACTION_ADDBA_REQ: 1912 case WLAN_ACTION_ADDBA_REQ:
@@ -1953,45 +1914,49 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1953 sizeof(mgmt->u.action.u.addba_req))) 1914 sizeof(mgmt->u.action.u.addba_req)))
1954 return RX_DROP_MONITOR; 1915 return RX_DROP_MONITOR;
1955 ieee80211_process_addba_request(local, rx->sta, mgmt, len); 1916 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1956 break; 1917 goto handled;
1957 case WLAN_ACTION_ADDBA_RESP: 1918 case WLAN_ACTION_ADDBA_RESP:
1958 if (len < (IEEE80211_MIN_ACTION_SIZE + 1919 if (len < (IEEE80211_MIN_ACTION_SIZE +
1959 sizeof(mgmt->u.action.u.addba_resp))) 1920 sizeof(mgmt->u.action.u.addba_resp)))
1960 return RX_DROP_MONITOR; 1921 break;
1961 ieee80211_process_addba_resp(local, rx->sta, mgmt, len); 1922 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1962 break; 1923 goto handled;
1963 case WLAN_ACTION_DELBA: 1924 case WLAN_ACTION_DELBA:
1964 if (len < (IEEE80211_MIN_ACTION_SIZE + 1925 if (len < (IEEE80211_MIN_ACTION_SIZE +
1965 sizeof(mgmt->u.action.u.delba))) 1926 sizeof(mgmt->u.action.u.delba)))
1966 return RX_DROP_MONITOR; 1927 break;
1967 ieee80211_process_delba(sdata, rx->sta, mgmt, len); 1928 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1968 break; 1929 goto handled;
1969 } 1930 }
1970 break; 1931 break;
1971 case WLAN_CATEGORY_SPECTRUM_MGMT: 1932 case WLAN_CATEGORY_SPECTRUM_MGMT:
1972 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 1933 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1973 return RX_DROP_MONITOR; 1934 break;
1974 1935
1975 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1936 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1976 return RX_DROP_MONITOR; 1937 break;
1938
1939 /* verify action_code is present */
1940 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1941 break;
1977 1942
1978 switch (mgmt->u.action.u.measurement.action_code) { 1943 switch (mgmt->u.action.u.measurement.action_code) {
1979 case WLAN_ACTION_SPCT_MSR_REQ: 1944 case WLAN_ACTION_SPCT_MSR_REQ:
1980 if (len < (IEEE80211_MIN_ACTION_SIZE + 1945 if (len < (IEEE80211_MIN_ACTION_SIZE +
1981 sizeof(mgmt->u.action.u.measurement))) 1946 sizeof(mgmt->u.action.u.measurement)))
1982 return RX_DROP_MONITOR; 1947 break;
1983 ieee80211_process_measurement_req(sdata, mgmt, len); 1948 ieee80211_process_measurement_req(sdata, mgmt, len);
1984 break; 1949 goto handled;
1985 case WLAN_ACTION_SPCT_CHL_SWITCH: 1950 case WLAN_ACTION_SPCT_CHL_SWITCH:
1986 if (len < (IEEE80211_MIN_ACTION_SIZE + 1951 if (len < (IEEE80211_MIN_ACTION_SIZE +
1987 sizeof(mgmt->u.action.u.chan_switch))) 1952 sizeof(mgmt->u.action.u.chan_switch)))
1988 return RX_DROP_MONITOR; 1953 break;
1989 1954
1990 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1955 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1991 return RX_DROP_MONITOR; 1956 break;
1992 1957
1993 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 1958 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1994 return RX_DROP_MONITOR; 1959 break;
1995 1960
1996 return ieee80211_sta_rx_mgmt(sdata, rx->skb); 1961 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1997 } 1962 }
@@ -1999,30 +1964,69 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1999 case WLAN_CATEGORY_SA_QUERY: 1964 case WLAN_CATEGORY_SA_QUERY:
2000 if (len < (IEEE80211_MIN_ACTION_SIZE + 1965 if (len < (IEEE80211_MIN_ACTION_SIZE +
2001 sizeof(mgmt->u.action.u.sa_query))) 1966 sizeof(mgmt->u.action.u.sa_query)))
2002 return RX_DROP_MONITOR; 1967 break;
1968
2003 switch (mgmt->u.action.u.sa_query.action) { 1969 switch (mgmt->u.action.u.sa_query.action) {
2004 case WLAN_ACTION_SA_QUERY_REQUEST: 1970 case WLAN_ACTION_SA_QUERY_REQUEST:
2005 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1971 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2006 return RX_DROP_MONITOR; 1972 break;
2007 ieee80211_process_sa_query_req(sdata, mgmt, len); 1973 ieee80211_process_sa_query_req(sdata, mgmt, len);
2008 break; 1974 goto handled;
2009 case WLAN_ACTION_SA_QUERY_RESPONSE:
2010 /*
2011 * SA Query response is currently only used in AP mode
2012 * and it is processed in user space.
2013 */
2014 return RX_CONTINUE;
2015 } 1975 }
2016 break; 1976 break;
2017 default: 1977 case MESH_PLINK_CATEGORY:
2018 /* do not process rejected action frames */ 1978 case MESH_PATH_SEL_CATEGORY:
2019 if (mgmt->u.action.category & 0x80) 1979 if (ieee80211_vif_is_mesh(&sdata->vif))
2020 return RX_DROP_MONITOR; 1980 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
1981 break;
1982 }
2021 1983
2022 return RX_CONTINUE; 1984 /*
1985 * For AP mode, hostapd is responsible for handling any action
1986 * frames that we didn't handle, including returning unknown
1987 * ones. For all other modes we will return them to the sender,
1988 * setting the 0x80 bit in the action category, as required by
1989 * 802.11-2007 7.3.1.11.
1990 */
1991 if (sdata->vif.type == NL80211_IFTYPE_AP ||
1992 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1993 return RX_DROP_MONITOR;
1994
1995 /*
1996 * Getting here means the kernel doesn't know how to handle
1997 * it, but maybe userspace does ... include returned frames
1998 * so userspace can register for those to know whether ones
1999 * it transmitted were processed or returned.
2000 */
2001 status = IEEE80211_SKB_RXCB(rx->skb);
2002
2003 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2004 cfg80211_rx_action(rx->sdata->dev, status->freq,
2005 rx->skb->data, rx->skb->len,
2006 GFP_ATOMIC))
2007 goto handled;
2008
2009 /* do not return rejected action frames */
2010 if (mgmt->u.action.category & 0x80)
2011 return RX_DROP_UNUSABLE;
2012
2013 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2014 GFP_ATOMIC);
2015 if (nskb) {
2016 struct ieee80211_mgmt *mgmt = (void *)nskb->data;
2017
2018 mgmt->u.action.category |= 0x80;
2019 memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
2020 memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2021
2022 memset(nskb->cb, 0, sizeof(nskb->cb));
2023
2024 ieee80211_tx_skb(rx->sdata, nskb);
2023 } 2025 }
2024 2026
2025 rx->sta->rx_packets++; 2027 handled:
2028 if (rx->sta)
2029 rx->sta->rx_packets++;
2026 dev_kfree_skb(rx->skb); 2030 dev_kfree_skb(rx->skb);
2027 return RX_QUEUED; 2031 return RX_QUEUED;
2028} 2032}
@@ -2031,13 +2035,17 @@ static ieee80211_rx_result debug_noinline
2031ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2035ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2032{ 2036{
2033 struct ieee80211_sub_if_data *sdata = rx->sdata; 2037 struct ieee80211_sub_if_data *sdata = rx->sdata;
2034 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2038 ieee80211_rx_result rxs;
2035 2039
2036 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 2040 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2037 return RX_DROP_MONITOR; 2041 return RX_DROP_MONITOR;
2038 2042
2039 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 2043 if (ieee80211_drop_unencrypted_mgmt(rx))
2040 return RX_DROP_MONITOR; 2044 return RX_DROP_UNUSABLE;
2045
2046 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2047 if (rxs != RX_CONTINUE)
2048 return rxs;
2041 2049
2042 if (ieee80211_vif_is_mesh(&sdata->vif)) 2050 if (ieee80211_vif_is_mesh(&sdata->vif))
2043 return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 2051 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
@@ -2143,7 +2151,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2143 skb->protocol = htons(ETH_P_802_2); 2151 skb->protocol = htons(ETH_P_802_2);
2144 2152
2145 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2153 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2146 if (!netif_running(sdata->dev)) 2154 if (!ieee80211_sdata_running(sdata))
2147 continue; 2155 continue;
2148 2156
2149 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2157 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2280,7 +2288,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2280 if (!bssid && !sdata->u.mgd.use_4addr) 2288 if (!bssid && !sdata->u.mgd.use_4addr)
2281 return 0; 2289 return 0;
2282 if (!multicast && 2290 if (!multicast &&
2283 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { 2291 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2284 if (!(sdata->dev->flags & IFF_PROMISC)) 2292 if (!(sdata->dev->flags & IFF_PROMISC))
2285 return 0; 2293 return 0;
2286 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2294 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2297,7 +2305,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2297 return 0; 2305 return 0;
2298 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2306 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2299 } else if (!multicast && 2307 } else if (!multicast &&
2300 compare_ether_addr(sdata->dev->dev_addr, 2308 compare_ether_addr(sdata->vif.addr,
2301 hdr->addr1) != 0) { 2309 hdr->addr1) != 0) {
2302 if (!(sdata->dev->flags & IFF_PROMISC)) 2310 if (!(sdata->dev->flags & IFF_PROMISC))
2303 return 0; 2311 return 0;
@@ -2308,13 +2316,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2308 rate_idx = 0; /* TODO: HT rates */ 2316 rate_idx = 0; /* TODO: HT rates */
2309 else 2317 else
2310 rate_idx = status->rate_idx; 2318 rate_idx = status->rate_idx;
2311 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2, 2319 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2312 BIT(rate_idx)); 2320 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2313 } 2321 }
2314 break; 2322 break;
2315 case NL80211_IFTYPE_MESH_POINT: 2323 case NL80211_IFTYPE_MESH_POINT:
2316 if (!multicast && 2324 if (!multicast &&
2317 compare_ether_addr(sdata->dev->dev_addr, 2325 compare_ether_addr(sdata->vif.addr,
2318 hdr->addr1) != 0) { 2326 hdr->addr1) != 0) {
2319 if (!(sdata->dev->flags & IFF_PROMISC)) 2327 if (!(sdata->dev->flags & IFF_PROMISC))
2320 return 0; 2328 return 0;
@@ -2325,11 +2333,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2325 case NL80211_IFTYPE_AP_VLAN: 2333 case NL80211_IFTYPE_AP_VLAN:
2326 case NL80211_IFTYPE_AP: 2334 case NL80211_IFTYPE_AP:
2327 if (!bssid) { 2335 if (!bssid) {
2328 if (compare_ether_addr(sdata->dev->dev_addr, 2336 if (compare_ether_addr(sdata->vif.addr,
2329 hdr->addr1)) 2337 hdr->addr1))
2330 return 0; 2338 return 0;
2331 } else if (!ieee80211_bssid_match(bssid, 2339 } else if (!ieee80211_bssid_match(bssid,
2332 sdata->dev->dev_addr)) { 2340 sdata->vif.addr)) {
2333 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2341 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2334 return 0; 2342 return 0;
2335 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2343 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2368,6 +2376,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2368 int prepares; 2376 int prepares;
2369 struct ieee80211_sub_if_data *prev = NULL; 2377 struct ieee80211_sub_if_data *prev = NULL;
2370 struct sk_buff *skb_new; 2378 struct sk_buff *skb_new;
2379 struct sta_info *sta, *tmp;
2380 bool found_sta = false;
2371 2381
2372 hdr = (struct ieee80211_hdr *)skb->data; 2382 hdr = (struct ieee80211_hdr *)skb->data;
2373 memset(&rx, 0, sizeof(rx)); 2383 memset(&rx, 0, sizeof(rx));
@@ -2384,68 +2394,87 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2384 ieee80211_parse_qos(&rx); 2394 ieee80211_parse_qos(&rx);
2385 ieee80211_verify_alignment(&rx); 2395 ieee80211_verify_alignment(&rx);
2386 2396
2387 rx.sta = sta_info_get(local, hdr->addr2); 2397 if (ieee80211_is_data(hdr->frame_control)) {
2388 if (rx.sta) 2398 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2389 rx.sdata = rx.sta->sdata; 2399 rx.sta = sta;
2390 2400 found_sta = true;
2391 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { 2401 rx.sdata = sta->sdata;
2392 rx.flags |= IEEE80211_RX_RA_MATCH; 2402
2393 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2403 rx.flags |= IEEE80211_RX_RA_MATCH;
2394 if (prepares) { 2404 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2395 if (status->flag & RX_FLAG_MMIC_ERROR) { 2405 if (prepares) {
2396 if (rx.flags & IEEE80211_RX_RA_MATCH) 2406 if (status->flag & RX_FLAG_MMIC_ERROR) {
2397 ieee80211_rx_michael_mic_report(hdr, &rx); 2407 if (rx.flags & IEEE80211_RX_RA_MATCH)
2398 } else 2408 ieee80211_rx_michael_mic_report(hdr, &rx);
2399 prev = rx.sdata; 2409 } else
2410 prev = rx.sdata;
2411 }
2400 } 2412 }
2401 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2413 }
2402 if (!netif_running(sdata->dev)) 2414 if (!found_sta) {
2403 continue; 2415 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2416 if (!ieee80211_sdata_running(sdata))
2417 continue;
2404 2418
2405 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2419 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2406 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2420 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2407 continue; 2421 continue;
2408 2422
2409 rx.flags |= IEEE80211_RX_RA_MATCH; 2423 /*
2410 prepares = prepare_for_handlers(sdata, &rx, hdr); 2424 * frame is destined for this interface, but if it's
2425 * not also for the previous one we handle that after
2426 * the loop to avoid copying the SKB once too much
2427 */
2411 2428
2412 if (!prepares) 2429 if (!prev) {
2413 continue; 2430 prev = sdata;
2431 continue;
2432 }
2414 2433
2415 if (status->flag & RX_FLAG_MMIC_ERROR) { 2434 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2416 rx.sdata = sdata;
2417 if (rx.flags & IEEE80211_RX_RA_MATCH)
2418 ieee80211_rx_michael_mic_report(hdr, &rx);
2419 continue;
2420 }
2421 2435
2422 /* 2436 rx.flags |= IEEE80211_RX_RA_MATCH;
2423 * frame is destined for this interface, but if it's not 2437 prepares = prepare_for_handlers(prev, &rx, hdr);
2424 * also for the previous one we handle that after the 2438
2425 * loop to avoid copying the SKB once too much 2439 if (!prepares)
2426 */ 2440 goto next;
2441
2442 if (status->flag & RX_FLAG_MMIC_ERROR) {
2443 rx.sdata = prev;
2444 if (rx.flags & IEEE80211_RX_RA_MATCH)
2445 ieee80211_rx_michael_mic_report(hdr,
2446 &rx);
2447 goto next;
2448 }
2427 2449
2428 if (!prev) { 2450 /*
2451 * frame was destined for the previous interface
2452 * so invoke RX handlers for it
2453 */
2454
2455 skb_new = skb_copy(skb, GFP_ATOMIC);
2456 if (!skb_new) {
2457 if (net_ratelimit())
2458 printk(KERN_DEBUG "%s: failed to copy "
2459 "multicast frame for %s\n",
2460 wiphy_name(local->hw.wiphy),
2461 prev->name);
2462 goto next;
2463 }
2464 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2465next:
2429 prev = sdata; 2466 prev = sdata;
2430 continue;
2431 } 2467 }
2432 2468
2433 /* 2469 if (prev) {
2434 * frame was destined for the previous interface 2470 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2435 * so invoke RX handlers for it
2436 */
2437 2471
2438 skb_new = skb_copy(skb, GFP_ATOMIC); 2472 rx.flags |= IEEE80211_RX_RA_MATCH;
2439 if (!skb_new) { 2473 prepares = prepare_for_handlers(prev, &rx, hdr);
2440 if (net_ratelimit()) 2474
2441 printk(KERN_DEBUG "%s: failed to copy " 2475 if (!prepares)
2442 "multicast frame for %s\n", 2476 prev = NULL;
2443 wiphy_name(local->hw.wiphy),
2444 prev->dev->name);
2445 continue;
2446 } 2477 }
2447 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2448 prev = sdata;
2449 } 2478 }
2450 if (prev) 2479 if (prev)
2451 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2480 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bc17cf7d68db..85507bd9e341 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,9 +12,9 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/wireless.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/slab.h>
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19 19
20#include "ieee80211_i.h" 20#include "ieee80211_i.h"
@@ -29,16 +29,19 @@ struct ieee80211_bss *
29ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 29ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
30 u8 *ssid, u8 ssid_len) 30 u8 *ssid, u8 ssid_len)
31{ 31{
32 return (void *)cfg80211_get_bss(local->hw.wiphy, 32 struct cfg80211_bss *cbss;
33 ieee80211_get_channel(local->hw.wiphy, 33
34 freq), 34 cbss = cfg80211_get_bss(local->hw.wiphy,
35 bssid, ssid, ssid_len, 35 ieee80211_get_channel(local->hw.wiphy, freq),
36 0, 0); 36 bssid, ssid, ssid_len, 0, 0);
37 if (!cbss)
38 return NULL;
39 return (void *)cbss->priv;
37} 40}
38 41
39static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 42static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
40{ 43{
41 struct ieee80211_bss *bss = (void *)cbss; 44 struct ieee80211_bss *bss = (void *)cbss->priv;
42 45
43 kfree(bss_mesh_id(bss)); 46 kfree(bss_mesh_id(bss));
44 kfree(bss_mesh_cfg(bss)); 47 kfree(bss_mesh_cfg(bss));
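The scan.c changes above stop casting directly between struct cfg80211_bss and struct ieee80211_bss; the mac80211 data now lives in the cfg80211_bss private area (cbss->priv), and converting back to the containing structure is done with container_of(). A self-contained sketch of that pattern; the structure names and fields below are stand-ins, not the cfg80211 definitions:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct public_bss {             /* stand-in for struct cfg80211_bss */
        int channel;
        char priv[0];           /* trailing private area (GNU zero-length array) */
};

struct private_bss {            /* stand-in for struct ieee80211_bss */
        int dtim_period;
};

int main(void)
{
        struct public_bss *cbss;
        struct private_bss *bss;
        struct public_bss *back;

        cbss = calloc(1, sizeof(*cbss) + sizeof(struct private_bss));
        if (!cbss)
                return 1;

        bss = (void *)cbss->priv;       /* forward: public struct to private area */
        bss->dtim_period = 2;

        /* backward: recover the containing public struct from the private pointer */
        back = container_of((void *)bss, struct public_bss, priv);
        printf("round trip ok: %d\n", back == cbss);    /* prints 1 */

        free(cbss);
        return 0;
}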
@@ -47,7 +50,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
47void ieee80211_rx_bss_put(struct ieee80211_local *local, 50void ieee80211_rx_bss_put(struct ieee80211_local *local,
48 struct ieee80211_bss *bss) 51 struct ieee80211_bss *bss)
49{ 52{
50 cfg80211_put_bss((struct cfg80211_bss *)bss); 53 if (!bss)
54 return;
55 cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
56}
57
58static bool is_uapsd_supported(struct ieee802_11_elems *elems)
59{
60 u8 qos_info;
61
62 if (elems->wmm_info && elems->wmm_info_len == 7
63 && elems->wmm_info[5] == 1)
64 qos_info = elems->wmm_info[6];
65 else if (elems->wmm_param && elems->wmm_param_len == 24
66 && elems->wmm_param[5] == 1)
67 qos_info = elems->wmm_param[6];
68 else
69 /* no valid wmm information or parameter element found */
70 return false;
71
72 return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
51} 73}
52 74
53struct ieee80211_bss * 75struct ieee80211_bss *
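is_uapsd_supported(), added above, pulls the QoS Info octet out of either a 7-byte WMM Information element body or a 24-byte WMM Parameter element body (offset 5 is the version, offset 6 the QoS Info field) and tests the AP's U-APSD flag. A standalone sketch over a made-up element body, folding the two length checks into one and assuming the U-APSD flag is bit 7 of QoS Info as in the WMM specification:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define WMM_AP_QOSINFO_UAPSD 0x80       /* assumed: bit 7 of the AP QoS Info field */

static bool uapsd_supported(const unsigned char *wmm, size_t len)
{
        unsigned char qos_info;

        /* 7-byte WMM Information element or 24-byte WMM Parameter element,
         * both carrying version 1 at offset 5 and QoS Info at offset 6 */
        if ((len == 7 || len == 24) && wmm[5] == 1)
                qos_info = wmm[6];
        else
                return false;   /* no usable WMM element */

        return qos_info & WMM_AP_QOSINFO_UAPSD;
}

int main(void)
{
        /* hypothetical WMM Information element body: OUI 00:50:f2, type 2,
         * subtype 0, version 1, QoS Info with the U-APSD bit set */
        unsigned char wmm_info[7] = { 0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x80 };

        printf("U-APSD advertised: %d\n",
               uapsd_supported(wmm_info, sizeof(wmm_info)));
        return 0;
}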
@@ -59,6 +81,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
59 struct ieee80211_channel *channel, 81 struct ieee80211_channel *channel,
60 bool beacon) 82 bool beacon)
61{ 83{
84 struct cfg80211_bss *cbss;
62 struct ieee80211_bss *bss; 85 struct ieee80211_bss *bss;
63 int clen; 86 int clen;
64 s32 signal = 0; 87 s32 signal = 0;
@@ -68,13 +91,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
68 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 91 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
69 signal = (rx_status->signal * 100) / local->hw.max_signal; 92 signal = (rx_status->signal * 100) / local->hw.max_signal;
70 93
71 bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel, 94 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
72 mgmt, len, signal, GFP_ATOMIC); 95 mgmt, len, signal, GFP_ATOMIC);
73 96
74 if (!bss) 97 if (!cbss)
75 return NULL; 98 return NULL;
76 99
77 bss->cbss.free_priv = ieee80211_rx_bss_free; 100 cbss->free_priv = ieee80211_rx_bss_free;
101 bss = (void *)cbss->priv;
78 102
79 /* save the ERP value so that it is available at association time */ 103 /* save the ERP value so that it is available at association time */
80 if (elems->erp_info && elems->erp_info_len >= 1) { 104 if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -88,10 +112,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
88 bss->dtim_period = tim_ie->dtim_period; 112 bss->dtim_period = tim_ie->dtim_period;
89 } 113 }
90 114
91 /* set default value for buggy AP/no TIM element */
92 if (bss->dtim_period == 0)
93 bss->dtim_period = 1;
94
95 bss->supp_rates_len = 0; 115 bss->supp_rates_len = 0;
96 if (elems->supp_rates) { 116 if (elems->supp_rates) {
97 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 117 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
@@ -111,6 +131,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
111 } 131 }
112 132
113 bss->wmm_used = elems->wmm_param || elems->wmm_info; 133 bss->wmm_used = elems->wmm_param || elems->wmm_info;
134 bss->uapsd_supported = is_uapsd_supported(elems);
114 135
115 if (!beacon) 136 if (!beacon)
116 bss->last_probe_resp = jiffies; 137 bss->last_probe_resp = jiffies;
@@ -147,7 +168,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
147 presp = ieee80211_is_probe_resp(fc); 168 presp = ieee80211_is_probe_resp(fc);
148 if (presp) { 169 if (presp) {
149 /* ignore ProbeResp to foreign address */ 170 /* ignore ProbeResp to foreign address */
150 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 171 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
151 return RX_DROP_MONITOR; 172 return RX_DROP_MONITOR;
152 173
153 presp = true; 174 presp = true;
@@ -220,82 +241,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
220 return true; 241 return true;
221} 242}
222 243
223/*
224 * inform AP that we will go to sleep so that it will buffer the frames
225 * while we scan
226 */
227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
228{
229 struct ieee80211_local *local = sdata->local;
230
231 local->scan_ps_enabled = false;
232
233 /* FIXME: what to do when local->pspolling is true? */
234
235 del_timer_sync(&local->dynamic_ps_timer);
236 cancel_work_sync(&local->dynamic_ps_enable_work);
237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
239 local->scan_ps_enabled = true;
240 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
241 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
242 }
243
244 if (!(local->scan_ps_enabled) ||
245 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
246 /*
247 * If power save was enabled, no need to send a nullfunc
248 * frame because AP knows that we are sleeping. But if the
249 * hardware is creating the nullfunc frame for power save
250 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
251 * enabled) and power save was enabled, the firmware just
252 * sent a null frame with power save disabled. So we need
253 * to send a new nullfunc frame to inform the AP that we
254 * are again sleeping.
255 */
256 ieee80211_send_nullfunc(local, sdata, 1);
257}
258
259/* inform AP that we are awake again, unless power save is enabled */
260static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
261{
262 struct ieee80211_local *local = sdata->local;
263
264 if (!local->ps_sdata)
265 ieee80211_send_nullfunc(local, sdata, 0);
266 else if (local->scan_ps_enabled) {
267 /*
268 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
269 * will send a nullfunc frame with the powersave bit set
270 * even though the AP already knows that we are sleeping.
271 * This could be avoided by sending a null frame with power
272 * save bit disabled before enabling the power save, but
273 * this doesn't gain anything.
274 *
275 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
276 * to send a nullfunc frame because AP already knows that
277 * we are sleeping, let's just enable power save mode in
278 * hardware.
279 */
280 local->hw.conf.flags |= IEEE80211_CONF_PS;
281 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
282 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
283 /*
284 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
285 * had been running before leaving the operating channel,
286 * restart the timer now and send a nullfunc frame to inform
287 * the AP that we are awake.
288 */
289 ieee80211_send_nullfunc(local, sdata, 0);
290 mod_timer(&local->dynamic_ps_timer, jiffies +
291 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
292 }
293}
294
295void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 244void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
296{ 245{
297 struct ieee80211_local *local = hw_to_local(hw); 246 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_sub_if_data *sdata;
299 bool was_hw_scan; 247 bool was_hw_scan;
300 248
301 mutex_lock(&local->scan_mtx); 249 mutex_lock(&local->scan_mtx);
@@ -344,41 +292,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
344 292
345 drv_sw_scan_complete(local); 293 drv_sw_scan_complete(local);
346 294
347 mutex_lock(&local->iflist_mtx); 295 ieee80211_offchannel_return(local, true);
348 list_for_each_entry(sdata, &local->interfaces, list) {
349 if (!netif_running(sdata->dev))
350 continue;
351
352 /* Tell AP we're back */
353 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
354 if (sdata->u.mgd.associated) {
355 ieee80211_scan_ps_disable(sdata);
356 netif_tx_wake_all_queues(sdata->dev);
357 }
358 } else
359 netif_tx_wake_all_queues(sdata->dev);
360
361 /* re-enable beaconing */
362 if (sdata->vif.type == NL80211_IFTYPE_AP ||
363 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
364 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
365 ieee80211_bss_info_change_notify(
366 sdata, BSS_CHANGED_BEACON_ENABLED);
367 }
368 mutex_unlock(&local->iflist_mtx);
369 296
370 done: 297 done:
371 ieee80211_recalc_idle(local); 298 ieee80211_recalc_idle(local);
372 ieee80211_mlme_notify_scan_completed(local); 299 ieee80211_mlme_notify_scan_completed(local);
373 ieee80211_ibss_notify_scan_completed(local); 300 ieee80211_ibss_notify_scan_completed(local);
374 ieee80211_mesh_notify_scan_completed(local); 301 ieee80211_mesh_notify_scan_completed(local);
302 ieee80211_queue_work(&local->hw, &local->work_work);
375} 303}
376EXPORT_SYMBOL(ieee80211_scan_completed); 304EXPORT_SYMBOL(ieee80211_scan_completed);
377 305
378static int ieee80211_start_sw_scan(struct ieee80211_local *local) 306static int ieee80211_start_sw_scan(struct ieee80211_local *local)
379{ 307{
380 struct ieee80211_sub_if_data *sdata;
381
382 /* 308 /*
383 * Hardware/driver doesn't support hw_scan, so use software 309 * Hardware/driver doesn't support hw_scan, so use software
384 * scanning instead. First send a nullfunc frame with power save 310 * scanning instead. First send a nullfunc frame with power save
@@ -394,33 +320,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
394 */ 320 */
395 drv_sw_scan_start(local); 321 drv_sw_scan_start(local);
396 322
397 mutex_lock(&local->iflist_mtx); 323 ieee80211_offchannel_stop_beaconing(local);
398 list_for_each_entry(sdata, &local->interfaces, list) {
399 if (!netif_running(sdata->dev))
400 continue;
401
402 /* disable beaconing */
403 if (sdata->vif.type == NL80211_IFTYPE_AP ||
404 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
405 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
406 ieee80211_bss_info_change_notify(
407 sdata, BSS_CHANGED_BEACON_ENABLED);
408
409 /*
410 * only handle non-STA interfaces here, STA interfaces
411 * are handled in the scan state machine
412 */
413 if (sdata->vif.type != NL80211_IFTYPE_STATION)
414 netif_tx_stop_all_queues(sdata->dev);
415 }
416 mutex_unlock(&local->iflist_mtx);
417 324
418 local->next_scan_state = SCAN_DECISION; 325 local->next_scan_state = SCAN_DECISION;
419 local->scan_channel_idx = 0; 326 local->scan_channel_idx = 0;
420 327
328 drv_flush(local, false);
329
421 ieee80211_configure_filter(local); 330 ieee80211_configure_filter(local);
422 331
423 /* TODO: start scan as soon as all nullfunc frames are ACKed */
424 ieee80211_queue_delayed_work(&local->hw, 332 ieee80211_queue_delayed_work(&local->hw,
425 &local->scan_work, 333 &local->scan_work,
426 IEEE80211_CHANNEL_TIME); 334 IEEE80211_CHANNEL_TIME);
@@ -433,17 +341,13 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
433 struct cfg80211_scan_request *req) 341 struct cfg80211_scan_request *req)
434{ 342{
435 struct ieee80211_local *local = sdata->local; 343 struct ieee80211_local *local = sdata->local;
436 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
437 int rc; 344 int rc;
438 345
439 if (local->scan_req) 346 if (local->scan_req)
440 return -EBUSY; 347 return -EBUSY;
441 348
442 if (req != local->int_scan_req && 349 if (!list_empty(&local->work_list)) {
443 sdata->vif.type == NL80211_IFTYPE_STATION && 350 /* wait for the work to finish/time out */
444 !list_empty(&ifmgd->work_list)) {
445 /* actually wait for the work it's doing to finish/time out */
446 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
447 local->scan_req = req; 351 local->scan_req = req;
448 local->scan_sdata = sdata; 352 local->scan_sdata = sdata;
449 return 0; 353 return 0;
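
Editor's note on the hunk above: when other work is still pending, the request is simply parked in local->scan_req and picked up later, yet the caller still sees success. A minimal userspace sketch of that defer-or-run pattern follows; all names in it are illustrative stand-ins, not mac80211 API.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct scan_request { const char *ssid; };

	static struct scan_request *pending_req;   /* parked request, if any */
	static bool other_work_busy;               /* stands in for !list_empty(&work_list) */

	/* Returns 0 on success; the scan may run now or be deferred. */
	static int start_scan(struct scan_request *req)
	{
		if (pending_req)
			return -1;              /* like -EBUSY: a scan is already queued */

		if (other_work_busy) {
			pending_req = req;      /* park it; the worker kicks it later */
			return 0;
		}

		printf("scanning for %s now\n", req->ssid);
		return 0;
	}

	/* Called when the blocking work finishes. */
	static void work_done(void)
	{
		other_work_busy = false;
		if (pending_req) {
			printf("running deferred scan for %s\n", pending_req->ssid);
			pending_req = NULL;
		}
	}

	int main(void)
	{
		struct scan_request req = { .ssid = "example" };

		other_work_busy = true;
		start_scan(&req);       /* deferred */
		work_done();            /* runs it */
		return 0;
	}
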
@@ -468,6 +372,14 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
468 local->hw_scan_req->ie = ies; 372 local->hw_scan_req->ie = ies;
469 373
470 local->hw_scan_band = 0; 374 local->hw_scan_band = 0;
375
376 /*
377 * After allocating local->hw_scan_req, we must
378 * go through until ieee80211_prep_hw_scan(), so
379 * anything that might be changed here and leave
380 * this function early must not go after this
381 * allocation.
382 */
471 } 383 }
472 384
473 local->scan_req = req; 385 local->scan_req = req;
@@ -477,15 +389,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
477 __set_bit(SCAN_HW_SCANNING, &local->scanning); 389 __set_bit(SCAN_HW_SCANNING, &local->scanning);
478 else 390 else
479 __set_bit(SCAN_SW_SCANNING, &local->scanning); 391 __set_bit(SCAN_SW_SCANNING, &local->scanning);
392
480 /* 393 /*
481 * Kicking off the scan need not be protected, 394 * Kicking off the scan need not be protected,
482 * only the scan variable stuff, since now 395 * only the scan variable stuff, since now
483 * local->scan_req is assigned and other callers 396 * local->scan_req is assigned and other callers
484 * will abort their scan attempts. 397 * will abort their scan attempts.
485 * 398 *
486 * This avoids getting a scan_mtx -> iflist_mtx 399 * This avoids too many locking dependencies
487 * dependency, so that the scan completed calls 400 * so that the scan completed calls have more
488 * have more locking freedom. 401 * locking freedom.
489 */ 402 */
490 403
491 ieee80211_recalc_idle(local); 404 ieee80211_recalc_idle(local);
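
The SCAN_HW_SCANNING, SCAN_SW_SCANNING and SCAN_OFF_CHANNEL bits seen in this hunk all live in the single local->scanning word. A rough userspace equivalent of that bitmap handling, written with plain C operators instead of the kernel's __set_bit helpers (flag names mirror the diff, everything else is invented):

	#include <stdio.h>

	enum {
		SCAN_SW_SCANNING,
		SCAN_HW_SCANNING,
		SCAN_OFF_CHANNEL,
	};

	static unsigned long scanning;  /* one word of state bits, like local->scanning */

	static void set_bitflag(int bit)   { scanning |=  (1UL << bit); }
	static void clear_bitflag(int bit) { scanning &= ~(1UL << bit); }
	static int  test_bitflag(int bit)  { return !!(scanning & (1UL << bit)); }

	int main(void)
	{
		int hw_scan = 0;

		if (hw_scan)
			set_bitflag(SCAN_HW_SCANNING);
		else
			set_bitflag(SCAN_SW_SCANNING);

		set_bitflag(SCAN_OFF_CHANNEL);          /* left the operating channel */
		clear_bitflag(SCAN_OFF_CHANNEL);        /* ... and came back */

		printf("sw=%d hw=%d off=%d\n",
		       test_bitflag(SCAN_SW_SCANNING),
		       test_bitflag(SCAN_HW_SCANNING),
		       test_bitflag(SCAN_OFF_CHANNEL));
		return 0;
	}
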
@@ -528,7 +441,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
528 /* check if at least one STA interface is associated */ 441 /* check if at least one STA interface is associated */
529 mutex_lock(&local->iflist_mtx); 442 mutex_lock(&local->iflist_mtx);
530 list_for_each_entry(sdata, &local->interfaces, list) { 443 list_for_each_entry(sdata, &local->interfaces, list) {
531 if (!netif_running(sdata->dev)) 444 if (!ieee80211_sdata_running(sdata))
532 continue; 445 continue;
533 446
534 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 447 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -566,56 +479,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
566static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, 479static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
567 unsigned long *next_delay) 480 unsigned long *next_delay)
568{ 481{
569 struct ieee80211_sub_if_data *sdata; 482 ieee80211_offchannel_stop_station(local);
483
484 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
570 485
571 /* 486 /*
572 * notify the AP about us leaving the channel and stop all STA interfaces 487 * What if the nullfunc frames didn't arrive?
573 */ 488 */
574 mutex_lock(&local->iflist_mtx); 489 drv_flush(local, false);
575 list_for_each_entry(sdata, &local->interfaces, list) { 490 if (local->ops->flush)
576 if (!netif_running(sdata->dev)) 491 *next_delay = 0;
577 continue; 492 else
578 493 *next_delay = HZ / 10;
579 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
580 netif_tx_stop_all_queues(sdata->dev);
581 if (sdata->u.mgd.associated)
582 ieee80211_scan_ps_enable(sdata);
583 }
584 }
585 mutex_unlock(&local->iflist_mtx);
586
587 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
588 494
589 /* advance to the next channel to be scanned */ 495 /* advance to the next channel to be scanned */
590 *next_delay = HZ / 10;
591 local->next_scan_state = SCAN_SET_CHANNEL; 496 local->next_scan_state = SCAN_SET_CHANNEL;
592} 497}
593 498
594static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, 499static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
595 unsigned long *next_delay) 500 unsigned long *next_delay)
596{ 501{
597 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
598
599 /* switch back to the operating channel */ 502 /* switch back to the operating channel */
600 local->scan_channel = NULL; 503 local->scan_channel = NULL;
601 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 504 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
602 505
603 /* 506 /*
604 * notify the AP about us being back and restart all STA interfaces 507 * Only re-enable station mode interface now; beaconing will be
508 * re-enabled once the full scan has been completed.
605 */ 509 */
606 mutex_lock(&local->iflist_mtx); 510 ieee80211_offchannel_return(local, false);
607 list_for_each_entry(sdata, &local->interfaces, list) {
608 if (!netif_running(sdata->dev))
609 continue;
610
611 /* Tell AP we're back */
612 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
613 if (sdata->u.mgd.associated)
614 ieee80211_scan_ps_disable(sdata);
615 netif_tx_wake_all_queues(sdata->dev);
616 }
617 }
618 mutex_unlock(&local->iflist_mtx);
619 511
620 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); 512 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
621 513
@@ -729,7 +621,7 @@ void ieee80211_scan_work(struct work_struct *work)
729 /* 621 /*
730 * Avoid re-scheduling when the sdata is going away. 622 * Avoid re-scheduling when the sdata is going away.
731 */ 623 */
732 if (!netif_running(sdata->dev)) { 624 if (!ieee80211_sdata_running(sdata)) {
733 ieee80211_scan_completed(&local->hw, true); 625 ieee80211_scan_completed(&local->hw, true);
734 return; 626 return;
735 } 627 }
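
Taken together, the scan.c changes leave a compact state machine: SCAN_DECISION picks the next step, the leave/enter operating-channel states bracket the off-channel dwell, and each handler writes next_scan_state plus a delay before the work is re-queued. Below is a compressed userspace model of that loop; the state names loosely follow the diff (SCAN_SEND_PROBES is assumed from context), the delays and channel count are made up.

	#include <stdio.h>

	enum scan_state {
		SCAN_DECISION,
		SCAN_LEAVE_OPER_CHANNEL,
		SCAN_SET_CHANNEL,
		SCAN_SEND_PROBES,
		SCAN_ENTER_OPER_CHANNEL,
		SCAN_DONE,
	};

	int main(void)
	{
		enum scan_state state = SCAN_DECISION;
		unsigned long next_delay_ms = 0;
		int channels_left = 2;

		while (state != SCAN_DONE) {
			switch (state) {
			case SCAN_DECISION:
				state = channels_left ? SCAN_LEAVE_OPER_CHANNEL
						      : SCAN_ENTER_OPER_CHANNEL;
				next_delay_ms = 0;
				break;
			case SCAN_LEAVE_OPER_CHANNEL:
				/* tell the AP we sleep, flush, then move on */
				state = SCAN_SET_CHANNEL;
				next_delay_ms = 100;
				break;
			case SCAN_SET_CHANNEL:
				channels_left--;
				state = SCAN_SEND_PROBES;
				next_delay_ms = 30;
				break;
			case SCAN_SEND_PROBES:
				state = SCAN_DECISION;
				next_delay_ms = 30;
				break;
			case SCAN_ENTER_OPER_CHANNEL:
				/* back on the operating channel; wake the queues */
				state = SCAN_DONE;
				next_delay_ms = 0;
				break;
			default:
				state = SCAN_DONE;
				break;
			}
			printf("next state %d after %lu ms\n", state, next_delay_ms);
		}
		return 0;
	}

In the real code the delay feeds ieee80211_queue_delayed_work() rather than a busy loop, but the shape of the decision logic is the same.
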
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf9..7733f66ee2c4 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
35 35
36 if (!skb) { 36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for " 37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->dev->name); 38 "measurement report frame\n", sdata->name);
39 return; 39 return;
40 } 40 }
41 41
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
44 memset(msr_report, 0, 24); 44 memset(msr_report, 0, 24);
45 memcpy(msr_report->da, da, ETH_ALEN); 45 memcpy(msr_report->da, da, ETH_ALEN);
46 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); 46 memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
47 memcpy(msr_report->bssid, bssid, ETH_ALEN); 47 memcpy(msr_report->bssid, bssid, ETH_ALEN);
48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
49 IEEE80211_STYPE_ACTION); 49 IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24bc..fb12cec4d333 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -32,49 +32,33 @@
32 * for faster lookup and a list for iteration. They are managed using 32 * for faster lookup and a list for iteration. They are managed using
33 * RCU, i.e. access to the list and hash table is protected by RCU. 33 * RCU, i.e. access to the list and hash table is protected by RCU.
34 * 34 *
35 * Upon allocating a STA info structure with sta_info_alloc(), the caller owns 35 * Upon allocating a STA info structure with sta_info_alloc(), the caller
36 * that structure. It must then either destroy it using sta_info_destroy() 36 * owns that structure. It must then insert it into the hash table using
37 * (which is pretty useless) or insert it into the hash table using 37 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
38 * sta_info_insert() which demotes the reference from ownership to a regular 38 * case (which acquires an rcu read section but must not be called from
39 * RCU-protected reference; if the function is called without protection by an 39 * within one) will the pointer still be valid after the call. Note that
40 * RCU critical section the reference is instantly invalidated. Note that the 40 * the caller may not do much with the STA info before inserting it, in
41 * caller may not do much with the STA info before inserting it, in particular, 41 * particular, it may not start any mesh peer link management or add
42 * it may not start any mesh peer link management or add encryption keys. 42 * encryption keys.
43 * 43 *
44 * When the insertion fails (sta_info_insert() returns non-zero), the 44 * When the insertion fails (sta_info_insert() returns non-zero), the
45 * structure will have been freed by sta_info_insert()! 45 * structure will have been freed by sta_info_insert()!
46 * 46 *
47 * sta entries are added by mac80211 when you establish a link with a 47 * Station entries are added by mac80211 when you establish a link with a
48 * peer. This means different things for the different type of interfaces 48 * peer. This means different things for the different type of interfaces
49 * we support. For a regular station this mean we add the AP sta when we 49 * we support. For a regular station this mean we add the AP sta when we
50 * receive an assocation response from the AP. For IBSS this occurs when 50 * receive an assocation response from the AP. For IBSS this occurs when
51 * we receive a probe response or a beacon from target IBSS network. For 51 * get to know about a peer on the same IBSS. For WDS we add the sta for
52 * WDS we add the sta for the peer imediately upon device open. When using 52 * the peer imediately upon device open. When using AP mode we add stations
53 * AP mode we add stations for each respective station upon request from 53 * for each respective station upon request from userspace through nl80211.
54 * userspace through nl80211.
55 * 54 *
56 * Because there are debugfs entries for each station, and adding those 55 * In order to remove a STA info structure, various sta_info_destroy_*()
57 * must be able to sleep, it is also possible to "pin" a station entry, 56 * calls are available.
58 * that means it can be removed from the hash table but not be freed.
59 * See the comment in __sta_info_unlink() for more information, this is
60 * an internal capability only.
61 * 57 *
62 * In order to remove a STA info structure, the caller needs to first 58 * There is no concept of ownership on a STA entry, each structure is
63 * unlink it (sta_info_unlink()) from the list and hash tables and 59 * owned by the global hash table/list until it is removed. All users of
64 * then destroy it; sta_info_destroy() will wait for an RCU grace period 60 * the structure need to be RCU protected so that the structure won't be
65 * to elapse before actually freeing it. Due to the pinning and the 61 * freed before they are done using it.
66 * possibility of multiple callers trying to remove the same STA info at
67 * the same time, sta_info_unlink() can clear the STA info pointer it is
68 * passed to indicate that the STA info is owned by somebody else now.
69 *
70 * If sta_info_unlink() did not clear the pointer then the caller owns
71 * the STA info structure now and is responsible of destroying it with
72 * a call to sta_info_destroy().
73 *
74 * In all other cases, there is no concept of ownership on a STA entry,
75 * each structure is owned by the global hash table/list until it is
76 * removed. All users of the structure need to be RCU protected so that
77 * the structure won't be freed before they are done using it.
78 */ 62 */
79 63
80/* Caller must hold local->sta_lock */ 64/* Caller must hold local->sta_lock */
@@ -103,15 +87,51 @@ static int sta_info_hash_del(struct ieee80211_local *local,
103} 87}
104 88
105/* protected by RCU */ 89/* protected by RCU */
106struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) 90struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
91 const u8 *addr)
107{ 92{
93 struct ieee80211_local *local = sdata->local;
108 struct sta_info *sta; 94 struct sta_info *sta;
109 95
110 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 96 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
97 rcu_read_lock_held() ||
98 lockdep_is_held(&local->sta_lock) ||
99 lockdep_is_held(&local->sta_mtx));
111 while (sta) { 100 while (sta) {
112 if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 101 if (sta->sdata == sdata &&
102 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
113 break; 103 break;
114 sta = rcu_dereference(sta->hnext); 104 sta = rcu_dereference_check(sta->hnext,
105 rcu_read_lock_held() ||
106 lockdep_is_held(&local->sta_lock) ||
107 lockdep_is_held(&local->sta_mtx));
108 }
109 return sta;
110}
111
112/*
113 * Get sta info either from the specified interface
114 * or from one of its vlans
115 */
116struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
117 const u8 *addr)
118{
119 struct ieee80211_local *local = sdata->local;
120 struct sta_info *sta;
121
122 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
123 rcu_read_lock_held() ||
124 lockdep_is_held(&local->sta_lock) ||
125 lockdep_is_held(&local->sta_mtx));
126 while (sta) {
127 if ((sta->sdata == sdata ||
128 sta->sdata->bss == sdata->bss) &&
129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
130 break;
131 sta = rcu_dereference_check(sta->hnext,
132 rcu_read_lock_held() ||
133 lockdep_is_held(&local->sta_lock) ||
134 lockdep_is_held(&local->sta_mtx));
115 } 135 }
116 return sta; 136 return sta;
117} 137}
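
The lookup change above is the heart of this hunk: the hash is still keyed by MAC address, but every chain entry now also records which interface it belongs to, and sta_info_get_bss() additionally accepts entries from interfaces that share the same BSS (the AP/VLAN case). A small self-contained C model of that chained lookup; struct layout, hash function and helper names are invented for the sketch.

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN  6
	#define HASH_SIZE 16

	struct iface { int id; int bss_id; };          /* stands in for sdata / sdata->bss */

	struct sta {
		unsigned char addr[ETH_ALEN];
		const struct iface *iface;
		struct sta *hnext;                     /* hash-chain link */
	};

	static struct sta *hash[HASH_SIZE];

	static unsigned int sta_hash(const unsigned char *addr)
	{
		return addr[5] % HASH_SIZE;            /* toy hash, like STA_HASH() */
	}

	/* exact-interface match, like sta_info_get() */
	static struct sta *sta_get(const struct iface *ifc, const unsigned char *addr)
	{
		struct sta *s;

		for (s = hash[sta_hash(addr)]; s; s = s->hnext)
			if (s->iface == ifc && !memcmp(s->addr, addr, ETH_ALEN))
				return s;
		return NULL;
	}

	/* same interface or any interface sharing its BSS, like sta_info_get_bss() */
	static struct sta *sta_get_bss(const struct iface *ifc, const unsigned char *addr)
	{
		struct sta *s;

		for (s = hash[sta_hash(addr)]; s; s = s->hnext)
			if ((s->iface == ifc || s->iface->bss_id == ifc->bss_id) &&
			    !memcmp(s->addr, addr, ETH_ALEN))
				return s;
		return NULL;
	}

	int main(void)
	{
		struct iface ap = { 1, 7 }, vlan = { 2, 7 };
		struct sta peer = { {0, 1, 2, 3, 4, 5}, &vlan, NULL };

		hash[sta_hash(peer.addr)] = &peer;

		printf("get on ap:     %p\n", (void *)sta_get(&ap, peer.addr));
		printf("get_bss on ap: %p\n", (void *)sta_get_bss(&ap, peer.addr));
		return 0;
	}

The rcu_dereference_check() calls in the real code add a lockdep assertion on top of this: the chain may only be walked inside an RCU read section or with sta_lock/sta_mtx held.
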
@@ -161,101 +181,6 @@ static void __sta_info_free(struct ieee80211_local *local,
161 kfree(sta); 181 kfree(sta);
162} 182}
163 183
164void sta_info_destroy(struct sta_info *sta)
165{
166 struct ieee80211_local *local;
167 struct sk_buff *skb;
168 int i;
169
170 might_sleep();
171
172 if (!sta)
173 return;
174
175 local = sta->local;
176
177 cancel_work_sync(&sta->drv_unblock_wk);
178
179 rate_control_remove_sta_debugfs(sta);
180 ieee80211_sta_debugfs_remove(sta);
181
182#ifdef CONFIG_MAC80211_MESH
183 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
184 mesh_plink_deactivate(sta);
185#endif
186
187 /*
188 * We have only unlinked the key, and actually destroying it
189 * may mean it is removed from hardware which requires that
190 * the key->sta pointer is still valid, so flush the key todo
191 * list here.
192 *
193 * ieee80211_key_todo() will synchronize_rcu() so after this
194 * nothing can reference this sta struct any more.
195 */
196 ieee80211_key_todo();
197
198#ifdef CONFIG_MAC80211_MESH
199 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
200 del_timer_sync(&sta->plink_timer);
201#endif
202
203 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
204 local->total_ps_buffered--;
205 dev_kfree_skb_any(skb);
206 }
207
208 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
209 dev_kfree_skb_any(skb);
210
211 for (i = 0; i < STA_TID_NUM; i++) {
212 struct tid_ampdu_rx *tid_rx;
213 struct tid_ampdu_tx *tid_tx;
214
215 spin_lock_bh(&sta->lock);
216 tid_rx = sta->ampdu_mlme.tid_rx[i];
217 /* Make sure timer won't free the tid_rx struct, see below */
218 if (tid_rx)
219 tid_rx->shutdown = true;
220
221 spin_unlock_bh(&sta->lock);
222
223 /*
224 * Outside spinlock - shutdown is true now so that the timer
225 * won't free tid_rx, we have to do that now. Can't let the
226 * timer do it because we have to sync the timer outside the
227 * lock that it takes itself.
228 */
229 if (tid_rx) {
230 del_timer_sync(&tid_rx->session_timer);
231 kfree(tid_rx);
232 }
233
234 /*
235 * No need to do such complications for TX agg sessions, the
236 * path leading to freeing the tid_tx struct goes via a call
237 * from the driver, and thus needs to look up the sta struct
238 * again, which cannot be found when we get here. Hence, we
239 * just need to delete the timer and free the aggregation
240 * info; we won't be telling the peer about it then but that
241 * doesn't matter if we're not talking to it again anyway.
242 */
243 tid_tx = sta->ampdu_mlme.tid_tx[i];
244 if (tid_tx) {
245 del_timer_sync(&tid_tx->addba_resp_timer);
246 /*
247 * STA removed while aggregation session being
248 * started? Bit odd, but purge frames anyway.
249 */
250 skb_queue_purge(&tid_tx->pending);
251 kfree(tid_tx);
252 }
253 }
254
255 __sta_info_free(local, sta);
256}
257
258
259/* Caller must hold local->sta_lock */ 184/* Caller must hold local->sta_lock */
260static void sta_info_hash_add(struct ieee80211_local *local, 185static void sta_info_hash_add(struct ieee80211_local *local,
261 struct sta_info *sta) 186 struct sta_info *sta)
@@ -352,7 +277,93 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
352 return sta; 277 return sta;
353} 278}
354 279
355int sta_info_insert(struct sta_info *sta) 280static int sta_info_finish_insert(struct sta_info *sta, bool async)
281{
282 struct ieee80211_local *local = sta->local;
283 struct ieee80211_sub_if_data *sdata = sta->sdata;
284 struct station_info sinfo;
285 unsigned long flags;
286 int err = 0;
287
288 WARN_ON(!mutex_is_locked(&local->sta_mtx));
289
290 /* notify driver */
291 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
292 sdata = container_of(sdata->bss,
293 struct ieee80211_sub_if_data,
294 u.ap);
295 err = drv_sta_add(local, sdata, &sta->sta);
296 if (err) {
297 if (!async)
298 return err;
299 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
300 " - keeping it anyway.\n",
301 sdata->name, sta->sta.addr, err);
302 } else {
303 sta->uploaded = true;
304#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
305 if (async)
306 printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
307 wiphy_name(local->hw.wiphy), sta->sta.addr);
308#endif
309 }
310
311 sdata = sta->sdata;
312
313 if (!async) {
314 local->num_sta++;
315 local->sta_generation++;
316 smp_mb();
317
318 /* make the station visible */
319 spin_lock_irqsave(&local->sta_lock, flags);
320 sta_info_hash_add(local, sta);
321 spin_unlock_irqrestore(&local->sta_lock, flags);
322 }
323
324 list_add(&sta->list, &local->sta_list);
325
326 ieee80211_sta_debugfs_add(sta);
327 rate_control_add_sta_debugfs(sta);
328
329 sinfo.filled = 0;
330 sinfo.generation = local->sta_generation;
331 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
332
333
334 return 0;
335}
336
337static void sta_info_finish_pending(struct ieee80211_local *local)
338{
339 struct sta_info *sta;
340 unsigned long flags;
341
342 spin_lock_irqsave(&local->sta_lock, flags);
343 while (!list_empty(&local->sta_pending_list)) {
344 sta = list_first_entry(&local->sta_pending_list,
345 struct sta_info, list);
346 list_del(&sta->list);
347 spin_unlock_irqrestore(&local->sta_lock, flags);
348
349 sta_info_finish_insert(sta, true);
350
351 spin_lock_irqsave(&local->sta_lock, flags);
352 }
353 spin_unlock_irqrestore(&local->sta_lock, flags);
354}
355
356static void sta_info_finish_work(struct work_struct *work)
357{
358 struct ieee80211_local *local =
359 container_of(work, struct ieee80211_local, sta_finish_work);
360
361 mutex_lock(&local->sta_mtx);
362 sta_info_finish_pending(local);
363 mutex_unlock(&local->sta_mtx);
364}
365
366int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
356{ 367{
357 struct ieee80211_local *local = sta->local; 368 struct ieee80211_local *local = sta->local;
358 struct ieee80211_sub_if_data *sdata = sta->sdata; 369 struct ieee80211_sub_if_data *sdata = sta->sdata;
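
The sta_info_finish_* helpers above implement a classic deferred-completion pattern: atomic inserts (the IBSS/RX path) only do the spinlock-protected part and park the entry on sta_pending_list, and a work item later finishes the sleeping part under the mutex, dropping the spinlock while it handles each entry. A hedged pthread-based sketch of the same drain loop, collapsing the kernel's two locks into one for brevity:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int id;
		struct entry *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ sta_lock */
	static struct entry *pending;                                  /* ~ sta_pending_list */

	/* Fast path: may run in a context that must not sleep. */
	static void add_pending(int id)
	{
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return;
		e->id = id;
		pthread_mutex_lock(&list_lock);
		e->next = pending;
		pending = e;
		pthread_mutex_unlock(&list_lock);
	}

	/* Worker: finish each entry with the list lock dropped, like sta_info_finish_pending(). */
	static void finish_pending(void)
	{
		pthread_mutex_lock(&list_lock);
		while (pending) {
			struct entry *e = pending;

			pending = e->next;
			pthread_mutex_unlock(&list_lock);

			printf("finishing entry %d (may sleep here)\n", e->id);
			free(e);

			pthread_mutex_lock(&list_lock);
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		add_pending(1);
		add_pending(2);
		finish_pending();
		return 0;
	}
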
@@ -364,38 +375,90 @@ int sta_info_insert(struct sta_info *sta)
364 * something inserts a STA (on one CPU) without holding the RTNL 375 * something inserts a STA (on one CPU) without holding the RTNL
365 * and another CPU turns off the net device. 376 * and another CPU turns off the net device.
366 */ 377 */
367 if (unlikely(!netif_running(sdata->dev))) { 378 if (unlikely(!ieee80211_sdata_running(sdata))) {
368 err = -ENETDOWN; 379 err = -ENETDOWN;
380 rcu_read_lock();
369 goto out_free; 381 goto out_free;
370 } 382 }
371 383
372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 || 384 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
373 is_multicast_ether_addr(sta->sta.addr))) { 385 is_multicast_ether_addr(sta->sta.addr))) {
374 err = -EINVAL; 386 err = -EINVAL;
387 rcu_read_lock();
375 goto out_free; 388 goto out_free;
376 } 389 }
377 390
391 /*
392 * In ad-hoc mode, we sometimes need to insert stations
393 * from tasklet context from the RX path. To avoid races,
394 * always do so in that case -- see the comment below.
395 */
396 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
397 spin_lock_irqsave(&local->sta_lock, flags);
398 /* check if STA exists already */
399 if (sta_info_get_bss(sdata, sta->sta.addr)) {
400 spin_unlock_irqrestore(&local->sta_lock, flags);
401 rcu_read_lock();
402 err = -EEXIST;
403 goto out_free;
404 }
405
406 local->num_sta++;
407 local->sta_generation++;
408 smp_mb();
409 sta_info_hash_add(local, sta);
410
411 list_add_tail(&sta->list, &local->sta_pending_list);
412
413 rcu_read_lock();
414 spin_unlock_irqrestore(&local->sta_lock, flags);
415
416#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
417 printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
418 wiphy_name(local->hw.wiphy), sta->sta.addr);
419#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
420
421 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
422
423 return 0;
424 }
425
426 /*
427 * On first glance, this will look racy, because the code
428 * below this point, which inserts a station with sleeping,
429 * unlocks the sta_lock between checking existence in the
430 * hash table and inserting into it.
431 *
432 * However, it is not racy against itself because it keeps
433 * the mutex locked. It still seems to race against the
434 * above code that atomically inserts the station... That,
435 * however, is not true because the above code can only
436 * be invoked for IBSS interfaces, and the below code will
437 * not be -- and the two do not race against each other as
438 * the hash table also keys off the interface.
439 */
440
441 might_sleep();
442
443 mutex_lock(&local->sta_mtx);
444
378 spin_lock_irqsave(&local->sta_lock, flags); 445 spin_lock_irqsave(&local->sta_lock, flags);
379 /* check if STA exists already */ 446 /* check if STA exists already */
380 if (sta_info_get(local, sta->sta.addr)) { 447 if (sta_info_get_bss(sdata, sta->sta.addr)) {
381 spin_unlock_irqrestore(&local->sta_lock, flags); 448 spin_unlock_irqrestore(&local->sta_lock, flags);
449 mutex_unlock(&local->sta_mtx);
450 rcu_read_lock();
382 err = -EEXIST; 451 err = -EEXIST;
383 goto out_free; 452 goto out_free;
384 } 453 }
385 list_add(&sta->list, &local->sta_list);
386 local->sta_generation++;
387 local->num_sta++;
388 sta_info_hash_add(local, sta);
389 454
390 /* notify driver */ 455 spin_unlock_irqrestore(&local->sta_lock, flags);
391 if (local->ops->sta_notify) {
392 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
393 sdata = container_of(sdata->bss,
394 struct ieee80211_sub_if_data,
395 u.ap);
396 456
397 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); 457 err = sta_info_finish_insert(sta, false);
398 sdata = sta->sdata; 458 if (err) {
459 mutex_unlock(&local->sta_mtx);
460 rcu_read_lock();
461 goto out_free;
399 } 462 }
400 463
401#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 464#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -403,18 +466,9 @@ int sta_info_insert(struct sta_info *sta)
403 wiphy_name(local->hw.wiphy), sta->sta.addr); 466 wiphy_name(local->hw.wiphy), sta->sta.addr);
404#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 467#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
405 468
406 spin_unlock_irqrestore(&local->sta_lock, flags); 469 /* move reference to rcu-protected */
407 470 rcu_read_lock();
408#ifdef CONFIG_MAC80211_DEBUGFS 471 mutex_unlock(&local->sta_mtx);
409 /*
410 * Debugfs entry adding might sleep, so schedule process
411 * context task for adding entry for STAs that do not yet
412 * have one.
413 * NOTE: due to auto-freeing semantics this may only be done
414 * if the insertion is successful!
415 */
416 schedule_work(&local->sta_debugfs_add);
417#endif
418 472
419 if (ieee80211_vif_is_mesh(&sdata->vif)) 473 if (ieee80211_vif_is_mesh(&sdata->vif))
420 mesh_accept_plinks_update(sdata); 474 mesh_accept_plinks_update(sdata);
@@ -426,6 +480,15 @@ int sta_info_insert(struct sta_info *sta)
426 return err; 480 return err;
427} 481}
428 482
483int sta_info_insert(struct sta_info *sta)
484{
485 int err = sta_info_insert_rcu(sta);
486
487 rcu_read_unlock();
488
489 return err;
490}
491
429static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 492static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
430{ 493{
431 /* 494 /*
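
The tiny wrapper added in the previous hunk is worth a second look: sta_info_insert_rcu() returns with the RCU read lock held so the caller can keep using the entry, while sta_info_insert() is simply "insert, then drop the read lock". The same shape, modelled with a pthread rwlock purely for illustration (this is not how RCU itself works, only how the two entry points relate):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int table_value;

	/* Returns with the read lock held, like sta_info_insert_rcu(). */
	static int insert_locked(int value)
	{
		pthread_rwlock_wrlock(&table_lock);
		table_value = value;
		pthread_rwlock_unlock(&table_lock);

		pthread_rwlock_rdlock(&table_lock);     /* hand a read-side reference back */
		return 0;
	}

	/* Convenience wrapper for callers that do not need the entry afterwards. */
	static int insert(int value)
	{
		int err = insert_locked(value);

		pthread_rwlock_unlock(&table_lock);     /* drop the read lock either way */
		return err;
	}

	int main(void)
	{
		insert(42);

		if (insert_locked(43) == 0) {
			printf("still safe to read: %d\n", table_value);
			pthread_rwlock_unlock(&table_lock);
		}
		return 0;
	}
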
@@ -494,108 +557,6 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
494 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 557 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
495} 558}
496 559
497static void __sta_info_unlink(struct sta_info **sta)
498{
499 struct ieee80211_local *local = (*sta)->local;
500 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
501 /*
502 * pull caller's reference if we're already gone.
503 */
504 if (sta_info_hash_del(local, *sta)) {
505 *sta = NULL;
506 return;
507 }
508
509 if ((*sta)->key) {
510 ieee80211_key_free((*sta)->key);
511 WARN_ON((*sta)->key);
512 }
513
514 list_del(&(*sta)->list);
515 (*sta)->dead = true;
516
517 if (test_and_clear_sta_flags(*sta,
518 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
519 BUG_ON(!sdata->bss);
520
521 atomic_dec(&sdata->bss->num_sta_ps);
522 __sta_info_clear_tim_bit(sdata->bss, *sta);
523 }
524
525 local->num_sta--;
526 local->sta_generation++;
527
528 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
529 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
530
531 if (local->ops->sta_notify) {
532 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
533 sdata = container_of(sdata->bss,
534 struct ieee80211_sub_if_data,
535 u.ap);
536
537 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
538 &(*sta)->sta);
539 sdata = (*sta)->sdata;
540 }
541
542 if (ieee80211_vif_is_mesh(&sdata->vif)) {
543 mesh_accept_plinks_update(sdata);
544#ifdef CONFIG_MAC80211_MESH
545 del_timer(&(*sta)->plink_timer);
546#endif
547 }
548
549#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
550 printk(KERN_DEBUG "%s: Removed STA %pM\n",
551 wiphy_name(local->hw.wiphy), (*sta)->sta.addr);
552#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
553
554 /*
555 * Finally, pull caller's reference if the STA is pinned by the
556 * task that is adding the debugfs entries. In that case, we
557 * leave the STA "to be freed".
558 *
559 * The rules are not trivial, but not too complex either:
560 * (1) pin_status is only modified under the sta_lock
561 * (2) STAs may only be pinned under the RTNL so that
562 * sta_info_flush() is guaranteed to actually destroy
563 * all STAs that are active for a given interface, this
564 * is required for correctness because otherwise we
565 * could notify a driver that an interface is going
566 * away and only after that (!) notify it about a STA
567 * on that interface going away.
568 * (3) sta_info_debugfs_add_work() will set the status
569 * to PINNED when it found an item that needs a new
570 * debugfs directory created. In that case, that item
571 * must not be freed although all *RCU* users are done
572 * with it. Hence, we tell the caller of _unlink()
573 * that the item is already gone (as can happen when
574 * two tasks try to unlink/destroy at the same time)
575 * (4) We set the pin_status to DESTROY here when we
576 * find such an item.
577 * (5) sta_info_debugfs_add_work() will reset the pin_status
578 * from PINNED to NORMAL when it is done with the item,
579 * but will check for DESTROY before resetting it in
580 * which case it will free the item.
581 */
582 if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
583 (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
584 *sta = NULL;
585 return;
586 }
587}
588
589void sta_info_unlink(struct sta_info **sta)
590{
591 struct ieee80211_local *local = (*sta)->local;
592 unsigned long flags;
593
594 spin_lock_irqsave(&local->sta_lock, flags);
595 __sta_info_unlink(sta);
596 spin_unlock_irqrestore(&local->sta_lock, flags);
597}
598
599static int sta_info_buffer_expired(struct sta_info *sta, 560static int sta_info_buffer_expired(struct sta_info *sta,
600 struct sk_buff *skb) 561 struct sk_buff *skb)
601{ 562{
@@ -652,109 +613,209 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
652 } 613 }
653} 614}
654 615
655 616static int __must_check __sta_info_destroy(struct sta_info *sta)
656static void sta_info_cleanup(unsigned long data)
657{ 617{
658 struct ieee80211_local *local = (struct ieee80211_local *) data; 618 struct ieee80211_local *local;
659 struct sta_info *sta; 619 struct ieee80211_sub_if_data *sdata;
620 struct sk_buff *skb;
621 unsigned long flags;
622 int ret, i;
660 623
661 rcu_read_lock(); 624 might_sleep();
662 list_for_each_entry_rcu(sta, &local->sta_list, list)
663 sta_info_cleanup_expire_buffered(local, sta);
664 rcu_read_unlock();
665 625
666 if (local->quiescing) 626 if (!sta)
667 return; 627 return -ENOENT;
668 628
669 local->sta_cleanup.expires = 629 local = sta->local;
670 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 630 sdata = sta->sdata;
671 add_timer(&local->sta_cleanup);
672}
673 631
674#ifdef CONFIG_MAC80211_DEBUGFS 632 spin_lock_irqsave(&local->sta_lock, flags);
675/* 633 ret = sta_info_hash_del(local, sta);
676 * See comment in __sta_info_unlink, 634 /* this might still be the pending list ... which is fine */
677 * caller must hold local->sta_lock. 635 if (!ret)
678 */ 636 list_del(&sta->list);
679static void __sta_info_pin(struct sta_info *sta) 637 spin_unlock_irqrestore(&local->sta_lock, flags);
680{ 638 if (ret)
681 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL); 639 return ret;
682 sta->pin_status = STA_INFO_PIN_STAT_PINNED; 640
641 if (sta->key) {
642 ieee80211_key_free(sta->key);
643 /*
644 * We have only unlinked the key, and actually destroying it
645 * may mean it is removed from hardware which requires that
646 * the key->sta pointer is still valid, so flush the key todo
647 * list here.
648 *
649 * ieee80211_key_todo() will synchronize_rcu() so after this
650 * nothing can reference this sta struct any more.
651 */
652 ieee80211_key_todo();
653
654 WARN_ON(sta->key);
655 }
656
657 sta->dead = true;
658
659 if (test_and_clear_sta_flags(sta,
660 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
661 BUG_ON(!sdata->bss);
662
663 atomic_dec(&sdata->bss->num_sta_ps);
664 __sta_info_clear_tim_bit(sdata->bss, sta);
665 }
666
667 local->num_sta--;
668 local->sta_generation++;
669
670 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
671 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
672
673 if (sta->uploaded) {
674 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
675 sdata = container_of(sdata->bss,
676 struct ieee80211_sub_if_data,
677 u.ap);
678 drv_sta_remove(local, sdata, &sta->sta);
679 sdata = sta->sdata;
680 }
681
682#ifdef CONFIG_MAC80211_MESH
683 if (ieee80211_vif_is_mesh(&sdata->vif)) {
684 mesh_accept_plinks_update(sdata);
685 del_timer(&sta->plink_timer);
686 }
687#endif
688
689#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
690 printk(KERN_DEBUG "%s: Removed STA %pM\n",
691 wiphy_name(local->hw.wiphy), sta->sta.addr);
692#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
693 cancel_work_sync(&sta->drv_unblock_wk);
694
695 rate_control_remove_sta_debugfs(sta);
696 ieee80211_sta_debugfs_remove(sta);
697
698#ifdef CONFIG_MAC80211_MESH
699 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
700 mesh_plink_deactivate(sta);
701 del_timer_sync(&sta->plink_timer);
702 }
703#endif
704
705 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
706 local->total_ps_buffered--;
707 dev_kfree_skb_any(skb);
708 }
709
710 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
711 dev_kfree_skb_any(skb);
712
713 for (i = 0; i < STA_TID_NUM; i++) {
714 struct tid_ampdu_rx *tid_rx;
715 struct tid_ampdu_tx *tid_tx;
716
717 spin_lock_bh(&sta->lock);
718 tid_rx = sta->ampdu_mlme.tid_rx[i];
719 /* Make sure timer won't free the tid_rx struct, see below */
720 if (tid_rx)
721 tid_rx->shutdown = true;
722
723 spin_unlock_bh(&sta->lock);
724
725 /*
726 * Outside spinlock - shutdown is true now so that the timer
727 * won't free tid_rx, we have to do that now. Can't let the
728 * timer do it because we have to sync the timer outside the
729 * lock that it takes itself.
730 */
731 if (tid_rx) {
732 del_timer_sync(&tid_rx->session_timer);
733 kfree(tid_rx);
734 }
735
736 /*
737 * No need to do such complications for TX agg sessions, the
738 * path leading to freeing the tid_tx struct goes via a call
739 * from the driver, and thus needs to look up the sta struct
740 * again, which cannot be found when we get here. Hence, we
741 * just need to delete the timer and free the aggregation
742 * info; we won't be telling the peer about it then but that
743 * doesn't matter if we're not talking to it again anyway.
744 */
745 tid_tx = sta->ampdu_mlme.tid_tx[i];
746 if (tid_tx) {
747 del_timer_sync(&tid_tx->addba_resp_timer);
748 /*
749 * STA removed while aggregation session being
750 * started? Bit odd, but purge frames anyway.
751 */
752 skb_queue_purge(&tid_tx->pending);
753 kfree(tid_tx);
754 }
755 }
756
757 __sta_info_free(local, sta);
758
759 return 0;
683} 760}
684 761
685/* 762int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
686 * See comment in __sta_info_unlink, returns sta if it
687 * needs to be destroyed.
688 */
689static struct sta_info *__sta_info_unpin(struct sta_info *sta)
690{ 763{
691 struct sta_info *ret = NULL; 764 struct sta_info *sta;
692 unsigned long flags; 765 int ret;
693 766
694 spin_lock_irqsave(&sta->local->sta_lock, flags); 767 mutex_lock(&sdata->local->sta_mtx);
695 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY && 768 sta = sta_info_get(sdata, addr);
696 sta->pin_status != STA_INFO_PIN_STAT_PINNED); 769 ret = __sta_info_destroy(sta);
697 if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY) 770 mutex_unlock(&sdata->local->sta_mtx);
698 ret = sta;
699 sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
700 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
701 771
702 return ret; 772 return ret;
703} 773}
704 774
705static void sta_info_debugfs_add_work(struct work_struct *work) 775int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
776 const u8 *addr)
706{ 777{
707 struct ieee80211_local *local = 778 struct sta_info *sta;
708 container_of(work, struct ieee80211_local, sta_debugfs_add); 779 int ret;
709 struct sta_info *sta, *tmp;
710 unsigned long flags;
711 780
712 /* We need to keep the RTNL across the whole pinned status. */ 781 mutex_lock(&sdata->local->sta_mtx);
713 rtnl_lock(); 782 sta = sta_info_get_bss(sdata, addr);
714 while (1) { 783 ret = __sta_info_destroy(sta);
715 sta = NULL; 784 mutex_unlock(&sdata->local->sta_mtx);
716 785
717 spin_lock_irqsave(&local->sta_lock, flags); 786 return ret;
718 list_for_each_entry(tmp, &local->sta_list, list) { 787}
719 /*
720 * debugfs.add_has_run will be set by
721 * ieee80211_sta_debugfs_add regardless
722 * of what else it does.
723 */
724 if (!tmp->debugfs.add_has_run) {
725 sta = tmp;
726 __sta_info_pin(sta);
727 break;
728 }
729 }
730 spin_unlock_irqrestore(&local->sta_lock, flags);
731 788
732 if (!sta) 789static void sta_info_cleanup(unsigned long data)
733 break; 790{
791 struct ieee80211_local *local = (struct ieee80211_local *) data;
792 struct sta_info *sta;
734 793
735 ieee80211_sta_debugfs_add(sta); 794 rcu_read_lock();
736 rate_control_add_sta_debugfs(sta); 795 list_for_each_entry_rcu(sta, &local->sta_list, list)
796 sta_info_cleanup_expire_buffered(local, sta);
797 rcu_read_unlock();
737 798
738 sta = __sta_info_unpin(sta); 799 if (local->quiescing)
739 sta_info_destroy(sta); 800 return;
740 } 801
741 rtnl_unlock(); 802 local->sta_cleanup.expires =
803 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
804 add_timer(&local->sta_cleanup);
742} 805}
743#endif
744 806
745void sta_info_init(struct ieee80211_local *local) 807void sta_info_init(struct ieee80211_local *local)
746{ 808{
747 spin_lock_init(&local->sta_lock); 809 spin_lock_init(&local->sta_lock);
810 mutex_init(&local->sta_mtx);
748 INIT_LIST_HEAD(&local->sta_list); 811 INIT_LIST_HEAD(&local->sta_list);
812 INIT_LIST_HEAD(&local->sta_pending_list);
813 INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
749 814
750 setup_timer(&local->sta_cleanup, sta_info_cleanup, 815 setup_timer(&local->sta_cleanup, sta_info_cleanup,
751 (unsigned long)local); 816 (unsigned long)local);
752 local->sta_cleanup.expires = 817 local->sta_cleanup.expires =
753 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 818 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
754
755#ifdef CONFIG_MAC80211_DEBUGFS
756 INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
757#endif
758} 819}
759 820
760int sta_info_start(struct ieee80211_local *local) 821int sta_info_start(struct ieee80211_local *local)
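
__sta_info_destroy() above follows a common two-phase teardown: unlink the entry from the hash and list while holding the spinlock, then do all of the sleeping work (timer sync, queue purging, freeing) after the lock is dropped. Reduced to its skeleton in userspace C; names and locking are illustrative, and the kernel version additionally has to let an RCU grace period pass before the final free, which is omitted here.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int id;
		struct node *next;
	};

	static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;

	static int node_add(int id)
	{
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return -1;
		n->id = id;
		pthread_mutex_lock(&index_lock);
		n->next = head;
		head = n;
		pthread_mutex_unlock(&index_lock);
		return 0;
	}

	/* Phase 1: unlink under the lock; phase 2: tear down without it. */
	static int node_destroy(int id)
	{
		struct node **pp, *victim = NULL;

		pthread_mutex_lock(&index_lock);
		for (pp = &head; *pp; pp = &(*pp)->next) {
			if ((*pp)->id == id) {
				victim = *pp;
				*pp = victim->next;     /* no new lookups can find it */
				break;
			}
		}
		pthread_mutex_unlock(&index_lock);

		if (!victim)
			return -1;                      /* like -ENOENT */

		/* slow teardown happens here: cancel timers, flush queues, ... */
		printf("tearing down %d outside the lock\n", victim->id);
		free(victim);
		return 0;
	}

	int main(void)
	{
		node_add(1);
		node_add(2);
		node_destroy(1);
		node_destroy(3);
		return 0;
	}
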
@@ -766,16 +827,6 @@ int sta_info_start(struct ieee80211_local *local)
766void sta_info_stop(struct ieee80211_local *local) 827void sta_info_stop(struct ieee80211_local *local)
767{ 828{
768 del_timer(&local->sta_cleanup); 829 del_timer(&local->sta_cleanup);
769#ifdef CONFIG_MAC80211_DEBUGFS
770 /*
771 * Make sure the debugfs adding work isn't pending after this
772 * because we're about to be destroyed. It doesn't matter
773 * whether it ran or not since we're going to flush all STAs
774 * anyway.
775 */
776 cancel_work_sync(&local->sta_debugfs_add);
777#endif
778
779 sta_info_flush(local, NULL); 830 sta_info_flush(local, NULL);
780} 831}
781 832
@@ -791,26 +842,19 @@ int sta_info_flush(struct ieee80211_local *local,
791 struct ieee80211_sub_if_data *sdata) 842 struct ieee80211_sub_if_data *sdata)
792{ 843{
793 struct sta_info *sta, *tmp; 844 struct sta_info *sta, *tmp;
794 LIST_HEAD(tmp_list);
795 int ret = 0; 845 int ret = 0;
796 unsigned long flags;
797 846
798 might_sleep(); 847 might_sleep();
799 848
800 spin_lock_irqsave(&local->sta_lock, flags); 849 mutex_lock(&local->sta_mtx);
850
851 sta_info_finish_pending(local);
852
801 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 853 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
802 if (!sdata || sdata == sta->sdata) { 854 if (!sdata || sdata == sta->sdata)
803 __sta_info_unlink(&sta); 855 WARN_ON(__sta_info_destroy(sta));
804 if (sta) {
805 list_add_tail(&sta->list, &tmp_list);
806 ret++;
807 }
808 }
809 } 856 }
810 spin_unlock_irqrestore(&local->sta_lock, flags); 857 mutex_unlock(&local->sta_mtx);
811
812 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
813 sta_info_destroy(sta);
814 858
815 return ret; 859 return ret;
816} 860}
@@ -820,34 +864,28 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
820{ 864{
821 struct ieee80211_local *local = sdata->local; 865 struct ieee80211_local *local = sdata->local;
822 struct sta_info *sta, *tmp; 866 struct sta_info *sta, *tmp;
823 LIST_HEAD(tmp_list);
824 unsigned long flags;
825 867
826 spin_lock_irqsave(&local->sta_lock, flags); 868 mutex_lock(&local->sta_mtx);
827 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 869 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
828 if (time_after(jiffies, sta->last_rx + exp_time)) { 870 if (time_after(jiffies, sta->last_rx + exp_time)) {
829#ifdef CONFIG_MAC80211_IBSS_DEBUG 871#ifdef CONFIG_MAC80211_IBSS_DEBUG
830 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 872 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
831 sdata->dev->name, sta->sta.addr); 873 sdata->name, sta->sta.addr);
832#endif 874#endif
833 __sta_info_unlink(&sta); 875 WARN_ON(__sta_info_destroy(sta));
834 if (sta)
835 list_add(&sta->list, &tmp_list);
836 } 876 }
837 spin_unlock_irqrestore(&local->sta_lock, flags); 877 mutex_unlock(&local->sta_mtx);
838
839 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
840 sta_info_destroy(sta);
841} 878}
842 879
843struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, 880struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
844 const u8 *addr) 881 const u8 *addr)
845{ 882{
846 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); 883 struct sta_info *sta, *nxt;
847 884
848 if (!sta) 885 /* Just return a random station ... first in list ... */
849 return NULL; 886 for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
850 return &sta->sta; 887 return &sta->sta;
888 return NULL;
851} 889}
852EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); 890EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
853 891
@@ -872,7 +910,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
872 struct ieee80211_local *local = sdata->local; 910 struct ieee80211_local *local = sdata->local;
873 int sent, buffered; 911 int sent, buffered;
874 912
875 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); 913 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
876 914
877 if (!skb_queue_empty(&sta->ps_tx_buf)) 915 if (!skb_queue_empty(&sta->ps_tx_buf))
878 sta_info_clear_tim_bit(sta); 916 sta_info_clear_tim_bit(sta);
@@ -885,7 +923,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
885 923
886#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 924#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
887 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 925 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
888 "since STA not sleeping anymore\n", sdata->dev->name, 926 "since STA not sleeping anymore\n", sdata->name,
889 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 927 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
890#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 928#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
891} 929}
@@ -944,7 +982,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
944 */ 982 */
945 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " 983 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
946 "though there are no buffered frames for it\n", 984 "though there are no buffered frames for it\n",
947 sdata->dev->name, sta->sta.addr); 985 sdata->name, sta->sta.addr);
948#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 986#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
949 } 987 }
950} 988}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94f..822d84522937 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,6 +42,9 @@
42 * be in the queues 42 * be in the queues
43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
44 * station in power-save mode, reply when the driver unblocks. 44 * station in power-save mode, reply when the driver unblocks.
45 * @WLAN_STA_DISASSOC: Disassociation in progress.
46 * This is used to reject TX BA session requests when disassociation
47 * is in progress.
45 */ 48 */
46enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
47 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -57,6 +60,7 @@ enum ieee80211_sta_info_flags {
57 WLAN_STA_SUSPEND = 1<<11, 60 WLAN_STA_SUSPEND = 1<<11,
58 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
59 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_DISASSOC = 1<<14,
60}; 64};
61 65
62#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -162,11 +166,6 @@ struct sta_ampdu_mlme {
162}; 166};
163 167
164 168
165/* see __sta_info_unlink */
166#define STA_INFO_PIN_STAT_NORMAL 0
167#define STA_INFO_PIN_STAT_PINNED 1
168#define STA_INFO_PIN_STAT_DESTROY 2
169
170/** 169/**
171 * struct sta_info - STA information 170 * struct sta_info - STA information
172 * 171 *
@@ -187,7 +186,6 @@ struct sta_ampdu_mlme {
187 * @flaglock: spinlock for flags accesses 186 * @flaglock: spinlock for flags accesses
188 * @drv_unblock_wk: used for driver PS unblocking 187 * @drv_unblock_wk: used for driver PS unblocking
189 * @listen_interval: listen interval of this station, when we're acting as AP 188 * @listen_interval: listen interval of this station, when we're acting as AP
190 * @pin_status: used internally for pinning a STA struct into memory
191 * @flags: STA flags, see &enum ieee80211_sta_info_flags 189 * @flags: STA flags, see &enum ieee80211_sta_info_flags
192 * @ps_tx_buf: buffer of frames to transmit to this station 190 * @ps_tx_buf: buffer of frames to transmit to this station
193 * when it leaves power saving state 191 * when it leaves power saving state
@@ -226,6 +224,7 @@ struct sta_ampdu_mlme {
226 * @debugfs: debug filesystem info 224 * @debugfs: debug filesystem info
227 * @sta: station information we share with the driver 225 * @sta: station information we share with the driver
228 * @dead: set to true when sta is unlinked 226 * @dead: set to true when sta is unlinked
227 * @uploaded: set to true when sta is uploaded to the driver
229 */ 228 */
230struct sta_info { 229struct sta_info {
231 /* General information, mostly static */ 230 /* General information, mostly static */
@@ -245,11 +244,7 @@ struct sta_info {
245 244
246 bool dead; 245 bool dead;
247 246
248 /* 247 bool uploaded;
249 * for use by the internal lifetime management,
250 * see __sta_info_unlink
251 */
252 u8 pin_status;
253 248
254 /* 249 /*
255 * frequently updated, locked with own spinlock (flaglock), 250 * frequently updated, locked with own spinlock (flaglock),
@@ -403,9 +398,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
403#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) 398#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
404 399
405/* 400/*
406 * Get a STA info, must have be under RCU read lock. 401 * Get a STA info, must be under RCU read lock.
407 */ 402 */
408struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr); 403struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
404 const u8 *addr);
405
406struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
407 const u8 *addr);
408
409static inline
410void for_each_sta_info_type_check(struct ieee80211_local *local,
411 const u8 *addr,
412 struct sta_info *sta,
413 struct sta_info *nxt)
414{
415}
416
417#define for_each_sta_info(local, _addr, sta, nxt) \
418 for ( /* initialise loop */ \
419 sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
420 nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
421 /* typecheck */ \
422 for_each_sta_info_type_check(local, (_addr), sta, nxt), \
423 /* continue condition */ \
424 sta; \
425 /* advance loop */ \
426 sta = nxt, \
427 nxt = sta ? rcu_dereference(sta->hnext) : NULL \
428 ) \
429 /* compare address and run code only if it matches */ \
430 if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
431
409/* 432/*
410 * Get STA info by index, BROKEN! 433 * Get STA info by index, BROKEN!
411 */ 434 */
@@ -421,18 +444,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
421 * Insert STA info into hash table/list, returns zero or a 444 * Insert STA info into hash table/list, returns zero or a
422 * -EEXIST if the same MAC address is already present. 445 * -EEXIST if the same MAC address is already present.
423 * 446 *
424 * Calling this without RCU protection makes the caller 447 * Calling the non-RCU version makes the caller relinquish
425 * relinquish its reference to @sta. 448 * its reference to @sta; the _rcu version calls rcu_read_lock()
449 * and must be called without it held.
426 */ 450 */
427int sta_info_insert(struct sta_info *sta); 451int sta_info_insert(struct sta_info *sta);
428/* 452int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
429 * Unlink a STA info from the hash table/list. 453int sta_info_insert_atomic(struct sta_info *sta);
430 * This can NULL the STA pointer if somebody else 454
431 * has already unlinked it. 455int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
432 */ 456 const u8 *addr);
433void sta_info_unlink(struct sta_info **sta); 457int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
458 const u8 *addr);
434 459
435void sta_info_destroy(struct sta_info *sta);
436void sta_info_set_tim_bit(struct sta_info *sta); 460void sta_info_set_tim_bit(struct sta_info *sta);
437void sta_info_clear_tim_bit(struct sta_info *sta); 461void sta_info_clear_tim_bit(struct sta_info *sta);
438 462
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7b..56d5b9a6ec5b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -45,29 +45,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
46 46
47 /* 47 /*
48 * XXX: This is temporary! 48 * This skb 'survived' a round-trip through the driver, and
49 * 49 * hopefully the driver didn't mangle it too badly. However,
50 * The problem here is that when we get here, the driver will 50 * we can definitely not rely on the the control information
51 * quite likely have pretty much overwritten info->control by 51 * being correct. Clear it so we don't get junk there, and
52 * using info->driver_data or info->rate_driver_data. Thus, 52 * indicate that it needs new processing, but must not be
53 * when passing out the frame to the driver again, we would be 53 * modified/encrypted again.
54 * passing completely bogus data since the driver would then
55 * expect a properly filled info->control. In mac80211 itself
56 * the same problem occurs, since we need info->control.vif
57 * internally.
58 *
59 * To fix this, we should send the frame through TX processing
60 * again. However, it's not that simple, since the frame will
61 * have been software-encrypted (if applicable) already, and
62 * encrypting it again doesn't do much good. So to properly do
63 * that, we not only have to skip the actual 'raw' encryption
64 * (key selection etc. still has to be done!) but also the
65 * sequence number assignment since that impacts the crypto
66 * encapsulation, of course.
67 *
68 * Hence, for now, fix the bug by just dropping the frame.
69 */ 54 */
70 goto drop; 55 memset(&info->control, 0, sizeof(info->control));
56
57 info->control.jiffies = jiffies;
58 info->control.vif = &sta->sdata->vif;
59 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
60 IEEE80211_TX_INTFL_RETRANSMISSION;
71 61
72 sta->tx_filtered_count++; 62 sta->tx_filtered_count++;
73 63
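
Instead of dropping filtered frames, the new code above scrubs the driver-clobbered control area, stamps it with fresh bookkeeping, and flags the skb so the TX path reprocesses it without encrypting it again. The general "reset metadata and requeue with flags" idea, sketched in plain C with invented structures and flag values:

	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	#define FLAG_NEED_TXPROCESSING  0x1
	#define FLAG_RETRANSMISSION     0x2

	struct packet {
		unsigned int flags;
		struct {                        /* ~ info->control: drivers may clobber this */
			long timestamp;
			void *owner;
		} control;
		const char *payload;
	};

	/* Prepare a bounced packet for another pass through the TX path. */
	static void requeue_filtered(struct packet *pkt, void *owner)
	{
		memset(&pkt->control, 0, sizeof(pkt->control));
		pkt->control.timestamp = (long)time(NULL);
		pkt->control.owner = owner;
		pkt->flags |= FLAG_NEED_TXPROCESSING | FLAG_RETRANSMISSION;
	}

	int main(void)
	{
		struct packet pkt = { 0, { 0, NULL }, "hello" };
		int fake_owner;

		requeue_filtered(&pkt, &fake_owner);
		printf("flags=%#x payload=%s\n", pkt.flags, pkt.payload);
		return 0;
	}
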
@@ -122,7 +112,6 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
122 return; 112 return;
123 } 113 }
124 114
125 drop:
126#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 115#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
127 if (net_ratelimit()) 116 if (net_ratelimit())
128 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 117 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
@@ -134,6 +123,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
134 dev_kfree_skb(skb); 123 dev_kfree_skb(skb);
135} 124}
136 125
126static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
127{
128 struct ieee80211_mgmt *mgmt = (void *) skb->data;
129 struct ieee80211_local *local = sta->local;
130 struct ieee80211_sub_if_data *sdata = sta->sdata;
131
132 if (ieee80211_is_action(mgmt->frame_control) &&
133 sdata->vif.type == NL80211_IFTYPE_STATION &&
134 mgmt->u.action.category == WLAN_CATEGORY_HT &&
135 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
136 /*
137 * This update looks racy, but isn't -- if we come
138 * here we've definitely got a station that we're
139 * talking to, and on a managed interface that can
140 * only be the AP. And the only other place updating
141 * this variable is before we're associated.
142 */
143 switch (mgmt->u.action.u.ht_smps.smps_control) {
144 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
145 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
146 break;
147 case WLAN_HT_SMPS_CONTROL_STATIC:
148 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
149 break;
150 case WLAN_HT_SMPS_CONTROL_DISABLED:
151 default: /* shouldn't happen since we don't send that */
152 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
153 break;
154 }
155
156 ieee80211_queue_work(&local->hw, &local->recalc_smps);
157 }
158}
159
137void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 160void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
138{ 161{
139 struct sk_buff *skb2; 162 struct sk_buff *skb2;
@@ -146,7 +169,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
146 struct ieee80211_tx_status_rtap_hdr *rthdr; 169 struct ieee80211_tx_status_rtap_hdr *rthdr;
147 struct ieee80211_sub_if_data *sdata; 170 struct ieee80211_sub_if_data *sdata;
148 struct net_device *prev_dev = NULL; 171 struct net_device *prev_dev = NULL;
149 struct sta_info *sta; 172 struct sta_info *sta, *tmp;
150 int retry_count = -1, i; 173 int retry_count = -1, i;
151 bool injected; 174 bool injected;
152 175
@@ -165,10 +188,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
165 rcu_read_lock(); 188 rcu_read_lock();
166 189
167 sband = local->hw.wiphy->bands[info->band]; 190 sband = local->hw.wiphy->bands[info->band];
191 fc = hdr->frame_control;
168 192
169 sta = sta_info_get(local, hdr->addr1); 193 for_each_sta_info(local, hdr->addr1, sta, tmp) {
194 /* skip wrong virtual interface */
195 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
196 continue;
170 197
171 if (sta) {
172 if (!(info->flags & IEEE80211_TX_STAT_ACK) && 198 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
173 test_sta_flags(sta, WLAN_STA_PS_STA)) { 199 test_sta_flags(sta, WLAN_STA_PS_STA)) {
174 /* 200 /*
@@ -180,8 +206,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
180 return; 206 return;
181 } 207 }
182 208
183 fc = hdr->frame_control;
184
185 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && 209 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
186 (ieee80211_is_data_qos(fc))) { 210 (ieee80211_is_data_qos(fc))) {
187 u16 tid, ssn; 211 u16 tid, ssn;
@@ -208,6 +232,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
208 rate_control_tx_status(local, sband, sta, skb); 232 rate_control_tx_status(local, sband, sta, skb);
209 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) 233 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
210 ieee80211s_update_metric(local, sta, skb); 234 ieee80211s_update_metric(local, sta, skb);
235
236 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
237 (info->flags & IEEE80211_TX_STAT_ACK))
238 ieee80211_frame_acked(sta, skb);
211 } 239 }
212 240
213 rcu_read_unlock(); 241 rcu_read_unlock();
@@ -246,6 +274,25 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
246 local->dot11FailedCount++; 274 local->dot11FailedCount++;
247 } 275 }
248 276
277 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
278 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
279 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
280 local->ps_sdata && !(local->scanning)) {
281 if (info->flags & IEEE80211_TX_STAT_ACK) {
282 local->ps_sdata->u.mgd.flags |=
283 IEEE80211_STA_NULLFUNC_ACKED;
284 ieee80211_queue_work(&local->hw,
285 &local->dynamic_ps_enable_work);
286 } else
287 mod_timer(&local->dynamic_ps_timer, jiffies +
288 msecs_to_jiffies(10));
289 }
290
291 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
292 cfg80211_action_tx_status(
293 skb->dev, (unsigned long) skb, skb->data, skb->len,
294 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
295
249 /* this was a transmitted frame, but now we want to reuse it */ 296 /* this was a transmitted frame, but now we want to reuse it */
250 skb_orphan(skb); 297 skb_orphan(skb);
251 298
@@ -311,7 +358,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
311 rcu_read_lock(); 358 rcu_read_lock();
312 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 359 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
313 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { 360 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
314 if (!netif_running(sdata->dev)) 361 if (!ieee80211_sdata_running(sdata))
315 continue; 362 continue;
316 363
317 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && 364 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
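
The status.c changes above stop dropping TX-filtered frames (they are now flagged for re-processing instead) and add several paths -- SMPS action handling, nullfunc-ACK dynamic power save, cfg80211 action status -- that only trigger when the driver reports ACK information. A minimal, hedged sketch of the driver side follows; it uses only names visible in the diff (ieee80211_tx_status(), IEEE80211_TX_STAT_ACK, IEEE80211_HW_REPORTS_TX_ACK_STATUS), while the completion hook itself is hypothetical.

/*
 * Illustrative only: a hypothetical driver TX-completion hook feeding
 * the ACK-dependent logic in ieee80211_tx_status() above.
 */
static void my_driver_tx_complete(struct ieee80211_hw *hw,
				  struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Report the ACK only if this hardware can actually know it;
	 * the nullfunc/PS and action-frame paths key off this flag. */
	if (acked && (hw->flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(hw, skb);
}
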
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c7..7ef491e9d66d 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); 100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; 101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
102 } 102 }
103 ctx->initialized = 1; 103 ctx->state = TKIP_STATE_PHASE1_DONE;
104} 104}
105 105
106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, 106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
183 /* Update the p1k only when the iv16 in the packet wraps around, this 183 /* Update the p1k only when the iv16 in the packet wraps around, this
184 * might occur after the wrap around of iv16 in the key in case of 184 * might occur after the wrap around of iv16 in the key in case of
185 * fragmented packets. */ 185 * fragmented packets. */
186 if (iv16 == 0 || !ctx->initialized) 186 if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); 187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
188 188
189 if (type == IEEE80211_TKIP_P1_KEY) { 189 if (type == IEEE80211_TKIP_P1_KEY) {
@@ -195,11 +195,13 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
195} 195}
196EXPORT_SYMBOL(ieee80211_get_tkip_key); 196EXPORT_SYMBOL(ieee80211_get_tkip_key);
197 197
198/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the 198/*
199 * Encrypt packet payload with TKIP using @key. @pos is a pointer to the
199 * beginning of the buffer containing payload. This payload must include 200 * beginning of the buffer containing payload. This payload must include
200 * headroom of eight octets for IV and Ext. IV and taildroom of four octets 201 * the IV/Ext.IV and space for (taildroom) four octets for ICV.
201 * for ICV. @payload_len is the length of payload (_not_ including extra 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
202 * headroom and tailroom). @ta is the transmitter addresses. */ 203 * @ta is the transmitter addresses.
204 */
203void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
204 struct ieee80211_key *key, 206 struct ieee80211_key *key,
205 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
@@ -209,12 +211,11 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
209 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; 211 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
210 212
211 /* Calculate per-packet key */ 213 /* Calculate per-packet key */
212 if (ctx->iv16 == 0 || !ctx->initialized) 214 if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
213 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); 215 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
214 216
215 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
216 218
217 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
218 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 219 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
219} 220}
220 221
@@ -259,7 +260,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
259 if ((keyid >> 6) != key->conf.keyidx) 260 if ((keyid >> 6) != key->conf.keyidx)
260 return TKIP_DECRYPT_INVALID_KEYIDX; 261 return TKIP_DECRYPT_INVALID_KEYIDX;
261 262
262 if (key->u.tkip.rx[queue].initialized && 263 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
263 (iv32 < key->u.tkip.rx[queue].iv32 || 264 (iv32 < key->u.tkip.rx[queue].iv32 ||
264 (iv32 == key->u.tkip.rx[queue].iv32 && 265 (iv32 == key->u.tkip.rx[queue].iv32 &&
265 iv16 <= key->u.tkip.rx[queue].iv16))) { 266 iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +276,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
275 276
276 if (only_iv) { 277 if (only_iv) {
277 res = TKIP_DECRYPT_OK; 278 res = TKIP_DECRYPT_OK;
278 key->u.tkip.rx[queue].initialized = 1; 279 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
279 goto done; 280 goto done;
280 } 281 }
281 282
282 if (!key->u.tkip.rx[queue].initialized || 283 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
283 key->u.tkip.rx[queue].iv32 != iv32) { 284 key->u.tkip.rx[queue].iv32 != iv32) {
284 /* IV16 wrapped around - perform TKIP phase 1 */ 285 /* IV16 wrapped around - perform TKIP phase 1 */
285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 286 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +300,18 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
299 printk("\n"); 300 printk("\n");
300 } 301 }
301#endif 302#endif
302 if (key->local->ops->update_tkip_key && 303 }
303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 if (key->local->ops->update_tkip_key &&
304 static const u8 bcast[ETH_ALEN] = 305 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 306 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
306 const u8 *sta_addr = key->sta->sta.addr; 307 struct ieee80211_sub_if_data *sdata = key->sdata;
307 308
308 if (is_multicast_ether_addr(ra)) 309 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
309 sta_addr = bcast; 310 sdata = container_of(key->sdata->bss,
310 311 struct ieee80211_sub_if_data, u.ap);
311 drv_update_tkip_key(key->local, &key->conf, sta_addr, 312 drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
312 iv32, key->u.tkip.rx[queue].p1k); 313 iv32, key->u.tkip.rx[queue].p1k);
313 } 314 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
314 } 315 }
315 316
316 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 317 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
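
The tkip.c hunk replaces the per-context 'initialized' boolean with a three-state field so that the phase-1 key is recomputed only when iv16 wraps (iv32 changes) and is pushed to hardware at most once per phase-1 result. A self-contained sketch of that decision logic, using the enum names from the diff but a hypothetical context struct:

/* Stand-alone, illustrative only: models the tri-state RX key state
 * introduced above. Enum names mirror the diff; the struct is not the
 * kernel's tkip_ctx. */
#include <stdbool.h>
#include <stdint.h>

enum tkip_state {
	TKIP_STATE_NOT_INIT,
	TKIP_STATE_PHASE1_DONE,
	TKIP_STATE_PHASE1_HW_UPLOADED,
};

struct tkip_rx_ctx {
	enum tkip_state state;
	uint32_t iv32;
	uint16_t iv16;
};

/* Phase 1 must be (re)run when the context was never mixed or when
 * iv32 changed, i.e. iv16 wrapped -- the same checks as in the diff. */
static bool tkip_needs_phase1(const struct tkip_rx_ctx *ctx, uint32_t iv32)
{
	return ctx->state == TKIP_STATE_NOT_INIT || ctx->iv32 != iv32;
}

/* The hardware re-upload is skipped once the current phase-1 key has
 * already been pushed; that is exactly what the third state encodes. */
static bool tkip_needs_hw_upload(const struct tkip_rx_ctx *ctx)
{
	return ctx->state != TKIP_STATE_PHASE1_HW_UPLOADED;
}
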
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ac210b586702..cfc473e1b050 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
180} 180}
181 181
182/* tx handlers */ 182/* tx handlers */
183static ieee80211_tx_result debug_noinline
184ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
185{
186 struct ieee80211_local *local = tx->local;
187 struct ieee80211_if_managed *ifmgd;
188
189 /* driver doesn't support power save */
190 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
191 return TX_CONTINUE;
192
193 /* hardware does dynamic power save */
194 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
195 return TX_CONTINUE;
196
197 /* dynamic power save disabled */
198 if (local->hw.conf.dynamic_ps_timeout <= 0)
199 return TX_CONTINUE;
200
201 /* we are scanning, don't enable power save */
202 if (local->scanning)
203 return TX_CONTINUE;
204
205 if (!local->ps_sdata)
206 return TX_CONTINUE;
207
208 /* No point if we're going to suspend */
209 if (local->quiescing)
210 return TX_CONTINUE;
211
212 /* dynamic ps is supported only in managed mode */
213 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
214 return TX_CONTINUE;
215
216 ifmgd = &tx->sdata->u.mgd;
217
218 /*
219 * Don't wakeup from power save if u-apsd is enabled, voip ac has
220 * u-apsd enabled and the frame is in voip class. This effectively
221 * means that even if all access categories have u-apsd enabled, in
222 * practise u-apsd is only used with the voip ac. This is a
223 * workaround for the case when received voip class packets do not
224 * have correct qos tag for some reason, due the network or the
225 * peer application.
226 *
227 * Note: local->uapsd_queues access is racy here. If the value is
228 * changed via debugfs, user needs to reassociate manually to have
229 * everything in sync.
230 */
231 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
232 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
233 && skb_get_queue_mapping(tx->skb) == 0)
234 return TX_CONTINUE;
235
236 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
237 ieee80211_stop_queues_by_reason(&local->hw,
238 IEEE80211_QUEUE_STOP_REASON_PS);
239 ieee80211_queue_work(&local->hw,
240 &local->dynamic_ps_disable_work);
241 }
242
243 mod_timer(&local->dynamic_ps_timer, jiffies +
244 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
245
246 return TX_CONTINUE;
247}
183 248
184static ieee80211_tx_result debug_noinline 249static ieee80211_tx_result debug_noinline
185ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 250ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
223#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 288#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
224 printk(KERN_DEBUG "%s: dropped data frame to not " 289 printk(KERN_DEBUG "%s: dropped data frame to not "
225 "associated station %pM\n", 290 "associated station %pM\n",
226 tx->dev->name, hdr->addr1); 291 tx->sdata->name, hdr->addr1);
227#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 292#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
228 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 293 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
229 return TX_DROP; 294 return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
331#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 396#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
332 if (net_ratelimit()) 397 if (net_ratelimit())
333 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", 398 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
334 tx->dev->name); 399 tx->sdata->name);
335#endif 400#endif
336 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 401 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
337 } else 402 } else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
391 if (net_ratelimit()) { 456 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: STA %pM TX " 457 printk(KERN_DEBUG "%s: STA %pM TX "
393 "buffer full - dropping oldest frame\n", 458 "buffer full - dropping oldest frame\n",
394 tx->dev->name, sta->sta.addr); 459 tx->sdata->name, sta->sta.addr);
395 } 460 }
396#endif 461#endif
397 dev_kfree_skb(old); 462 dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
416#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 481#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
417 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 482 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
418 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 483 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
419 "set -> send frame\n", tx->dev->name, 484 "set -> send frame\n", tx->sdata->name,
420 sta->sta.addr); 485 sta->sta.addr);
421 } 486 }
422#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 487#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -464,6 +529,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
464 tx->key = NULL; 529 tx->key = NULL;
465 530
466 if (tx->key) { 531 if (tx->key) {
532 bool skip_hw = false;
533
467 tx->key->tx_rx_count++; 534 tx->key->tx_rx_count++;
468 /* TODO: add threshold stuff again */ 535 /* TODO: add threshold stuff again */
469 536
@@ -480,16 +547,32 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
480 !ieee80211_use_mfp(hdr->frame_control, tx->sta, 547 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
481 tx->skb)) 548 tx->skb))
482 tx->key = NULL; 549 tx->key = NULL;
550 else
551 skip_hw = (tx->key->conf.flags &
552 IEEE80211_KEY_FLAG_SW_MGMT) &&
553 ieee80211_is_mgmt(hdr->frame_control);
483 break; 554 break;
484 case ALG_AES_CMAC: 555 case ALG_AES_CMAC:
485 if (!ieee80211_is_mgmt(hdr->frame_control)) 556 if (!ieee80211_is_mgmt(hdr->frame_control))
486 tx->key = NULL; 557 tx->key = NULL;
487 break; 558 break;
488 } 559 }
560
561 if (!skip_hw && tx->key &&
562 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
563 info->control.hw_key = &tx->key->conf;
489 } 564 }
490 565
491 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 566 return TX_CONTINUE;
492 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 567}
568
569static ieee80211_tx_result debug_noinline
570ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
571{
572 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
573
574 if (tx->sta && tx->sta->uploaded)
575 info->control.sta = &tx->sta->sta;
493 576
494 return TX_CONTINUE; 577 return TX_CONTINUE;
495} 578}
@@ -519,7 +602,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
519 txrc.bss_conf = &tx->sdata->vif.bss_conf; 602 txrc.bss_conf = &tx->sdata->vif.bss_conf;
520 txrc.skb = tx->skb; 603 txrc.skb = tx->skb;
521 txrc.reported_rate.idx = -1; 604 txrc.reported_rate.idx = -1;
522 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 605 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
606 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
607 txrc.max_rate_idx = -1;
608 else
609 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
610 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
523 611
524 /* set up RTS protection if desired */ 612 /* set up RTS protection if desired */
525 if (len > tx->local->hw.wiphy->rts_threshold) { 613 if (len > tx->local->hw.wiphy->rts_threshold) {
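
The rate-control hunk above converts the per-interface bitrate mask into the legacy max_rate_idx cap: a mask covering every bitrate means "no restriction" (-1), otherwise the index of the highest set bit (fls() - 1) is used. A stand-alone user-space sketch of that conversion, with __builtin_clz standing in for the kernel's fls():

#include <stdint.h>
#include <stdio.h>

/* rate_idx_mask is assumed non-zero, as it is in the kernel path above */
static int mask_to_max_rate_idx(uint32_t rate_idx_mask, int n_bitrates)
{
	if (rate_idx_mask == (1u << n_bitrates) - 1)
		return -1;	/* every rate allowed: no cap */

	/* fls(mask) - 1 == index of the highest set bit */
	return 31 - __builtin_clz(rate_idx_mask);
}

int main(void)
{
	/* 12 bitrates, mask restricted to the lowest four rates */
	printf("%d\n", mask_to_max_rate_idx(0x00f, 12));	/* -> 3 */
	printf("%d\n", mask_to_max_rate_idx(0xfff, 12));	/* -> -1 */
	return 0;
}
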
@@ -549,7 +637,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
549 "%s: Dropped data frame as no usable bitrate found while " 637 "%s: Dropped data frame as no usable bitrate found while "
550 "scanning and associated. Target station: " 638 "scanning and associated. Target station: "
551 "%pM on %d GHz band\n", 639 "%pM on %d GHz band\n",
552 tx->dev->name, hdr->addr1, 640 tx->sdata->name, hdr->addr1,
553 tx->channel->band ? 5 : 2)) 641 tx->channel->band ? 5 : 2))
554 return TX_DROP; 642 return TX_DROP;
555 643
@@ -664,17 +752,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
664} 752}
665 753
666static ieee80211_tx_result debug_noinline 754static ieee80211_tx_result debug_noinline
667ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
668{
669 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
670
671 if (tx->sta)
672 info->control.sta = &tx->sta->sta;
673
674 return TX_CONTINUE;
675}
676
677static ieee80211_tx_result debug_noinline
678ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) 755ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
679{ 756{
680 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 757 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -933,7 +1010,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
933 (struct ieee80211_radiotap_header *) skb->data; 1010 (struct ieee80211_radiotap_header *) skb->data;
934 struct ieee80211_supported_band *sband; 1011 struct ieee80211_supported_band *sband;
935 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1012 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
936 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 1013 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1014 NULL);
937 1015
938 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 1016 sband = tx->local->hw.wiphy->bands[tx->channel->band];
939 1017
@@ -969,7 +1047,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
969 * because it will be recomputed and added 1047 * because it will be recomputed and added
970 * on transmission 1048 * on transmission
971 */ 1049 */
972 if (skb->len < (iterator.max_length + FCS_LEN)) 1050 if (skb->len < (iterator._max_length + FCS_LEN))
973 return false; 1051 return false;
974 1052
975 skb_trim(skb, skb->len - FCS_LEN); 1053 skb_trim(skb, skb->len - FCS_LEN);
@@ -996,10 +1074,10 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
996 1074
997 /* 1075 /*
998 * remove the radiotap header 1076 * remove the radiotap header
999 * iterator->max_length was sanity-checked against 1077 * iterator->_max_length was sanity-checked against
1000 * skb->len by iterator init 1078 * skb->len by iterator init
1001 */ 1079 */
1002 skb_pull(skb, iterator.max_length); 1080 skb_pull(skb, iterator._max_length);
1003 1081
1004 return true; 1082 return true;
1005} 1083}
@@ -1021,7 +1099,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1021 1099
1022 memset(tx, 0, sizeof(*tx)); 1100 memset(tx, 0, sizeof(*tx));
1023 tx->skb = skb; 1101 tx->skb = skb;
1024 tx->dev = sdata->dev; /* use original interface */
1025 tx->local = local; 1102 tx->local = local;
1026 tx->sdata = sdata; 1103 tx->sdata = sdata;
1027 tx->channel = local->hw.conf.channel; 1104 tx->channel = local->hw.conf.channel;
@@ -1032,7 +1109,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1032 tx->flags |= IEEE80211_TX_FRAGMENTED; 1109 tx->flags |= IEEE80211_TX_FRAGMENTED;
1033 1110
1034 /* process and remove the injection radiotap header */ 1111 /* process and remove the injection radiotap header */
1035 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { 1112 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1036 if (!__ieee80211_parse_tx_radiotap(tx, skb)) 1113 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1037 return TX_DROP; 1114 return TX_DROP;
1038 1115
@@ -1041,6 +1118,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1041 * the radiotap header that was present and pre-filled 1118 * the radiotap header that was present and pre-filled
1042 * 'tx' with tx control information. 1119 * 'tx' with tx control information.
1043 */ 1120 */
1121 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1044 } 1122 }
1045 1123
1046 /* 1124 /*
@@ -1052,10 +1130,15 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1052 1130
1053 hdr = (struct ieee80211_hdr *) skb->data; 1131 hdr = (struct ieee80211_hdr *) skb->data;
1054 1132
1055 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1133 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1056 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1134 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1135 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1136 return TX_DROP;
1137 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1138 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1139 }
1057 if (!tx->sta) 1140 if (!tx->sta)
1058 tx->sta = sta_info_get(local, hdr->addr1); 1141 tx->sta = sta_info_get(sdata, hdr->addr1);
1059 1142
1060 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1143 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1061 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1144 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1207,6 +1290,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1207static int invoke_tx_handlers(struct ieee80211_tx_data *tx) 1290static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1208{ 1291{
1209 struct sk_buff *skb = tx->skb; 1292 struct sk_buff *skb = tx->skb;
1293 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1210 ieee80211_tx_result res = TX_DROP; 1294 ieee80211_tx_result res = TX_DROP;
1211 1295
1212#define CALL_TXH(txh) \ 1296#define CALL_TXH(txh) \
@@ -1216,13 +1300,18 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1216 goto txh_done; \ 1300 goto txh_done; \
1217 } while (0) 1301 } while (0)
1218 1302
1303 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1219 CALL_TXH(ieee80211_tx_h_check_assoc); 1304 CALL_TXH(ieee80211_tx_h_check_assoc);
1220 CALL_TXH(ieee80211_tx_h_ps_buf); 1305 CALL_TXH(ieee80211_tx_h_ps_buf);
1221 CALL_TXH(ieee80211_tx_h_select_key); 1306 CALL_TXH(ieee80211_tx_h_select_key);
1222 CALL_TXH(ieee80211_tx_h_michael_mic_add); 1307 CALL_TXH(ieee80211_tx_h_sta);
1223 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) 1308 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1224 CALL_TXH(ieee80211_tx_h_rate_ctrl); 1309 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1225 CALL_TXH(ieee80211_tx_h_misc); 1310
1311 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
1312 goto txh_done;
1313
1314 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1226 CALL_TXH(ieee80211_tx_h_sequence); 1315 CALL_TXH(ieee80211_tx_h_sequence);
1227 CALL_TXH(ieee80211_tx_h_fragment); 1316 CALL_TXH(ieee80211_tx_h_fragment);
1228 /* handlers after fragment must be aware of tx info fragmentation! */ 1317 /* handlers after fragment must be aware of tx info fragmentation! */
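
invoke_tx_handlers() above gains an early exit: a frame flagged IEEE80211_TX_INTFL_RETRANSMISSION (the re-queued filtered frame from status.c) skips the Michael MIC, sequence and later crypto handlers so it is not encrypted or renumbered a second time. A stand-alone sketch of that handler-chain pattern; all names here are illustrative, not mac80211 API:

#include <stdbool.h>

enum tx_result { TX_CONTINUE, TX_DROP };

struct tx_data {
	bool retransmission;	/* mirrors IEEE80211_TX_INTFL_RETRANSMISSION */
};

static enum tx_result tx_h_select_key(struct tx_data *tx)  { (void)tx; return TX_CONTINUE; }
static enum tx_result tx_h_michael_mic(struct tx_data *tx) { (void)tx; return TX_CONTINUE; }
static enum tx_result tx_h_sequence(struct tx_data *tx)    { (void)tx; return TX_CONTINUE; }

#define CALL_TXH(txh)			\
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto done;	\
	} while (0)

static int invoke_handlers(struct tx_data *tx)
{
	enum tx_result res = TX_CONTINUE;

	CALL_TXH(tx_h_select_key);

	/* already MIC'ed/numbered/encrypted once -- don't do it again */
	if (tx->retransmission)
		goto done;

	CALL_TXH(tx_h_michael_mic);
	CALL_TXH(tx_h_sequence);
 done:
	return res == TX_DROP ? -1 : 0;
}
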
@@ -1398,34 +1487,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1398 return 0; 1487 return 0;
1399} 1488}
1400 1489
1401static bool need_dynamic_ps(struct ieee80211_local *local)
1402{
1403 /* driver doesn't support power save */
1404 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1405 return false;
1406
1407 /* hardware does dynamic power save */
1408 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1409 return false;
1410
1411 /* dynamic power save disabled */
1412 if (local->hw.conf.dynamic_ps_timeout <= 0)
1413 return false;
1414
1415 /* we are scanning, don't enable power save */
1416 if (local->scanning)
1417 return false;
1418
1419 if (!local->ps_sdata)
1420 return false;
1421
1422 /* No point if we're going to suspend */
1423 if (local->quiescing)
1424 return false;
1425
1426 return true;
1427}
1428
1429static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1490static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1430 struct sk_buff *skb) 1491 struct sk_buff *skb)
1431{ 1492{
@@ -1436,25 +1497,14 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1436 int headroom; 1497 int headroom;
1437 bool may_encrypt; 1498 bool may_encrypt;
1438 1499
1439 if (need_dynamic_ps(local)) {
1440 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1441 ieee80211_stop_queues_by_reason(&local->hw,
1442 IEEE80211_QUEUE_STOP_REASON_PS);
1443 ieee80211_queue_work(&local->hw,
1444 &local->dynamic_ps_disable_work);
1445 }
1446
1447 mod_timer(&local->dynamic_ps_timer, jiffies +
1448 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1449 }
1450
1451 rcu_read_lock(); 1500 rcu_read_lock();
1452 1501
1453 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { 1502 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1454 int hdrlen; 1503 int hdrlen;
1455 u16 len_rthdr; 1504 u16 len_rthdr;
1456 1505
1457 info->flags |= IEEE80211_TX_CTL_INJECTED; 1506 info->flags |= IEEE80211_TX_CTL_INJECTED |
1507 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1458 1508
1459 len_rthdr = ieee80211_get_radiotap_len(skb->data); 1509 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1460 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); 1510 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
@@ -1474,11 +1524,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1474 1524
1475 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, 1525 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1476 list) { 1526 list) {
1477 if (!netif_running(tmp_sdata->dev)) 1527 if (!ieee80211_sdata_running(tmp_sdata))
1478 continue; 1528 continue;
1479 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1529 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1480 continue; 1530 continue;
1481 if (compare_ether_addr(tmp_sdata->dev->dev_addr, 1531 if (compare_ether_addr(tmp_sdata->vif.addr,
1482 hdr->addr2) == 0) { 1532 hdr->addr2) == 0) {
1483 sdata = tmp_sdata; 1533 sdata = tmp_sdata;
1484 break; 1534 break;
@@ -1642,7 +1692,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1642 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1692 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1643 /* RA TA DA SA */ 1693 /* RA TA DA SA */
1644 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); 1694 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1645 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1695 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1646 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1696 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1647 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1697 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1648 hdrlen = 30; 1698 hdrlen = 30;
@@ -1656,7 +1706,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1656 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1706 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1657 /* DA BSSID SA */ 1707 /* DA BSSID SA */
1658 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1708 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1659 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1709 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1660 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1710 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1661 hdrlen = 24; 1711 hdrlen = 24;
1662 break; 1712 break;
@@ -1664,7 +1714,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1664 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1714 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1665 /* RA TA DA SA */ 1715 /* RA TA DA SA */
1666 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1716 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1667 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1717 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1668 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1718 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1669 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1719 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1670 hdrlen = 30; 1720 hdrlen = 30;
@@ -1678,8 +1728,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1678 goto fail; 1728 goto fail;
1679 } 1729 }
1680 1730
1681 if (compare_ether_addr(dev->dev_addr, 1731 if (compare_ether_addr(sdata->vif.addr,
1682 skb->data + ETH_ALEN) == 0) { 1732 skb->data + ETH_ALEN) == 0) {
1683 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1733 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1684 skb->data, skb->data + ETH_ALEN); 1734 skb->data, skb->data + ETH_ALEN);
1685 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1735 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1709,7 +1759,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1709 } 1759 }
1710 } 1760 }
1711 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1761 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1712 mesh_da, dev->dev_addr); 1762 mesh_da, sdata->vif.addr);
1713 rcu_read_unlock(); 1763 rcu_read_unlock();
1714 if (is_mesh_mcast) 1764 if (is_mesh_mcast)
1715 meshhdrlen = 1765 meshhdrlen =
@@ -1734,7 +1784,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1734 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { 1784 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1735 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1785 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1736 /* RA TA DA SA */ 1786 /* RA TA DA SA */
1737 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1787 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1738 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1788 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1739 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1789 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1740 hdrlen = 30; 1790 hdrlen = 30;
@@ -1765,9 +1815,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1765 */ 1815 */
1766 if (!is_multicast_ether_addr(hdr.addr1)) { 1816 if (!is_multicast_ether_addr(hdr.addr1)) {
1767 rcu_read_lock(); 1817 rcu_read_lock();
1768 sta = sta_info_get(local, hdr.addr1); 1818 sta = sta_info_get(sdata, hdr.addr1);
1769 /* XXX: in the future, use sdata to look up the sta */ 1819 if (sta)
1770 if (sta && sta->sdata == sdata)
1771 sta_flags = get_sta_flags(sta); 1820 sta_flags = get_sta_flags(sta);
1772 rcu_read_unlock(); 1821 rcu_read_unlock();
1773 } 1822 }
@@ -1786,7 +1835,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1786 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1835 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1787 !(sta_flags & WLAN_STA_AUTHORIZED) && 1836 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1788 !(ethertype == ETH_P_PAE && 1837 !(ethertype == ETH_P_PAE &&
1789 compare_ether_addr(dev->dev_addr, 1838 compare_ether_addr(sdata->vif.addr,
1790 skb->data + ETH_ALEN) == 0))) { 1839 skb->data + ETH_ALEN) == 0))) {
1791#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1840#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1792 if (net_ratelimit()) 1841 if (net_ratelimit())
@@ -1926,7 +1975,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1926 ieee80211_tx(sdata, skb, true); 1975 ieee80211_tx(sdata, skb, true);
1927 } else { 1976 } else {
1928 hdr = (struct ieee80211_hdr *)skb->data; 1977 hdr = (struct ieee80211_hdr *)skb->data;
1929 sta = sta_info_get(local, hdr->addr1); 1978 sta = sta_info_get(sdata, hdr->addr1);
1930 1979
1931 ret = __ieee80211_tx(local, &skb, sta, true); 1980 ret = __ieee80211_tx(local, &skb, sta, true);
1932 if (ret != IEEE80211_TX_OK) 1981 if (ret != IEEE80211_TX_OK)
@@ -1942,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1942void ieee80211_tx_pending(unsigned long data) 1991void ieee80211_tx_pending(unsigned long data)
1943{ 1992{
1944 struct ieee80211_local *local = (struct ieee80211_local *)data; 1993 struct ieee80211_local *local = (struct ieee80211_local *)data;
1994 struct ieee80211_sub_if_data *sdata;
1945 unsigned long flags; 1995 unsigned long flags;
1946 int i; 1996 int i;
1947 bool txok; 1997 bool txok;
@@ -1980,6 +2030,11 @@ void ieee80211_tx_pending(unsigned long data)
1980 if (!txok) 2030 if (!txok)
1981 break; 2031 break;
1982 } 2032 }
2033
2034 if (skb_queue_empty(&local->pending[i]))
2035 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2036 netif_tx_wake_queue(
2037 netdev_get_tx_queue(sdata->dev, i));
1983 } 2038 }
1984 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2039 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1985 2040
@@ -2062,6 +2117,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2062 struct beacon_data *beacon; 2117 struct beacon_data *beacon;
2063 struct ieee80211_supported_band *sband; 2118 struct ieee80211_supported_band *sband;
2064 enum ieee80211_band band = local->hw.conf.channel->band; 2119 enum ieee80211_band band = local->hw.conf.channel->band;
2120 struct ieee80211_tx_rate_control txrc;
2065 2121
2066 sband = local->hw.wiphy->bands[band]; 2122 sband = local->hw.wiphy->bands[band];
2067 2123
@@ -2150,8 +2206,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2150 mgmt->frame_control = 2206 mgmt->frame_control =
2151 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2207 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2152 memset(mgmt->da, 0xff, ETH_ALEN); 2208 memset(mgmt->da, 0xff, ETH_ALEN);
2153 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2209 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2154 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 2210 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2155 mgmt->u.beacon.beacon_int = 2211 mgmt->u.beacon.beacon_int =
2156 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2212 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2157 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2213 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2169,21 +2225,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2169 info = IEEE80211_SKB_CB(skb); 2225 info = IEEE80211_SKB_CB(skb);
2170 2226
2171 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 2227 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2228 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2172 info->band = band; 2229 info->band = band;
2173 /* 2230
2174 * XXX: For now, always use the lowest rate 2231 memset(&txrc, 0, sizeof(txrc));
2175 */ 2232 txrc.hw = hw;
2176 info->control.rates[0].idx = 0; 2233 txrc.sband = sband;
2177 info->control.rates[0].count = 1; 2234 txrc.bss_conf = &sdata->vif.bss_conf;
2178 info->control.rates[1].idx = -1; 2235 txrc.skb = skb;
2179 info->control.rates[2].idx = -1; 2236 txrc.reported_rate.idx = -1;
2180 info->control.rates[3].idx = -1; 2237 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2181 info->control.rates[4].idx = -1; 2238 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2182 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 2239 txrc.max_rate_idx = -1;
2240 else
2241 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2242 txrc.ap = true;
2243 rate_control_get_rate(sdata, NULL, &txrc);
2183 2244
2184 info->control.vif = vif; 2245 info->control.vif = vif;
2185 2246
2186 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2187 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 2247 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
2188 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 2248 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
2189 out: 2249 out:
@@ -2192,6 +2252,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2192} 2252}
2193EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2253EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2194 2254
2255struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2256 struct ieee80211_vif *vif)
2257{
2258 struct ieee80211_sub_if_data *sdata;
2259 struct ieee80211_if_managed *ifmgd;
2260 struct ieee80211_pspoll *pspoll;
2261 struct ieee80211_local *local;
2262 struct sk_buff *skb;
2263
2264 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2265 return NULL;
2266
2267 sdata = vif_to_sdata(vif);
2268 ifmgd = &sdata->u.mgd;
2269 local = sdata->local;
2270
2271 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2272 if (!skb) {
2273 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2274 "pspoll template\n", sdata->name);
2275 return NULL;
2276 }
2277 skb_reserve(skb, local->hw.extra_tx_headroom);
2278
2279 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2280 memset(pspoll, 0, sizeof(*pspoll));
2281 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2282 IEEE80211_STYPE_PSPOLL);
2283 pspoll->aid = cpu_to_le16(ifmgd->aid);
2284
2285 /* aid in PS-Poll has its two MSBs each set to 1 */
2286 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2287
2288 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2289 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2290
2291 return skb;
2292}
2293EXPORT_SYMBOL(ieee80211_pspoll_get);
2294
2295struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2296 struct ieee80211_vif *vif)
2297{
2298 struct ieee80211_hdr_3addr *nullfunc;
2299 struct ieee80211_sub_if_data *sdata;
2300 struct ieee80211_if_managed *ifmgd;
2301 struct ieee80211_local *local;
2302 struct sk_buff *skb;
2303
2304 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2305 return NULL;
2306
2307 sdata = vif_to_sdata(vif);
2308 ifmgd = &sdata->u.mgd;
2309 local = sdata->local;
2310
2311 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2312 if (!skb) {
2313 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2314 "template\n", sdata->name);
2315 return NULL;
2316 }
2317 skb_reserve(skb, local->hw.extra_tx_headroom);
2318
2319 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2320 sizeof(*nullfunc));
2321 memset(nullfunc, 0, sizeof(*nullfunc));
2322 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2323 IEEE80211_STYPE_NULLFUNC |
2324 IEEE80211_FCTL_TODS);
2325 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2326 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2327 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2328
2329 return skb;
2330}
2331EXPORT_SYMBOL(ieee80211_nullfunc_get);
2332
2333struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2334 struct ieee80211_vif *vif,
2335 const u8 *ssid, size_t ssid_len,
2336 const u8 *ie, size_t ie_len)
2337{
2338 struct ieee80211_sub_if_data *sdata;
2339 struct ieee80211_local *local;
2340 struct ieee80211_hdr_3addr *hdr;
2341 struct sk_buff *skb;
2342 size_t ie_ssid_len;
2343 u8 *pos;
2344
2345 sdata = vif_to_sdata(vif);
2346 local = sdata->local;
2347 ie_ssid_len = 2 + ssid_len;
2348
2349 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2350 ie_ssid_len + ie_len);
2351 if (!skb) {
2352 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2353 "request template\n", sdata->name);
2354 return NULL;
2355 }
2356
2357 skb_reserve(skb, local->hw.extra_tx_headroom);
2358
2359 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2360 memset(hdr, 0, sizeof(*hdr));
2361 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2362 IEEE80211_STYPE_PROBE_REQ);
2363 memset(hdr->addr1, 0xff, ETH_ALEN);
2364 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2365 memset(hdr->addr3, 0xff, ETH_ALEN);
2366
2367 pos = skb_put(skb, ie_ssid_len);
2368 *pos++ = WLAN_EID_SSID;
2369 *pos++ = ssid_len;
2370 if (ssid)
2371 memcpy(pos, ssid, ssid_len);
2372 pos += ssid_len;
2373
2374 if (ie) {
2375 pos = skb_put(skb, ie_len);
2376 memcpy(pos, ie, ie_len);
2377 }
2378
2379 return skb;
2380}
2381EXPORT_SYMBOL(ieee80211_probereq_get);
2382
2195void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2383void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2196 const void *frame, size_t frame_len, 2384 const void *frame, size_t frame_len,
2197 const struct ieee80211_tx_info *frame_txctl, 2385 const struct ieee80211_tx_info *frame_txctl,
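
The tx.c hunk above also exports three frame-template builders (ieee80211_pspoll_get, ieee80211_nullfunc_get, ieee80211_probereq_get). A hedged sketch of driver-side usage, with the helper signatures taken from the diff; the firmware-template call and its constants are hypothetical:

/*
 * Illustrative only: a hypothetical offload driver fetching the new
 * templates and handing them to firmware. my_fw_set_template() and the
 * TEMPLATE_* ids do not exist in mac80211.
 */
static void my_driver_upload_templates(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct sk_buff *nullfunc, *pspoll;

	nullfunc = ieee80211_nullfunc_get(hw, vif);	/* nullfunc to the AP */
	pspoll = ieee80211_pspoll_get(hw, vif);		/* PS-Poll, AID MSBs set */

	if (nullfunc) {
		my_fw_set_template(hw, TEMPLATE_NULLFUNC, nullfunc); /* hypothetical */
		dev_kfree_skb(nullfunc);
	}
	if (pspoll) {
		my_fw_set_template(hw, TEMPLATE_PSPOLL, pspoll);     /* hypothetical */
		dev_kfree_skb(pspoll);
	}
}
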
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3848140313f5..53af57047435 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/crc32.h> 22#include <linux/crc32.h>
24#include <net/net_namespace.h> 23#include <net/net_namespace.h>
@@ -280,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
280 /* someone still has this queue stopped */ 279 /* someone still has this queue stopped */
281 return; 280 return;
282 281
283 if (!skb_queue_empty(&local->pending[queue])) 282 if (skb_queue_empty(&local->pending[queue])) {
283 rcu_read_lock();
284 list_for_each_entry_rcu(sdata, &local->interfaces, list)
285 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
286 rcu_read_unlock();
287 } else
284 tasklet_schedule(&local->tx_pending_tasklet); 288 tasklet_schedule(&local->tx_pending_tasklet);
285
286 rcu_read_lock();
287 list_for_each_entry_rcu(sdata, &local->interfaces, list)
288 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
289 rcu_read_unlock();
290} 289}
291 290
292void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 291void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -480,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
480 case NL80211_IFTYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
481 break; 480 break;
482 } 481 }
483 if (netif_running(sdata->dev)) 482 if (ieee80211_sdata_running(sdata))
484 iterator(data, sdata->dev->dev_addr, 483 iterator(data, sdata->vif.addr,
485 &sdata->vif); 484 &sdata->vif);
486 } 485 }
487 486
@@ -514,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
514 case NL80211_IFTYPE_MESH_POINT: 513 case NL80211_IFTYPE_MESH_POINT:
515 break; 514 break;
516 } 515 }
517 if (netif_running(sdata->dev)) 516 if (ieee80211_sdata_running(sdata))
518 iterator(data, sdata->dev->dev_addr, 517 iterator(data, sdata->vif.addr,
519 &sdata->vif); 518 &sdata->vif);
520 } 519 }
521 520
@@ -793,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
793 break; 792 break;
794 } 793 }
795 794
795 qparam.uapsd = false;
796
796 drv_conf_tx(local, queue, &qparam); 797 drv_conf_tx(local, queue, &qparam);
797 } 798 }
798} 799}
@@ -860,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
860 sizeof(*mgmt) + 6 + extra_len); 861 sizeof(*mgmt) + 6 + extra_len);
861 if (!skb) { 862 if (!skb) {
862 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 863 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
863 "frame\n", sdata->dev->name); 864 "frame\n", sdata->name);
864 return; 865 return;
865 } 866 }
866 skb_reserve(skb, local->hw.extra_tx_headroom); 867 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -870,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
870 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 871 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
871 IEEE80211_STYPE_AUTH); 872 IEEE80211_STYPE_AUTH);
872 memcpy(mgmt->da, bssid, ETH_ALEN); 873 memcpy(mgmt->da, bssid, ETH_ALEN);
873 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 874 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
874 memcpy(mgmt->bssid, bssid, ETH_ALEN); 875 memcpy(mgmt->bssid, bssid, ETH_ALEN);
875 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 876 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
876 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 877 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -893,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
893 enum ieee80211_band band) 894 enum ieee80211_band band)
894{ 895{
895 struct ieee80211_supported_band *sband; 896 struct ieee80211_supported_band *sband;
896 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; 897 u8 *pos;
897 int i; 898 size_t offset = 0, noffset;
899 int supp_rates_len, i;
898 900
899 sband = local->hw.wiphy->bands[band]; 901 sband = local->hw.wiphy->bands[band];
900 902
901 pos = buffer; 903 pos = buffer;
902 904
905 supp_rates_len = min_t(int, sband->n_bitrates, 8);
906
903 *pos++ = WLAN_EID_SUPP_RATES; 907 *pos++ = WLAN_EID_SUPP_RATES;
904 supp_rates_len = pos; 908 *pos++ = supp_rates_len;
905 *pos++ = 0;
906
907 for (i = 0; i < sband->n_bitrates; i++) {
908 struct ieee80211_rate *rate = &sband->bitrates[i];
909
910 if (esupp_rates_len) {
911 *esupp_rates_len += 1;
912 } else if (*supp_rates_len == 8) {
913 *pos++ = WLAN_EID_EXT_SUPP_RATES;
914 esupp_rates_len = pos;
915 *pos++ = 1;
916 } else
917 *supp_rates_len += 1;
918 909
919 *pos++ = rate->bitrate / 5; 910 for (i = 0; i < supp_rates_len; i++) {
911 int rate = sband->bitrates[i].bitrate;
912 *pos++ = (u8) (rate / 5);
913 }
914
915 /* insert "request information" if in custom IEs */
916 if (ie && ie_len) {
917 static const u8 before_extrates[] = {
918 WLAN_EID_SSID,
919 WLAN_EID_SUPP_RATES,
920 WLAN_EID_REQUEST,
921 };
922 noffset = ieee80211_ie_split(ie, ie_len,
923 before_extrates,
924 ARRAY_SIZE(before_extrates),
925 offset);
926 memcpy(pos, ie + offset, noffset - offset);
927 pos += noffset - offset;
928 offset = noffset;
929 }
930
931 if (sband->n_bitrates > i) {
932 *pos++ = WLAN_EID_EXT_SUPP_RATES;
933 *pos++ = sband->n_bitrates - i;
934
935 for (; i < sband->n_bitrates; i++) {
936 int rate = sband->bitrates[i].bitrate;
937 *pos++ = (u8) (rate / 5);
938 }
939 }
940
941 /* insert custom IEs that go before HT */
942 if (ie && ie_len) {
943 static const u8 before_ht[] = {
944 WLAN_EID_SSID,
945 WLAN_EID_SUPP_RATES,
946 WLAN_EID_REQUEST,
947 WLAN_EID_EXT_SUPP_RATES,
948 WLAN_EID_DS_PARAMS,
949 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
950 };
951 noffset = ieee80211_ie_split(ie, ie_len,
952 before_ht, ARRAY_SIZE(before_ht),
953 offset);
954 memcpy(pos, ie + offset, noffset - offset);
955 pos += noffset - offset;
956 offset = noffset;
920 } 957 }
921 958
922 if (sband->ht_cap.ht_supported) { 959 if (sband->ht_cap.ht_supported) {
923 __le16 tmp = cpu_to_le16(sband->ht_cap.cap); 960 u16 cap = sband->ht_cap.cap;
961 __le16 tmp;
962
963 if (ieee80211_disable_40mhz_24ghz &&
964 sband->band == IEEE80211_BAND_2GHZ) {
965 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
966 cap &= ~IEEE80211_HT_CAP_SGI_40;
967 }
924 968
925 *pos++ = WLAN_EID_HT_CAPABILITY; 969 *pos++ = WLAN_EID_HT_CAPABILITY;
926 *pos++ = sizeof(struct ieee80211_ht_cap); 970 *pos++ = sizeof(struct ieee80211_ht_cap);
927 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 971 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
972 tmp = cpu_to_le16(cap);
928 memcpy(pos, &tmp, sizeof(u16)); 973 memcpy(pos, &tmp, sizeof(u16));
929 pos += sizeof(u16); 974 pos += sizeof(u16);
930 /* TODO: needs a define here for << 2 */
931 *pos++ = sband->ht_cap.ampdu_factor | 975 *pos++ = sband->ht_cap.ampdu_factor |
932 (sband->ht_cap.ampdu_density << 2); 976 (sband->ht_cap.ampdu_density <<
977 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
933 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 978 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
934 pos += sizeof(sband->ht_cap.mcs); 979 pos += sizeof(sband->ht_cap.mcs);
935 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ 980 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
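
ieee80211_build_preq_ies() now interleaves caller-supplied IEs with the locally generated ones by repeatedly calling ieee80211_ie_split() with a list of element IDs that may appear before the split point, preserving 802.11 IE ordering. A stand-alone sketch of that split logic (mirroring the semantics documented in the kernel-doc added below, not the kernel implementation itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool id_in_list(const uint8_t *ids, size_t n_ids, uint8_t id)
{
	for (size_t i = 0; i < n_ids; i++)
		if (ids[i] == id)
			return true;
	return false;
}

/* Advance through the TLV buffer while the element ID is in the
 * "allowed before the split" list, and return the offset at which the
 * caller should insert its own element. */
static size_t ie_split(const uint8_t *ies, size_t ielen,
		       const uint8_t *ids, size_t n_ids, size_t offset)
{
	/* each element is ID (1 byte), length (1 byte), payload */
	while (offset + 2 <= ielen &&
	       id_in_list(ids, n_ids, ies[offset]) &&
	       offset + 2 + ies[offset + 1] <= ielen)
		offset += 2 + ies[offset + 1];
	return offset;
}
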
@@ -940,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
940 * that calculates local->scan_ies_len. 985 * that calculates local->scan_ies_len.
941 */ 986 */
942 987
943 if (ie) { 988 /* add any remaining custom IEs */
944 memcpy(pos, ie, ie_len); 989 if (ie && ie_len) {
945 pos += ie_len; 990 noffset = ie_len;
991 memcpy(pos, ie + offset, noffset - offset);
992 pos += noffset - offset;
946 } 993 }
947 994
948 return pos - buffer; 995 return pos - buffer;
@@ -955,40 +1002,33 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
955 struct ieee80211_local *local = sdata->local; 1002 struct ieee80211_local *local = sdata->local;
956 struct sk_buff *skb; 1003 struct sk_buff *skb;
957 struct ieee80211_mgmt *mgmt; 1004 struct ieee80211_mgmt *mgmt;
958 u8 *pos; 1005 size_t buf_len;
959 1006 u8 *buf;
960 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 1007
961 ie_len); 1008 /* FIXME: come up with a proper value */
962 if (!skb) { 1009 buf = kmalloc(200 + ie_len, GFP_KERNEL);
963 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1010 if (!buf) {
964 "request\n", sdata->dev->name); 1011 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1012 "buffer\n", sdata->name);
965 return; 1013 return;
966 } 1014 }
967 skb_reserve(skb, local->hw.extra_tx_headroom);
968 1015
969 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1016 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
970 memset(mgmt, 0, 24); 1017 local->hw.conf.channel->band);
971 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1018
972 IEEE80211_STYPE_PROBE_REQ); 1019 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
973 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1020 ssid, ssid_len,
1021 buf, buf_len);
1022
974 if (dst) { 1023 if (dst) {
1024 mgmt = (struct ieee80211_mgmt *) skb->data;
975 memcpy(mgmt->da, dst, ETH_ALEN); 1025 memcpy(mgmt->da, dst, ETH_ALEN);
976 memcpy(mgmt->bssid, dst, ETH_ALEN); 1026 memcpy(mgmt->bssid, dst, ETH_ALEN);
977 } else {
978 memset(mgmt->da, 0xff, ETH_ALEN);
979 memset(mgmt->bssid, 0xff, ETH_ALEN);
980 } 1027 }
981 pos = skb_put(skb, 2 + ssid_len);
982 *pos++ = WLAN_EID_SSID;
983 *pos++ = ssid_len;
984 memcpy(pos, ssid, ssid_len);
985 pos += ssid_len;
986
987 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
988 local->hw.conf.channel->band));
989 1028
990 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1029 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
991 ieee80211_tx_skb(sdata, skb); 1030 ieee80211_tx_skb(sdata, skb);
1031 kfree(buf);
992} 1032}
993 1033
994u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1034u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1032,18 +1072,16 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1032 ieee80211_led_radio(local, false); 1072 ieee80211_led_radio(local, false);
1033 1073
1034 cancel_work_sync(&local->reconfig_filter); 1074 cancel_work_sync(&local->reconfig_filter);
1035 drv_stop(local);
1036 1075
1037 flush_workqueue(local->workqueue); 1076 flush_workqueue(local->workqueue);
1077 drv_stop(local);
1038} 1078}
1039 1079
1040int ieee80211_reconfig(struct ieee80211_local *local) 1080int ieee80211_reconfig(struct ieee80211_local *local)
1041{ 1081{
1042 struct ieee80211_hw *hw = &local->hw; 1082 struct ieee80211_hw *hw = &local->hw;
1043 struct ieee80211_sub_if_data *sdata; 1083 struct ieee80211_sub_if_data *sdata;
1044 struct ieee80211_if_init_conf conf;
1045 struct sta_info *sta; 1084 struct sta_info *sta;
1046 unsigned long flags;
1047 int res; 1085 int res;
1048 1086
1049 if (local->suspended) 1087 if (local->suspended)
@@ -1059,9 +1097,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1059 */ 1097 */
1060 res = drv_start(local); 1098 res = drv_start(local);
1061 if (res) { 1099 if (res) {
1062 WARN(local->suspended, "Harware became unavailable " 1100 WARN(local->suspended, "Hardware became unavailable "
1063 "upon resume. This is could be a software issue" 1101 "upon resume. This could be a software issue "
1064 "prior to suspend or a harware issue\n"); 1102 "prior to suspend or a hardware issue.\n");
1065 return res; 1103 return res;
1066 } 1104 }
1067 1105
@@ -1072,29 +1110,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1072 list_for_each_entry(sdata, &local->interfaces, list) { 1110 list_for_each_entry(sdata, &local->interfaces, list) {
1073 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1111 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1074 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1112 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1075 netif_running(sdata->dev)) { 1113 ieee80211_sdata_running(sdata))
1076 conf.vif = &sdata->vif; 1114 res = drv_add_interface(local, &sdata->vif);
1077 conf.type = sdata->vif.type;
1078 conf.mac_addr = sdata->dev->dev_addr;
1079 res = drv_add_interface(local, &conf);
1080 }
1081 } 1115 }
1082 1116
1083 /* add STAs back */ 1117 /* add STAs back */
1084 if (local->ops->sta_notify) { 1118 mutex_lock(&local->sta_mtx);
1085 spin_lock_irqsave(&local->sta_lock, flags); 1119 list_for_each_entry(sta, &local->sta_list, list) {
1086 list_for_each_entry(sta, &local->sta_list, list) { 1120 if (sta->uploaded) {
1087 sdata = sta->sdata; 1121 sdata = sta->sdata;
1088 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1122 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1089 sdata = container_of(sdata->bss, 1123 sdata = container_of(sdata->bss,
1090 struct ieee80211_sub_if_data, 1124 struct ieee80211_sub_if_data,
1091 u.ap); 1125 u.ap);
1092 1126
1093 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, 1127 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1094 &sta->sta);
1095 } 1128 }
1096 spin_unlock_irqrestore(&local->sta_lock, flags);
1097 } 1129 }
1130 mutex_unlock(&local->sta_mtx);
1098 1131
1099 /* Clear Suspend state so that ADDBA requests can be processed */ 1132 /* Clear Suspend state so that ADDBA requests can be processed */
1100 1133
@@ -1119,7 +1152,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1119 /* Finally also reconfigure all the BSS information */ 1152 /* Finally also reconfigure all the BSS information */
1120 list_for_each_entry(sdata, &local->interfaces, list) { 1153 list_for_each_entry(sdata, &local->interfaces, list) {
1121 u32 changed = ~0; 1154 u32 changed = ~0;
1122 if (!netif_running(sdata->dev)) 1155 if (!ieee80211_sdata_running(sdata))
1123 continue; 1156 continue;
1124 switch (sdata->vif.type) { 1157 switch (sdata->vif.type) {
1125 case NL80211_IFTYPE_STATION: 1158 case NL80211_IFTYPE_STATION:
@@ -1145,9 +1178,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1145 } 1178 }
1146 } 1179 }
1147 1180
1181 rcu_read_lock();
1182 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1183 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1184 ieee80211_sta_tear_down_BA_sessions(sta);
1185 }
1186 }
1187 rcu_read_unlock();
1188
1148 /* add back keys */ 1189 /* add back keys */
1149 list_for_each_entry(sdata, &local->interfaces, list) 1190 list_for_each_entry(sdata, &local->interfaces, list)
1150 if (netif_running(sdata->dev)) 1191 if (ieee80211_sdata_running(sdata))
1151 ieee80211_enable_keys(sdata); 1192 ieee80211_enable_keys(sdata);
1152 1193
1153 ieee80211_wake_queues_by_reason(hw, 1194 ieee80211_wake_queues_by_reason(hw,
@@ -1184,13 +1225,143 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1184 1225
1185 add_timer(&local->sta_cleanup); 1226 add_timer(&local->sta_cleanup);
1186 1227
1187 spin_lock_irqsave(&local->sta_lock, flags); 1228 mutex_lock(&local->sta_mtx);
1188 list_for_each_entry(sta, &local->sta_list, list) 1229 list_for_each_entry(sta, &local->sta_list, list)
1189 mesh_plink_restart(sta); 1230 mesh_plink_restart(sta);
1190 spin_unlock_irqrestore(&local->sta_lock, flags); 1231 mutex_unlock(&local->sta_mtx);
1191#else 1232#else
1192 WARN_ON(1); 1233 WARN_ON(1);
1193#endif 1234#endif
1194 return 0; 1235 return 0;
1195} 1236}
1196 1237
1238static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1239 enum ieee80211_smps_mode *smps_mode)
1240{
1241 if (ifmgd->associated) {
1242 *smps_mode = ifmgd->ap_smps;
1243
1244 if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1245 if (ifmgd->powersave)
1246 *smps_mode = IEEE80211_SMPS_DYNAMIC;
1247 else
1248 *smps_mode = IEEE80211_SMPS_OFF;
1249 }
1250
1251 return 1;
1252 }
1253
1254 return 0;
1255}
1256
1257/* must hold iflist_mtx */
1258void ieee80211_recalc_smps(struct ieee80211_local *local,
1259 struct ieee80211_sub_if_data *forsdata)
1260{
1261 struct ieee80211_sub_if_data *sdata;
1262 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1263 int count = 0;
1264
1265 if (forsdata)
1266 WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
1267
1268 WARN_ON(!mutex_is_locked(&local->iflist_mtx));
1269
1270 /*
1271 * This function could be improved to handle multiple
1272 * interfaces better, but right now it makes any
1273 * non-station interfaces force SM PS to be turned
1274 * off. If there are multiple station interfaces it
1275 * could also use the best possible mode, e.g. if
1276 * one is in static and the other in dynamic then
1277 * dynamic is ok.
1278 */
1279
1280 list_for_each_entry(sdata, &local->interfaces, list) {
1281 if (!netif_running(sdata->dev))
1282 continue;
1283 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1284 goto set;
1285 if (sdata != forsdata) {
1286 /*
 1287			 * This nesting is ok -- we are holding the iflist_mtx
 1288			 * so we can't get here twice. But it's required
1289 * since normally we acquire it first and then the
1290 * iflist_mtx.
1291 */
1292 mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
1293 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1294 mutex_unlock(&sdata->u.mgd.mtx);
1295 } else
1296 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1297
1298 if (count > 1) {
1299 smps_mode = IEEE80211_SMPS_OFF;
1300 break;
1301 }
1302 }
1303
1304 if (smps_mode == local->smps_mode)
1305 return;
1306
1307 set:
1308 local->smps_mode = smps_mode;
1309 /* changed flag is auto-detected for this */
1310 ieee80211_hw_config(local, 0);
1311}
1312
1313static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
1314{
1315 int i;
1316
1317 for (i = 0; i < n_ids; i++)
1318 if (ids[i] == id)
1319 return true;
1320 return false;
1321}
1322
1323/**
1324 * ieee80211_ie_split - split an IE buffer according to ordering
1325 *
1326 * @ies: the IE buffer
1327 * @ielen: the length of the IE buffer
1328 * @ids: an array with element IDs that are allowed before
1329 * the split
1330 * @n_ids: the size of the element ID array
1331 * @offset: offset where to start splitting in the buffer
1332 *
1333 * This function splits an IE buffer by updating the @offset
1334 * variable to point to the location where the buffer should be
1335 * split.
1336 *
 1337 * It assumes that the given IE buffer is well-formed; this
1338 * has to be guaranteed by the caller!
1339 *
1340 * It also assumes that the IEs in the buffer are ordered
 1341 * correctly; if not, the result of using this function will not
1342 * be ordered correctly either, i.e. it does no reordering.
1343 *
1344 * The function returns the offset where the next part of the
1345 * buffer starts, which may be @ielen if the entire (remainder)
1346 * of the buffer should be used.
1347 */
1348size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1349 const u8 *ids, int n_ids, size_t offset)
1350{
1351 size_t pos = offset;
1352
1353 while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
1354 pos += 2 + ies[pos + 1];
1355
1356 return pos;
1357}
1358
1359size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
1360{
1361 size_t pos = offset;
1362
1363 while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
1364 pos += 2 + ies[pos + 1];
1365
1366 return pos;
1367}
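
The split helpers added above are easiest to see in context. Below is a minimal usage sketch -- not part of the patch; copy_extra_ies(), extra_ies and extra_len are illustrative names -- showing how a frame builder such as ieee80211_send_assoc() in the new work.c further down interleaves caller-supplied IEs with driver-generated ones in the required order.

static const u8 before_ht[] = {
	WLAN_EID_SSID,
	WLAN_EID_SUPP_RATES,
	WLAN_EID_EXT_SUPP_RATES,
};

static void copy_extra_ies(struct sk_buff *skb,
			   const u8 *extra_ies, size_t extra_len)
{
	size_t offset = 0, noffset;

	/* 1. caller IEs that must precede the HT capability IE */
	noffset = ieee80211_ie_split(extra_ies, extra_len,
				     before_ht, ARRAY_SIZE(before_ht), 0);
	memcpy(skb_put(skb, noffset), extra_ies, noffset);
	offset = noffset;

	/* 2. the driver-generated HT capability IE would be appended here */

	/* 3. remaining non-vendor caller IEs */
	noffset = ieee80211_ie_split_vendor(extra_ies, extra_len, offset);
	memcpy(skb_put(skb, noffset - offset), extra_ies + offset,
	       noffset - offset);
	offset = noffset;

	/* 4. vendor-specific caller IEs always go last */
	memcpy(skb_put(skb, extra_len - offset), extra_ies + offset,
	       extra_len - offset);
}
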
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 247123fe1a7a..5f3a4113bda1 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/slab.h>
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
21 22
22#include <net/mac80211.h> 23#include <net/mac80211.h>
@@ -305,20 +306,19 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
305{ 306{
306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 307 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
307 308
308 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 309 if (!info->control.hw_key) {
309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, 310 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
310 tx->key->conf.keylen, 311 tx->key->conf.keylen,
311 tx->key->conf.keyidx)) 312 tx->key->conf.keyidx))
312 return -1; 313 return -1;
313 } else { 314 } else if (info->control.hw_key->flags &
314 info->control.hw_key = &tx->key->conf; 315 IEEE80211_KEY_FLAG_GENERATE_IV) {
315 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 316 if (!ieee80211_wep_add_iv(tx->local, skb,
316 if (!ieee80211_wep_add_iv(tx->local, skb, 317 tx->key->conf.keylen,
317 tx->key->conf.keylen, 318 tx->key->conf.keyidx))
318 tx->key->conf.keyidx)) 319 return -1;
319 return -1;
320 }
321 } 320 }
321
322 return 0; 322 return 0;
323} 323}
324 324
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 79d887dae738..34e6d02da779 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -96,7 +96,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
96 } 96 }
97 97
98 if (!sta && ra && !is_multicast_ether_addr(ra)) { 98 if (!sta && ra && !is_multicast_ether_addr(ra)) {
99 sta = sta_info_get(local, ra); 99 sta = sta_info_get(sdata, ra);
100 if (sta) 100 if (sta)
101 sta_flags = get_sta_flags(sta); 101 sta_flags = get_sta_flags(sta);
102 } 102 }
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 000000000000..15e1ba931b87
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1101 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <linux/slab.h>
23#include <net/mac80211.h>
24#include <asm/unaligned.h>
25
26#include "ieee80211_i.h"
27#include "rate.h"
28
29#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
30#define IEEE80211_AUTH_MAX_TRIES 3
31#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
32#define IEEE80211_ASSOC_MAX_TRIES 3
33#define IEEE80211_MAX_PROBE_TRIES 5
34
35enum work_action {
36 WORK_ACT_NONE,
37 WORK_ACT_TIMEOUT,
38 WORK_ACT_DONE,
39};
40
41
42/* utils */
43static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
44{
45 WARN_ON(!mutex_is_locked(&local->work_mtx));
46}
47
48/*
49 * We can have multiple work items (and connection probing)
50 * scheduling this timer, but we need to take care to only
51 * reschedule it when it should fire _earlier_ than it was
52 * asked for before, or if it's not pending right now. This
53 * function ensures that. Note that it then is required to
54 * run this function for all timeouts after the first one
55 * has happened -- the work that runs from this timer will
56 * do that.
57 */
58static void run_again(struct ieee80211_local *local,
59 unsigned long timeout)
60{
61 ASSERT_WORK_MTX(local);
62
63 if (!timer_pending(&local->work_timer) ||
64 time_before(timeout, local->work_timer.expires))
65 mod_timer(&local->work_timer, timeout);
66}
67
68static void work_free_rcu(struct rcu_head *head)
69{
70 struct ieee80211_work *wk =
71 container_of(head, struct ieee80211_work, rcu_head);
72
73 kfree(wk);
74}
75
76void free_work(struct ieee80211_work *wk)
77{
78 call_rcu(&wk->rcu_head, work_free_rcu);
79}
80
81static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
82 struct ieee80211_supported_band *sband,
83 u32 *rates)
84{
85 int i, j, count;
86 *rates = 0;
87 count = 0;
88 for (i = 0; i < supp_rates_len; i++) {
89 int rate = (supp_rates[i] & 0x7F) * 5;
90
91 for (j = 0; j < sband->n_bitrates; j++)
92 if (sband->bitrates[j].bitrate == rate) {
93 *rates |= BIT(j);
94 count++;
95 break;
96 }
97 }
98
99 return count;
100}
101
102/* frame sending functions */
103
104static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
105 struct ieee80211_supported_band *sband,
106 struct ieee80211_channel *channel,
107 enum ieee80211_smps_mode smps)
108{
109 struct ieee80211_ht_info *ht_info;
110 u8 *pos;
111 u32 flags = channel->flags;
112 u16 cap = sband->ht_cap.cap;
113 __le16 tmp;
114
115 if (!sband->ht_cap.ht_supported)
116 return;
117
118 if (!ht_info_ie)
119 return;
120
121 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
122 return;
123
124 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
125
126 /* determine capability flags */
127
128 if (ieee80211_disable_40mhz_24ghz &&
129 sband->band == IEEE80211_BAND_2GHZ) {
130 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
131 cap &= ~IEEE80211_HT_CAP_SGI_40;
132 }
133
134 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
135 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
136 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
137 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
138 cap &= ~IEEE80211_HT_CAP_SGI_40;
139 }
140 break;
141 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
142 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
143 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
144 cap &= ~IEEE80211_HT_CAP_SGI_40;
145 }
146 break;
147 }
148
149 /* set SM PS mode properly */
150 cap &= ~IEEE80211_HT_CAP_SM_PS;
151 switch (smps) {
152 case IEEE80211_SMPS_AUTOMATIC:
153 case IEEE80211_SMPS_NUM_MODES:
154 WARN_ON(1);
155 case IEEE80211_SMPS_OFF:
156 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
157 IEEE80211_HT_CAP_SM_PS_SHIFT;
158 break;
159 case IEEE80211_SMPS_STATIC:
160 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
161 IEEE80211_HT_CAP_SM_PS_SHIFT;
162 break;
163 case IEEE80211_SMPS_DYNAMIC:
164 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
165 IEEE80211_HT_CAP_SM_PS_SHIFT;
166 break;
167 }
168
169 /* reserve and fill IE */
170
171 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
172 *pos++ = WLAN_EID_HT_CAPABILITY;
173 *pos++ = sizeof(struct ieee80211_ht_cap);
174 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
175
176 /* capability flags */
177 tmp = cpu_to_le16(cap);
178 memcpy(pos, &tmp, sizeof(u16));
179 pos += sizeof(u16);
180
181 /* AMPDU parameters */
182 *pos++ = sband->ht_cap.ampdu_factor |
183 (sband->ht_cap.ampdu_density <<
184 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
185
186 /* MCS set */
187 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
188 pos += sizeof(sband->ht_cap.mcs);
189
190 /* extended capabilities */
191 pos += sizeof(__le16);
192
193 /* BF capabilities */
194 pos += sizeof(__le32);
195
196 /* antenna selection */
197 pos += sizeof(u8);
198}
199
200static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
201 struct ieee80211_work *wk)
202{
203 struct ieee80211_local *local = sdata->local;
204 struct sk_buff *skb;
205 struct ieee80211_mgmt *mgmt;
206 u8 *pos, qos_info;
207 const u8 *ies;
208 size_t offset = 0, noffset;
209 int i, len, count, rates_len, supp_rates_len;
210 u16 capab;
211 struct ieee80211_supported_band *sband;
212 u32 rates = 0;
213
214 sband = local->hw.wiphy->bands[wk->chan->band];
215
216 /*
217 * Get all rates supported by the device and the AP as
218 * some APs don't like getting a superset of their rates
219 * in the association request (e.g. D-Link DAP 1353 in
220 * b-only mode)...
221 */
222 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
223 wk->assoc.supp_rates_len,
224 sband, &rates);
225
226 skb = alloc_skb(local->hw.extra_tx_headroom +
227 sizeof(*mgmt) + /* bit too much but doesn't matter */
228 2 + wk->assoc.ssid_len + /* SSID */
229 4 + rates_len + /* (extended) rates */
230 4 + /* power capability */
231 2 + 2 * sband->n_channels + /* supported channels */
232 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
233 wk->ie_len + /* extra IEs */
234 9, /* WMM */
235 GFP_KERNEL);
236 if (!skb) {
237 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
238 "frame\n", sdata->name);
239 return;
240 }
241 skb_reserve(skb, local->hw.extra_tx_headroom);
242
243 capab = WLAN_CAPABILITY_ESS;
244
245 if (sband->band == IEEE80211_BAND_2GHZ) {
246 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
247 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
248 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
249 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
250 }
251
252 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
253 capab |= WLAN_CAPABILITY_PRIVACY;
254
255 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
256 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
257 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
258
259 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
260 memset(mgmt, 0, 24);
261 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
262 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
263 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
264
265 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
266 skb_put(skb, 10);
267 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
268 IEEE80211_STYPE_REASSOC_REQ);
269 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
270 mgmt->u.reassoc_req.listen_interval =
271 cpu_to_le16(local->hw.conf.listen_interval);
272 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
273 ETH_ALEN);
274 } else {
275 skb_put(skb, 4);
276 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
277 IEEE80211_STYPE_ASSOC_REQ);
278 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
279 mgmt->u.assoc_req.listen_interval =
280 cpu_to_le16(local->hw.conf.listen_interval);
281 }
282
283 /* SSID */
284 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
285 *pos++ = WLAN_EID_SSID;
286 *pos++ = wk->assoc.ssid_len;
287 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
288
289 /* add all rates which were marked to be used above */
290 supp_rates_len = rates_len;
291 if (supp_rates_len > 8)
292 supp_rates_len = 8;
293
294 len = sband->n_bitrates;
295 pos = skb_put(skb, supp_rates_len + 2);
296 *pos++ = WLAN_EID_SUPP_RATES;
297 *pos++ = supp_rates_len;
298
299 count = 0;
300 for (i = 0; i < sband->n_bitrates; i++) {
301 if (BIT(i) & rates) {
302 int rate = sband->bitrates[i].bitrate;
303 *pos++ = (u8) (rate / 5);
304 if (++count == 8)
305 break;
306 }
307 }
308
309 if (rates_len > count) {
310 pos = skb_put(skb, rates_len - count + 2);
311 *pos++ = WLAN_EID_EXT_SUPP_RATES;
312 *pos++ = rates_len - count;
313
314 for (i++; i < sband->n_bitrates; i++) {
315 if (BIT(i) & rates) {
316 int rate = sband->bitrates[i].bitrate;
317 *pos++ = (u8) (rate / 5);
318 }
319 }
320 }
321
322 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
323 /* 1. power capabilities */
324 pos = skb_put(skb, 4);
325 *pos++ = WLAN_EID_PWR_CAPABILITY;
326 *pos++ = 2;
327 *pos++ = 0; /* min tx power */
328 *pos++ = wk->chan->max_power; /* max tx power */
329
330 /* 2. supported channels */
331 /* TODO: get this in reg domain format */
332 pos = skb_put(skb, 2 * sband->n_channels + 2);
333 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
334 *pos++ = 2 * sband->n_channels;
335 for (i = 0; i < sband->n_channels; i++) {
336 *pos++ = ieee80211_frequency_to_channel(
337 sband->channels[i].center_freq);
338 *pos++ = 1; /* one channel in the subband*/
339 }
340 }
341
342 /* if present, add any custom IEs that go before HT */
343 if (wk->ie_len && wk->ie) {
344 static const u8 before_ht[] = {
345 WLAN_EID_SSID,
346 WLAN_EID_SUPP_RATES,
347 WLAN_EID_EXT_SUPP_RATES,
348 WLAN_EID_PWR_CAPABILITY,
349 WLAN_EID_SUPPORTED_CHANNELS,
350 WLAN_EID_RSN,
351 WLAN_EID_QOS_CAPA,
352 WLAN_EID_RRM_ENABLED_CAPABILITIES,
353 WLAN_EID_MOBILITY_DOMAIN,
354 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
355 };
356 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
357 before_ht, ARRAY_SIZE(before_ht),
358 offset);
359 pos = skb_put(skb, noffset - offset);
360 memcpy(pos, wk->ie + offset, noffset - offset);
361 offset = noffset;
362 }
363
364 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
365 local->hw.queues >= 4)
366 ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
367 sband, wk->chan, wk->assoc.smps);
368
369 /* if present, add any custom non-vendor IEs that go after HT */
370 if (wk->ie_len && wk->ie) {
371 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
372 offset);
373 pos = skb_put(skb, noffset - offset);
374 memcpy(pos, wk->ie + offset, noffset - offset);
375 offset = noffset;
376 }
377
378 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
379 if (wk->assoc.uapsd_used) {
380 qos_info = local->uapsd_queues;
381 qos_info |= (local->uapsd_max_sp_len <<
382 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
383 } else {
384 qos_info = 0;
385 }
386
387 pos = skb_put(skb, 9);
388 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
389 *pos++ = 7; /* len */
390 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
391 *pos++ = 0x50;
392 *pos++ = 0xf2;
393 *pos++ = 2; /* WME */
394 *pos++ = 0; /* WME info */
395 *pos++ = 1; /* WME ver */
396 *pos++ = qos_info;
397 }
398
399 /* add any remaining custom (i.e. vendor specific here) IEs */
400 if (wk->ie_len && wk->ie) {
401 noffset = wk->ie_len;
402 pos = skb_put(skb, noffset - offset);
403 memcpy(pos, wk->ie + offset, noffset - offset);
404 }
405
406 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
407 ieee80211_tx_skb(sdata, skb);
408}
409
410static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
411 struct ieee80211_work *wk)
412{
413 struct cfg80211_bss *cbss;
414 u16 capa_val = WLAN_CAPABILITY_ESS;
415
416 if (wk->probe_auth.privacy)
417 capa_val |= WLAN_CAPABILITY_PRIVACY;
418
419 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
420 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
421 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
422 capa_val);
423 if (!cbss)
424 return;
425
426 cfg80211_unlink_bss(local->hw.wiphy, cbss);
427 cfg80211_put_bss(cbss);
428}
429
430static enum work_action __must_check
431ieee80211_direct_probe(struct ieee80211_work *wk)
432{
433 struct ieee80211_sub_if_data *sdata = wk->sdata;
434 struct ieee80211_local *local = sdata->local;
435
436 wk->probe_auth.tries++;
437 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
438 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
439 sdata->name, wk->filter_ta);
440
441 /*
 442	 * Most likely the AP is not in range, so remove the
443 * bss struct for that AP.
444 */
445 ieee80211_remove_auth_bss(local, wk);
446
447 return WORK_ACT_TIMEOUT;
448 }
449
450 printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
451 sdata->name, wk->filter_ta, wk->probe_auth.tries);
452
453 /*
 454	 * The direct probe is sent to the broadcast address as some APs
 455	 * will not answer a directed probe while unassociated.
456 */
457 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
458 wk->probe_auth.ssid_len, NULL, 0);
459
460 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
461 run_again(local, wk->timeout);
462
463 return WORK_ACT_NONE;
464}
465
466
467static enum work_action __must_check
468ieee80211_authenticate(struct ieee80211_work *wk)
469{
470 struct ieee80211_sub_if_data *sdata = wk->sdata;
471 struct ieee80211_local *local = sdata->local;
472
473 wk->probe_auth.tries++;
474 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
475 printk(KERN_DEBUG "%s: authentication with %pM"
476 " timed out\n", sdata->name, wk->filter_ta);
477
478 /*
 479		 * Most likely the AP is not in range, so remove the
480 * bss struct for that AP.
481 */
482 ieee80211_remove_auth_bss(local, wk);
483
484 return WORK_ACT_TIMEOUT;
485 }
486
487 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
488 sdata->name, wk->filter_ta, wk->probe_auth.tries);
489
490 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
491 wk->ie_len, wk->filter_ta, NULL, 0, 0);
492 wk->probe_auth.transaction = 2;
493
494 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
495 run_again(local, wk->timeout);
496
497 return WORK_ACT_NONE;
498}
499
500static enum work_action __must_check
501ieee80211_associate(struct ieee80211_work *wk)
502{
503 struct ieee80211_sub_if_data *sdata = wk->sdata;
504 struct ieee80211_local *local = sdata->local;
505
506 wk->assoc.tries++;
507 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
508 printk(KERN_DEBUG "%s: association with %pM"
509 " timed out\n",
510 sdata->name, wk->filter_ta);
511
512 /*
 513		 * Most likely the AP is not in range, so remove the
514 * bss struct for that AP.
515 */
516 if (wk->assoc.bss)
517 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
518
519 return WORK_ACT_TIMEOUT;
520 }
521
522 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
523 sdata->name, wk->filter_ta, wk->assoc.tries);
524 ieee80211_send_assoc(sdata, wk);
525
526 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
527 run_again(local, wk->timeout);
528
529 return WORK_ACT_NONE;
530}
531
532static enum work_action __must_check
533ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
534{
535 /*
536 * First time we run, do nothing -- the generic code will
537 * have switched to the right channel etc.
538 */
539 if (!wk->started) {
540 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
541
542 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
543 wk->chan, wk->chan_type,
544 wk->remain.duration, GFP_KERNEL);
545
546 return WORK_ACT_NONE;
547 }
548
549 return WORK_ACT_TIMEOUT;
550}
551
552static void ieee80211_auth_challenge(struct ieee80211_work *wk,
553 struct ieee80211_mgmt *mgmt,
554 size_t len)
555{
556 struct ieee80211_sub_if_data *sdata = wk->sdata;
557 u8 *pos;
558 struct ieee802_11_elems elems;
559
560 pos = mgmt->u.auth.variable;
561 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
562 if (!elems.challenge)
563 return;
564 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
565 elems.challenge - 2, elems.challenge_len + 2,
566 wk->filter_ta, wk->probe_auth.key,
567 wk->probe_auth.key_len, wk->probe_auth.key_idx);
568 wk->probe_auth.transaction = 4;
569}
570
571static enum work_action __must_check
572ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
573 struct ieee80211_mgmt *mgmt, size_t len)
574{
575 u16 auth_alg, auth_transaction, status_code;
576
577 if (wk->type != IEEE80211_WORK_AUTH)
578 return WORK_ACT_NONE;
579
580 if (len < 24 + 6)
581 return WORK_ACT_NONE;
582
583 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
584 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
585 status_code = le16_to_cpu(mgmt->u.auth.status_code);
586
587 if (auth_alg != wk->probe_auth.algorithm ||
588 auth_transaction != wk->probe_auth.transaction)
589 return WORK_ACT_NONE;
590
591 if (status_code != WLAN_STATUS_SUCCESS) {
592 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
593 wk->sdata->name, mgmt->sa, status_code);
594 return WORK_ACT_DONE;
595 }
596
597 switch (wk->probe_auth.algorithm) {
598 case WLAN_AUTH_OPEN:
599 case WLAN_AUTH_LEAP:
600 case WLAN_AUTH_FT:
601 break;
602 case WLAN_AUTH_SHARED_KEY:
603 if (wk->probe_auth.transaction != 4) {
604 ieee80211_auth_challenge(wk, mgmt, len);
605 /* need another frame */
606 return WORK_ACT_NONE;
607 }
608 break;
609 default:
610 WARN_ON(1);
611 return WORK_ACT_NONE;
612 }
613
614 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
615 return WORK_ACT_DONE;
616}
617
618static enum work_action __must_check
619ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
620 struct ieee80211_mgmt *mgmt, size_t len,
621 bool reassoc)
622{
623 struct ieee80211_sub_if_data *sdata = wk->sdata;
624 struct ieee80211_local *local = sdata->local;
625 u16 capab_info, status_code, aid;
626 struct ieee802_11_elems elems;
627 u8 *pos;
628
629 /*
630 * AssocResp and ReassocResp have identical structure, so process both
631 * of them in this function.
632 */
633
634 if (len < 24 + 6)
635 return WORK_ACT_NONE;
636
637 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
638 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
639 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
640
641 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
642 "status=%d aid=%d)\n",
643 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
644 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
645
646 pos = mgmt->u.assoc_resp.variable;
647 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
648
649 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
650 elems.timeout_int && elems.timeout_int_len == 5 &&
651 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
652 u32 tu, ms;
653 tu = get_unaligned_le32(elems.timeout_int + 1);
654 ms = tu * 1024 / 1000;
655 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
656 "comeback duration %u TU (%u ms)\n",
657 sdata->name, mgmt->sa, tu, ms);
658 wk->timeout = jiffies + msecs_to_jiffies(ms);
659 if (ms > IEEE80211_ASSOC_TIMEOUT)
660 run_again(local, wk->timeout);
661 return WORK_ACT_NONE;
662 }
663
664 if (status_code != WLAN_STATUS_SUCCESS)
665 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
666 sdata->name, mgmt->sa, status_code);
667 else
668 printk(KERN_DEBUG "%s: associated\n", sdata->name);
669
670 return WORK_ACT_DONE;
671}
672
673static enum work_action __must_check
674ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
675 struct ieee80211_mgmt *mgmt, size_t len,
676 struct ieee80211_rx_status *rx_status)
677{
678 struct ieee80211_sub_if_data *sdata = wk->sdata;
679 struct ieee80211_local *local = sdata->local;
680 size_t baselen;
681
682 ASSERT_WORK_MTX(local);
683
684 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
685 if (baselen > len)
686 return WORK_ACT_NONE;
687
688 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
689 return WORK_ACT_DONE;
690}
691
692static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
693 struct sk_buff *skb)
694{
695 struct ieee80211_rx_status *rx_status;
696 struct ieee80211_mgmt *mgmt;
697 struct ieee80211_work *wk;
698 enum work_action rma = WORK_ACT_NONE;
699 u16 fc;
700
701 rx_status = (struct ieee80211_rx_status *) skb->cb;
702 mgmt = (struct ieee80211_mgmt *) skb->data;
703 fc = le16_to_cpu(mgmt->frame_control);
704
705 mutex_lock(&local->work_mtx);
706
707 list_for_each_entry(wk, &local->work_list, list) {
708 const u8 *bssid = NULL;
709
710 switch (wk->type) {
711 case IEEE80211_WORK_DIRECT_PROBE:
712 case IEEE80211_WORK_AUTH:
713 case IEEE80211_WORK_ASSOC:
714 bssid = wk->filter_ta;
715 break;
716 default:
717 continue;
718 }
719
720 /*
721 * Before queuing, we already verified mgmt->sa,
722 * so this is needed just for matching.
723 */
724 if (compare_ether_addr(bssid, mgmt->bssid))
725 continue;
726
727 switch (fc & IEEE80211_FCTL_STYPE) {
728 case IEEE80211_STYPE_PROBE_RESP:
729 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
730 rx_status);
731 break;
732 case IEEE80211_STYPE_AUTH:
733 rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
734 break;
735 case IEEE80211_STYPE_ASSOC_RESP:
736 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
737 skb->len, false);
738 break;
739 case IEEE80211_STYPE_REASSOC_RESP:
740 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
741 skb->len, true);
742 break;
743 default:
744 WARN_ON(1);
745 }
746 /*
747 * We've processed this frame for that work, so it can't
748 * belong to another work struct.
749 * NB: this is also required for correctness for 'rma'!
750 */
751 break;
752 }
753
754 switch (rma) {
755 case WORK_ACT_NONE:
756 break;
757 case WORK_ACT_DONE:
758 list_del_rcu(&wk->list);
759 break;
760 default:
761 WARN(1, "unexpected: %d", rma);
762 }
763
764 mutex_unlock(&local->work_mtx);
765
766 if (rma != WORK_ACT_DONE)
767 goto out;
768
769 switch (wk->done(wk, skb)) {
770 case WORK_DONE_DESTROY:
771 free_work(wk);
772 break;
773 case WORK_DONE_REQUEUE:
774 synchronize_rcu();
775 wk->started = false; /* restart */
776 mutex_lock(&local->work_mtx);
777 list_add_tail(&wk->list, &local->work_list);
778 mutex_unlock(&local->work_mtx);
779 }
780
781 out:
782 kfree_skb(skb);
783}
784
785static void ieee80211_work_timer(unsigned long data)
786{
787 struct ieee80211_local *local = (void *) data;
788
789 if (local->quiescing)
790 return;
791
792 ieee80211_queue_work(&local->hw, &local->work_work);
793}
794
795static void ieee80211_work_work(struct work_struct *work)
796{
797 struct ieee80211_local *local =
798 container_of(work, struct ieee80211_local, work_work);
799 struct sk_buff *skb;
800 struct ieee80211_work *wk, *tmp;
801 LIST_HEAD(free_work);
802 enum work_action rma;
803 bool remain_off_channel = false;
804
805 if (local->scanning)
806 return;
807
808 /*
809 * ieee80211_queue_work() should have picked up most cases,
 810	 * here we'll pick up the rest.
811 */
812 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
813 return;
814
815 /* first process frames to avoid timing out while a frame is pending */
816 while ((skb = skb_dequeue(&local->work_skb_queue)))
817 ieee80211_work_rx_queued_mgmt(local, skb);
818
819 ieee80211_recalc_idle(local);
820
821 mutex_lock(&local->work_mtx);
822
823 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
824 bool started = wk->started;
825
826 /* mark work as started if it's on the current off-channel */
827 if (!started && local->tmp_channel &&
828 wk->chan == local->tmp_channel &&
829 wk->chan_type == local->tmp_channel_type) {
830 started = true;
831 wk->timeout = jiffies;
832 }
833
834 if (!started && !local->tmp_channel) {
835 /*
836 * TODO: could optimize this by leaving the
837 * station vifs in awake mode if they
838 * happen to be on the same channel as
839 * the requested channel
840 */
841 ieee80211_offchannel_stop_beaconing(local);
842 ieee80211_offchannel_stop_station(local);
843
844 local->tmp_channel = wk->chan;
845 local->tmp_channel_type = wk->chan_type;
846 ieee80211_hw_config(local, 0);
847 started = true;
848 wk->timeout = jiffies;
849 }
850
851 /* don't try to work with items that aren't started */
852 if (!started)
853 continue;
854
855 if (time_is_after_jiffies(wk->timeout)) {
856 /*
857 * This work item isn't supposed to be worked on
858 * right now, but take care to adjust the timer
859 * properly.
860 */
861 run_again(local, wk->timeout);
862 continue;
863 }
864
865 switch (wk->type) {
866 default:
867 WARN_ON(1);
868 /* nothing */
869 rma = WORK_ACT_NONE;
870 break;
871 case IEEE80211_WORK_ABORT:
872 rma = WORK_ACT_TIMEOUT;
873 break;
874 case IEEE80211_WORK_DIRECT_PROBE:
875 rma = ieee80211_direct_probe(wk);
876 break;
877 case IEEE80211_WORK_AUTH:
878 rma = ieee80211_authenticate(wk);
879 break;
880 case IEEE80211_WORK_ASSOC:
881 rma = ieee80211_associate(wk);
882 break;
883 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
884 rma = ieee80211_remain_on_channel_timeout(wk);
885 break;
886 }
887
888 wk->started = started;
889
890 switch (rma) {
891 case WORK_ACT_NONE:
892 /* might have changed the timeout */
893 run_again(local, wk->timeout);
894 break;
895 case WORK_ACT_TIMEOUT:
896 list_del_rcu(&wk->list);
897 synchronize_rcu();
898 list_add(&wk->list, &free_work);
899 break;
900 default:
901 WARN(1, "unexpected: %d", rma);
902 }
903 }
904
905 list_for_each_entry(wk, &local->work_list, list) {
906 if (!wk->started)
907 continue;
908 if (wk->chan != local->tmp_channel)
909 continue;
910 if (wk->chan_type != local->tmp_channel_type)
911 continue;
912 remain_off_channel = true;
913 }
914
915 if (!remain_off_channel && local->tmp_channel) {
916 local->tmp_channel = NULL;
917 ieee80211_hw_config(local, 0);
918 ieee80211_offchannel_return(local, true);
919 /* give connection some time to breathe */
920 run_again(local, jiffies + HZ/2);
921 }
922
923 if (list_empty(&local->work_list) && local->scan_req)
924 ieee80211_queue_delayed_work(&local->hw,
925 &local->scan_work,
926 round_jiffies_relative(0));
927
928 mutex_unlock(&local->work_mtx);
929
930 ieee80211_recalc_idle(local);
931
932 list_for_each_entry_safe(wk, tmp, &free_work, list) {
933 wk->done(wk, NULL);
934 list_del(&wk->list);
935 kfree(wk);
936 }
937}
938
939void ieee80211_add_work(struct ieee80211_work *wk)
940{
941 struct ieee80211_local *local;
942
943 if (WARN_ON(!wk->chan))
944 return;
945
946 if (WARN_ON(!wk->sdata))
947 return;
948
949 if (WARN_ON(!wk->done))
950 return;
951
952 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
953 return;
954
955 wk->started = false;
956
957 local = wk->sdata->local;
958 mutex_lock(&local->work_mtx);
959 list_add_tail(&wk->list, &local->work_list);
960 mutex_unlock(&local->work_mtx);
961
962 ieee80211_queue_work(&local->hw, &local->work_work);
963}
964
965void ieee80211_work_init(struct ieee80211_local *local)
966{
967 mutex_init(&local->work_mtx);
968 INIT_LIST_HEAD(&local->work_list);
969 setup_timer(&local->work_timer, ieee80211_work_timer,
970 (unsigned long)local);
971 INIT_WORK(&local->work_work, ieee80211_work_work);
972 skb_queue_head_init(&local->work_skb_queue);
973}
974
975void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
976{
977 struct ieee80211_local *local = sdata->local;
978 struct ieee80211_work *wk;
979
980 mutex_lock(&local->work_mtx);
981 list_for_each_entry(wk, &local->work_list, list) {
982 if (wk->sdata != sdata)
983 continue;
984 wk->type = IEEE80211_WORK_ABORT;
985 wk->started = true;
986 wk->timeout = jiffies;
987 }
988 mutex_unlock(&local->work_mtx);
989
990 /* run cleanups etc. */
991 ieee80211_work_work(&local->work_work);
992
993 mutex_lock(&local->work_mtx);
994 list_for_each_entry(wk, &local->work_list, list) {
995 if (wk->sdata != sdata)
996 continue;
997 WARN_ON(1);
998 break;
999 }
1000 mutex_unlock(&local->work_mtx);
1001}
1002
1003ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1004 struct sk_buff *skb)
1005{
1006 struct ieee80211_local *local = sdata->local;
1007 struct ieee80211_mgmt *mgmt;
1008 struct ieee80211_work *wk;
1009 u16 fc;
1010
1011 if (skb->len < 24)
1012 return RX_DROP_MONITOR;
1013
1014 mgmt = (struct ieee80211_mgmt *) skb->data;
1015 fc = le16_to_cpu(mgmt->frame_control);
1016
1017 list_for_each_entry_rcu(wk, &local->work_list, list) {
1018 if (sdata != wk->sdata)
1019 continue;
1020 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1021 continue;
1022 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1023 continue;
1024
1025 switch (fc & IEEE80211_FCTL_STYPE) {
1026 case IEEE80211_STYPE_AUTH:
1027 case IEEE80211_STYPE_PROBE_RESP:
1028 case IEEE80211_STYPE_ASSOC_RESP:
1029 case IEEE80211_STYPE_REASSOC_RESP:
1030 skb_queue_tail(&local->work_skb_queue, skb);
1031 ieee80211_queue_work(&local->hw, &local->work_work);
1032 return RX_QUEUED;
1033 }
1034 }
1035
1036 return RX_CONTINUE;
1037}
1038
1039static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1040 struct sk_buff *skb)
1041{
1042 /*
1043 * We are done serving the remain-on-channel command.
1044 */
1045 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
1046 wk->chan, wk->chan_type,
1047 GFP_KERNEL);
1048
1049 return WORK_DONE_DESTROY;
1050}
1051
1052int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1053 struct ieee80211_channel *chan,
1054 enum nl80211_channel_type channel_type,
1055 unsigned int duration, u64 *cookie)
1056{
1057 struct ieee80211_work *wk;
1058
1059 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
1060 if (!wk)
1061 return -ENOMEM;
1062
1063 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
1064 wk->chan = chan;
1065 wk->chan_type = channel_type;
1066 wk->sdata = sdata;
1067 wk->done = ieee80211_remain_done;
1068
1069 wk->remain.duration = duration;
1070
1071 *cookie = (unsigned long) wk;
1072
1073 ieee80211_add_work(wk);
1074
1075 return 0;
1076}
1077
1078int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1079 u64 cookie)
1080{
1081 struct ieee80211_local *local = sdata->local;
1082 struct ieee80211_work *wk, *tmp;
1083 bool found = false;
1084
1085 mutex_lock(&local->work_mtx);
1086 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
1087 if ((unsigned long) wk == cookie) {
1088 wk->timeout = jiffies;
1089 found = true;
1090 break;
1091 }
1092 }
1093 mutex_unlock(&local->work_mtx);
1094
1095 if (!found)
1096 return -ENOENT;
1097
1098 ieee80211_queue_work(&local->hw, &local->work_work);
1099
1100 return 0;
1101}
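
To show how the remain-on-channel work type added above is meant to be driven, here is a hypothetical caller sketch -- not part of the patch; my_roc_start(), my_roc_cancel() and the 5000 ms duration are made up. The caller gets back a cookie and later matches it to cancel the pending work item.

static int my_roc_start(struct ieee80211_sub_if_data *sdata,
			struct ieee80211_channel *chan, u64 *cookie)
{
	/* queues an IEEE80211_WORK_REMAIN_ON_CHANNEL item; once the work
	 * loop has switched to chan it calls cfg80211_ready_on_channel() */
	return ieee80211_wk_remain_on_channel(sdata, chan, NL80211_CHAN_NO_HT,
					      5000, cookie);
}

static int my_roc_cancel(struct ieee80211_sub_if_data *sdata, u64 cookie)
{
	/* forces the matching work item to time out on the next work run,
	 * which ends in ieee80211_remain_done() above */
	return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}
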
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5332014cb229..0adbcc941ac9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -9,10 +9,10 @@
9 9
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/slab.h>
13#include <linux/skbuff.h> 12#include <linux/skbuff.h>
14#include <linux/compiler.h> 13#include <linux/compiler.h>
15#include <linux/ieee80211.h> 14#include <linux/ieee80211.h>
15#include <linux/gfp.h>
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18 18
@@ -31,8 +31,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
31 unsigned int hdrlen; 31 unsigned int hdrlen;
32 struct ieee80211_hdr *hdr; 32 struct ieee80211_hdr *hdr;
33 struct sk_buff *skb = tx->skb; 33 struct sk_buff *skb = tx->skb;
34 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
34 int authenticator; 35 int authenticator;
35 int wpa_test = 0;
36 int tail; 36 int tail;
37 37
38 hdr = (struct ieee80211_hdr *)skb->data; 38 hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,16 +47,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
47 data = skb->data + hdrlen; 47 data = skb->data + hdrlen;
48 data_len = skb->len - hdrlen; 48 data_len = skb->len - hdrlen;
49 49
50 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 50 if (info->control.hw_key &&
51 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 51 !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && 52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
53 !wpa_test) { 53 /* hwaccel - with no need for SW-generated MMIC */
54 /* hwaccel - with no need for preallocated room for MMIC */
55 return TX_CONTINUE; 54 return TX_CONTINUE;
56 } 55 }
57 56
58 tail = MICHAEL_MIC_LEN; 57 tail = MICHAEL_MIC_LEN;
59 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 58 if (!info->control.hw_key)
60 tail += TKIP_ICV_LEN; 59 tail += TKIP_ICV_LEN;
61 60
62 if (WARN_ON(skb_tailroom(skb) < tail || 61 if (WARN_ON(skb_tailroom(skb) < tail ||
@@ -147,17 +146,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
147 int len, tail; 146 int len, tail;
148 u8 *pos; 147 u8 *pos;
149 148
150 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 149 if (info->control.hw_key &&
151 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 150 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
152 /* hwaccel - with no need for preallocated room for IV/ICV */ 151 /* hwaccel - with no need for software-generated IV */
153 info->control.hw_key = &tx->key->conf;
154 return 0; 152 return 0;
155 } 153 }
156 154
157 hdrlen = ieee80211_hdrlen(hdr->frame_control); 155 hdrlen = ieee80211_hdrlen(hdr->frame_control);
158 len = skb->len - hdrlen; 156 len = skb->len - hdrlen;
159 157
160 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 158 if (info->control.hw_key)
161 tail = 0; 159 tail = 0;
162 else 160 else
163 tail = TKIP_ICV_LEN; 161 tail = TKIP_ICV_LEN;
@@ -175,13 +173,11 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
175 if (key->u.tkip.tx.iv16 == 0) 173 if (key->u.tkip.tx.iv16 == 0)
176 key->u.tkip.tx.iv32++; 174 key->u.tkip.tx.iv32++;
177 175
178 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 176 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
179 /* hwaccel - with preallocated room for IV */
180 ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
181 177
182 info->control.hw_key = &tx->key->conf; 178 /* hwaccel - with software IV */
179 if (info->control.hw_key)
183 return 0; 180 return 0;
184 }
185 181
186 /* Add room for ICV */ 182 /* Add room for ICV */
187 skb_put(skb, TKIP_ICV_LEN); 183 skb_put(skb, TKIP_ICV_LEN);
@@ -363,24 +359,20 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
363 int hdrlen, len, tail; 359 int hdrlen, len, tail;
364 u8 *pos, *pn; 360 u8 *pos, *pn;
365 int i; 361 int i;
366 bool skip_hw;
367
368 skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
369 ieee80211_is_mgmt(hdr->frame_control);
370 362
371 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 363 if (info->control.hw_key &&
372 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 364 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
373 !skip_hw) { 365 /*
374 /* hwaccel - with no need for preallocated room for CCMP 366 * hwaccel has no need for preallocated room for CCMP
375 * header or MIC fields */ 367 * header or MIC fields
376 info->control.hw_key = &tx->key->conf; 368 */
377 return 0; 369 return 0;
378 } 370 }
379 371
380 hdrlen = ieee80211_hdrlen(hdr->frame_control); 372 hdrlen = ieee80211_hdrlen(hdr->frame_control);
381 len = skb->len - hdrlen; 373 len = skb->len - hdrlen;
382 374
383 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 375 if (info->control.hw_key)
384 tail = 0; 376 tail = 0;
385 else 377 else
386 tail = CCMP_MIC_LEN; 378 tail = CCMP_MIC_LEN;
@@ -405,11 +397,9 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
405 397
406 ccmp_pn2hdr(pos, pn, key->conf.keyidx); 398 ccmp_pn2hdr(pos, pn, key->conf.keyidx);
407 399
408 if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) { 400 /* hwaccel - with software CCMP header */
409 /* hwaccel - with preallocated room for CCMP header */ 401 if (info->control.hw_key)
410 info->control.hw_key = &tx->key->conf;
411 return 0; 402 return 0;
412 }
413 403
414 pos += CCMP_HDR_LEN; 404 pos += CCMP_HDR_LEN;
415 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); 405 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
@@ -525,11 +515,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
525 u8 *pn, aad[20]; 515 u8 *pn, aad[20];
526 int i; 516 int i;
527 517
528 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 518 if (info->control.hw_key)
529 /* hwaccel */
530 info->control.hw_key = &tx->key->conf;
531 return 0; 519 return 0;
532 }
533 520
534 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) 521 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
535 return TX_DROP; 522 return TX_DROP;
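
The wep.c and wpa.c hunks above all converge on the same pattern, summarized in this illustrative (non-literal) sketch: a set info->control.hw_key now means the key was accepted by hardware, and the per-key IEEE80211_KEY_FLAG_GENERATE_IV flag decides whether software must still prepend the IV.

/* illustrative only -- not a function in the patch */
static int sw_crypto_work_needed(struct ieee80211_tx_info *info)
{
	if (!info->control.hw_key)
		return 2;	/* no hardware key: full software encryption */

	if (info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)
		return 1;	/* hardware encrypts, software adds the IV */

	return 0;		/* hardware handles everything */
}
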
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 634d14affc8d..18d77b5c351a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK
83 83
84 If unsure, say 'N'. 84 If unsure, say 'N'.
85 85
86config NF_CONNTRACK_ZONES
87 bool 'Connection tracking zones'
88 depends on NETFILTER_ADVANCED
89 depends on NETFILTER_XT_TARGET_CT
90 help
91 This option enables support for connection tracking zones.
 92	  Normally, each connection needs to have a unique system-wide
 93	  identity. Connection tracking zones allow multiple
 94	  connections to use the same identity, as long as they are
95 contained in different zones.
96
97 If unsure, say `N'.
98
86config NF_CONNTRACK_EVENTS 99config NF_CONNTRACK_EVENTS
87 bool "Connection tracking events" 100 bool "Connection tracking events"
88 depends on NETFILTER_ADVANCED 101 depends on NETFILTER_ADVANCED
@@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK
341 354
342 To compile it as a module, choose M here. If unsure, say N. 355 To compile it as a module, choose M here. If unsure, say N.
343 356
357config NETFILTER_XT_TARGET_CT
358 tristate '"CT" target support'
359 depends on NF_CONNTRACK
360 depends on IP_NF_RAW || IP6_NF_RAW
361 depends on NETFILTER_ADVANCED
362 help
 363	  This option adds a `CT' target, which allows you to specify initial
364 connection tracking parameters like events to be delivered and
365 the helper to be used.
366
367 To compile it as a module, choose M here. If unsure, say N.
368
344config NETFILTER_XT_TARGET_DSCP 369config NETFILTER_XT_TARGET_DSCP
345 tristate '"DSCP" and "TOS" target support' 370 tristate '"DSCP" and "TOS" target support'
346 depends on IP_NF_MANGLE || IP6_NF_MANGLE 371 depends on IP_NF_MANGLE || IP6_NF_MANGLE
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 49f62ee4e9ff..f873644f02f6 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o 44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 48obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 60ec4e4badaa..78b505d33bfb 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -19,6 +19,7 @@
19#include <linux/inetdevice.h> 19#include <linux/inetdevice.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/slab.h>
22#include <net/net_namespace.h> 23#include <net/net_namespace.h>
23#include <net/sock.h> 24#include <net/sock.h>
24 25
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index f2d76238b9b5..712ccad13344 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -68,6 +68,10 @@ config IP_VS_TAB_BITS
68 each hash entry uses 8 bytes, so you can estimate how much memory is 68 each hash entry uses 8 bytes, so you can estimate how much memory is
69 needed for your box. 69 needed for your box.
70 70
 71	  You can override this value by setting the conn_tab_bits module
 72	  parameter or by appending ip_vs.conn_tab_bits=? to the kernel
 73	  command line if IP VS was compiled built-in.
74
71comment "IPVS transport protocol load balancing support" 75comment "IPVS transport protocol load balancing support"
72 76
73config IP_VS_PROTO_TCP 77config IP_VS_PROTO_TCP
@@ -100,6 +104,13 @@ config IP_VS_PROTO_AH
100 This option enables support for load balancing AH (Authentication 104 This option enables support for load balancing AH (Authentication
101 Header) transport protocol. Say Y if unsure. 105 Header) transport protocol. Say Y if unsure.
102 106
107config IP_VS_PROTO_SCTP
108 bool "SCTP load balancing support"
109 select LIBCRC32C
110 ---help---
111 This option enables support for load balancing SCTP transport
112 protocol. Say Y if unsure.
113
103comment "IPVS scheduler" 114comment "IPVS scheduler"
104 115
105config IP_VS_RR 116config IP_VS_RR
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 73a46fe1fe4c..e3baefd7066e 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -7,6 +7,7 @@ ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_SCTP) += ip_vs_proto_sctp.o
10 11
11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 3c7e42735b60..1cb0e834f8ff 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -27,6 +27,7 @@
27#include <linux/in.h> 27#include <linux/in.h>
28#include <linux/ip.h> 28#include <linux/ip.h>
29#include <linux/netfilter.h> 29#include <linux/netfilter.h>
30#include <linux/slab.h>
30#include <net/net_namespace.h> 31#include <net/net_namespace.h>
31#include <net/protocol.h> 32#include <net/protocol.h>
32#include <net/tcp.h> 33#include <net/tcp.h>
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 27c30cf933da..d8f7e8ef67b4 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/proc_fs.h> /* for proc_net_* */ 34#include <linux/proc_fs.h> /* for proc_net_* */
35#include <linux/slab.h>
35#include <linux/seq_file.h> 36#include <linux/seq_file.h>
36#include <linux/jhash.h> 37#include <linux/jhash.h>
37#include <linux/random.h> 38#include <linux/random.h>
@@ -40,6 +41,21 @@
40#include <net/ip_vs.h> 41#include <net/ip_vs.h>
41 42
42 43
44#ifndef CONFIG_IP_VS_TAB_BITS
45#define CONFIG_IP_VS_TAB_BITS 12
46#endif
47
48/*
49 * Connection hash size. Default is what was selected at compile time.
50*/
51int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
52module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
53MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
54
55/* size and mask values */
56int ip_vs_conn_tab_size;
57int ip_vs_conn_tab_mask;
58
43/* 59/*
44 * Connection hash table: for input and output packets lookups of IPVS 60 * Connection hash table: for input and output packets lookups of IPVS
45 */ 61 */
@@ -125,11 +141,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
125 if (af == AF_INET6) 141 if (af == AF_INET6)
126 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), 142 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
127 (__force u32)port, proto, ip_vs_conn_rnd) 143 (__force u32)port, proto, ip_vs_conn_rnd)
128 & IP_VS_CONN_TAB_MASK; 144 & ip_vs_conn_tab_mask;
129#endif 145#endif
130 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, 146 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
131 ip_vs_conn_rnd) 147 ip_vs_conn_rnd)
132 & IP_VS_CONN_TAB_MASK; 148 & ip_vs_conn_tab_mask;
133} 149}
134 150
135 151
@@ -760,7 +776,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
760 int idx; 776 int idx;
761 struct ip_vs_conn *cp; 777 struct ip_vs_conn *cp;
762 778
763 for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 779 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
764 ct_read_lock_bh(idx); 780 ct_read_lock_bh(idx);
765 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 781 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
766 if (pos-- == 0) { 782 if (pos-- == 0) {
@@ -797,7 +813,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
797 idx = l - ip_vs_conn_tab; 813 idx = l - ip_vs_conn_tab;
798 ct_read_unlock_bh(idx); 814 ct_read_unlock_bh(idx);
799 815
800 while (++idx < IP_VS_CONN_TAB_SIZE) { 816 while (++idx < ip_vs_conn_tab_size) {
801 ct_read_lock_bh(idx); 817 ct_read_lock_bh(idx);
802 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 818 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
803 seq->private = &ip_vs_conn_tab[idx]; 819 seq->private = &ip_vs_conn_tab[idx];
@@ -976,8 +992,8 @@ void ip_vs_random_dropentry(void)
976 /* 992 /*
977 * Randomly scan 1/32 of the whole table every second 993 * Randomly scan 1/32 of the whole table every second
978 */ 994 */
979 for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) { 995 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
980 unsigned hash = net_random() & IP_VS_CONN_TAB_MASK; 996 unsigned hash = net_random() & ip_vs_conn_tab_mask;
981 997
982 /* 998 /*
983 * Lock is actually needed in this loop. 999 * Lock is actually needed in this loop.
@@ -1029,7 +1045,7 @@ static void ip_vs_conn_flush(void)
1029 struct ip_vs_conn *cp; 1045 struct ip_vs_conn *cp;
1030 1046
1031 flush_again: 1047 flush_again:
1032 for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) { 1048 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1033 /* 1049 /*
1034 * Lock is actually needed in this loop. 1050 * Lock is actually needed in this loop.
1035 */ 1051 */
@@ -1060,10 +1076,15 @@ int __init ip_vs_conn_init(void)
1060{ 1076{
1061 int idx; 1077 int idx;
1062 1078
1079 /* Compute size and mask */
1080 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
1081 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
1082
1063 /* 1083 /*
1064 * Allocate the connection hash table and initialize its list heads 1084 * Allocate the connection hash table and initialize its list heads
1065 */ 1085 */
1066 ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head)); 1086 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
1087 sizeof(struct list_head));
1067 if (!ip_vs_conn_tab) 1088 if (!ip_vs_conn_tab)
1068 return -ENOMEM; 1089 return -ENOMEM;
1069 1090
@@ -1078,12 +1099,12 @@ int __init ip_vs_conn_init(void)
1078 1099
1079 pr_info("Connection hash table configured " 1100 pr_info("Connection hash table configured "
1080 "(size=%d, memory=%ldKbytes)\n", 1101 "(size=%d, memory=%ldKbytes)\n",
1081 IP_VS_CONN_TAB_SIZE, 1102 ip_vs_conn_tab_size,
1082 (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); 1103 (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
1083 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", 1104 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1084 sizeof(struct ip_vs_conn)); 1105 sizeof(struct ip_vs_conn));
1085 1106
1086 for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 1107 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1087 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); 1108 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
1088 } 1109 }
1089 1110
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 847ffca40184..1cd6e3fd058b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -31,7 +31,9 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/tcp.h> 33#include <linux/tcp.h>
34#include <linux/sctp.h>
34#include <linux/icmp.h> 35#include <linux/icmp.h>
36#include <linux/slab.h>
35 37
36#include <net/ip.h> 38#include <net/ip.h>
37#include <net/tcp.h> 39#include <net/tcp.h>
@@ -81,6 +83,8 @@ const char *ip_vs_proto_name(unsigned proto)
81 return "UDP"; 83 return "UDP";
82 case IPPROTO_TCP: 84 case IPPROTO_TCP:
83 return "TCP"; 85 return "TCP";
86 case IPPROTO_SCTP:
87 return "SCTP";
84 case IPPROTO_ICMP: 88 case IPPROTO_ICMP:
85 return "ICMP"; 89 return "ICMP";
86#ifdef CONFIG_IP_VS_IPV6 90#ifdef CONFIG_IP_VS_IPV6
@@ -512,8 +516,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
512 */ 516 */
513#ifdef CONFIG_IP_VS_IPV6 517#ifdef CONFIG_IP_VS_IPV6
514 if (svc->af == AF_INET6) 518 if (svc->af == AF_INET6)
515 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, 519 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
516 skb->dev);
517 else 520 else
518#endif 521#endif
519 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 522 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -589,8 +592,9 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
589 ip_send_check(ciph); 592 ip_send_check(ciph);
590 } 593 }
591 594
592 /* the TCP/UDP port */ 595 /* the TCP/UDP/SCTP port */
593 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) { 596 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
597 IPPROTO_SCTP == ciph->protocol) {
594 __be16 *ports = (void *)ciph + ciph->ihl*4; 598 __be16 *ports = (void *)ciph + ciph->ihl*4;
595 599
596 if (inout) 600 if (inout)
@@ -630,8 +634,9 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
630 ciph->saddr = cp->daddr.in6; 634 ciph->saddr = cp->daddr.in6;
631 } 635 }
632 636
633 /* the TCP/UDP port */ 637 /* the TCP/UDP/SCTP port */
634 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { 638 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
639 IPPROTO_SCTP == ciph->nexthdr) {
635 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); 640 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
636 641
637 if (inout) 642 if (inout)
@@ -679,7 +684,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
679 goto out; 684 goto out;
680 } 685 }
681 686
682 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) 687 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
688 IPPROTO_SCTP == protocol)
683 offset += 2 * sizeof(__u16); 689 offset += 2 * sizeof(__u16);
684 if (!skb_make_writable(skb, offset)) 690 if (!skb_make_writable(skb, offset))
685 goto out; 691 goto out;
@@ -857,6 +863,21 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
857} 863}
858#endif 864#endif
859 865
866/*
867 * Check if the SCTP chunk is an ABORT chunk
868 */
869static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
870{
871 sctp_chunkhdr_t *sch, schunk;
872 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
873 sizeof(schunk), &schunk);
874 if (sch == NULL)
875 return 0;
876 if (sch->type == SCTP_CID_ABORT)
877 return 1;
878 return 0;
879}
880
860static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) 881static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
861{ 882{
862 struct tcphdr _tcph, *th; 883 struct tcphdr _tcph, *th;
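
is_sctp_abort() above peeks at the first chunk that follows the 12-byte SCTP common header and reports whether it is an ABORT, mirroring what is_tcp_reset() does for RST. A self-contained sketch of the same check over a raw packet buffer (the header layouts and the ABORT type value 6 are written out locally from RFC 4960 for illustration; the kernel uses skb_header_pointer() with the sctp_chunkhdr_t/SCTP_CID_ABORT definitions instead):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Minimal, illustration-only layouts (RFC 4960, section 3). */
struct sctp_common_hdr { uint16_t source; uint16_t dest; uint32_t vtag; uint32_t checksum; };
struct sctp_chunk_hdr  { uint8_t type; uint8_t flags; uint16_t length; };

#define MY_SCTP_CID_ABORT 6     /* ABORT chunk type per RFC 4960 */

/* Return 1 if the first chunk after the common header is an ABORT. */
static int first_chunk_is_abort(const uint8_t *pkt, size_t len)
{
        struct sctp_chunk_hdr ch;

        if (len < sizeof(struct sctp_common_hdr) + sizeof(ch))
                return 0;
        memcpy(&ch, pkt + sizeof(struct sctp_common_hdr), sizeof(ch));
        return ch.type == MY_SCTP_CID_ABORT;
}

int main(void)
{
        uint8_t pkt[16] = { 0 };

        pkt[12] = MY_SCTP_CID_ABORT;    /* first chunk type byte */
        printf("%d\n", first_chunk_is_abort(pkt, sizeof(pkt)));
        return 0;
}
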
@@ -999,7 +1020,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
999 if (unlikely(!cp)) { 1020 if (unlikely(!cp)) {
1000 if (sysctl_ip_vs_nat_icmp_send && 1021 if (sysctl_ip_vs_nat_icmp_send &&
1001 (pp->protocol == IPPROTO_TCP || 1022 (pp->protocol == IPPROTO_TCP ||
1002 pp->protocol == IPPROTO_UDP)) { 1023 pp->protocol == IPPROTO_UDP ||
1024 pp->protocol == IPPROTO_SCTP)) {
1003 __be16 _ports[2], *pptr; 1025 __be16 _ports[2], *pptr;
1004 1026
1005 pptr = skb_header_pointer(skb, iph.len, 1027 pptr = skb_header_pointer(skb, iph.len,
@@ -1014,14 +1036,19 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
1014 * existing entry if it is not RST 1036 * existing entry if it is not RST
1015 * packet or not TCP packet. 1037 * packet or not TCP packet.
1016 */ 1038 */
1017 if (iph.protocol != IPPROTO_TCP 1039 if ((iph.protocol != IPPROTO_TCP &&
1018 || !is_tcp_reset(skb, iph.len)) { 1040 iph.protocol != IPPROTO_SCTP)
1041 || ((iph.protocol == IPPROTO_TCP
1042 && !is_tcp_reset(skb, iph.len))
1043 || (iph.protocol == IPPROTO_SCTP
1044 && !is_sctp_abort(skb,
1045 iph.len)))) {
1019#ifdef CONFIG_IP_VS_IPV6 1046#ifdef CONFIG_IP_VS_IPV6
1020 if (af == AF_INET6) 1047 if (af == AF_INET6)
1021 icmpv6_send(skb, 1048 icmpv6_send(skb,
1022 ICMPV6_DEST_UNREACH, 1049 ICMPV6_DEST_UNREACH,
1023 ICMPV6_PORT_UNREACH, 1050 ICMPV6_PORT_UNREACH,
1024 0, skb->dev); 1051 0);
1025 else 1052 else
1026#endif 1053#endif
1027 icmp_send(skb, 1054 icmp_send(skb,
@@ -1235,7 +1262,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1235 1262
1236 /* do the statistics and put it back */ 1263 /* do the statistics and put it back */
1237 ip_vs_in_stats(cp, skb); 1264 ip_vs_in_stats(cp, skb);
1238 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) 1265 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1266 IPPROTO_SCTP == cih->nexthdr)
1239 offset += 2 * sizeof(__u16); 1267 offset += 2 * sizeof(__u16);
1240 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); 1268 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1241 /* do not touch skb anymore */ 1269 /* do not touch skb anymore */
@@ -1358,6 +1386,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1358 * encourage the standby servers to update the connections timeout 1386
1359 */ 1387 */
1360 pkts = atomic_add_return(1, &cp->in_pkts); 1388 pkts = atomic_add_return(1, &cp->in_pkts);
1389 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1390 cp->protocol == IPPROTO_SCTP) {
1391 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1392 (atomic_read(&cp->in_pkts) %
1393 sysctl_ip_vs_sync_threshold[1]
1394 == sysctl_ip_vs_sync_threshold[0])) ||
1395 (cp->old_state != cp->state &&
1396 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1397 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1398 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1399 ip_vs_sync_conn(cp);
1400 goto out;
1401 }
1402 }
1403
1361 if (af == AF_INET && 1404 if (af == AF_INET &&
1362 (ip_vs_sync_state & IP_VS_STATE_MASTER) && 1405 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1363 (((cp->protocol != IPPROTO_TCP || 1406 (((cp->protocol != IPPROTO_TCP ||
@@ -1370,6 +1413,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1370 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || 1413 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1371 (cp->state == IP_VS_TCP_S_TIME_WAIT))))) 1414 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1372 ip_vs_sync_conn(cp); 1415 ip_vs_sync_conn(cp);
1416out:
1373 cp->old_state = cp->state; 1417 cp->old_state = cp->state;
1374 1418
1375 ip_vs_conn_put(cp); 1419 ip_vs_conn_put(cp);
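
The new SCTP branch above reuses the connection-sync policy already applied to TCP: while this director is the sync master, an established connection is pushed to the backup only once every sysctl_ip_vs_sync_threshold[1] packets (after the first sysctl_ip_vs_sync_threshold[0]), plus immediately on the closing states. A tiny sketch of that "(pkts % period) == offset" rate limiting, with made-up threshold values:

#include <stdio.h>

int main(void)
{
        int offset = 3, period = 50;    /* stand-ins for sync_threshold[0]/[1] */
        int pkts;

        /* The condition fires on packet 3, 53, 103, ... i.e. once per period. */
        for (pkts = 1; pkts <= 120; pkts++)
                if (pkts % period == offset)
                        printf("sync at packet %d\n", pkts);
        return 0;
}

With offset 3 and period 50 the message goes out on packets 3, 53, 103 and so on, so a long-lived association generates a bounded amount of sync traffic.
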
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c37ac2d7bec4..36dc1d88c2fa 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -31,6 +31,7 @@
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/slab.h>
34 35
35#include <linux/netfilter.h> 36#include <linux/netfilter.h>
36#include <linux/netfilter_ipv4.h> 37#include <linux/netfilter_ipv4.h>
@@ -1843,7 +1844,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1843 if (v == SEQ_START_TOKEN) { 1844 if (v == SEQ_START_TOKEN) {
1844 seq_printf(seq, 1845 seq_printf(seq,
1845 "IP Virtual Server version %d.%d.%d (size=%d)\n", 1846 "IP Virtual Server version %d.%d.%d (size=%d)\n",
1846 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 1847 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
1847 seq_puts(seq, 1848 seq_puts(seq,
1848 "Prot LocalAddress:Port Scheduler Flags\n"); 1849 "Prot LocalAddress:Port Scheduler Flags\n");
1849 seq_puts(seq, 1850 seq_puts(seq,
@@ -2132,8 +2133,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2132 } 2133 }
2133 } 2134 }
2134 2135
2135 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ 2136 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
2136 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { 2137 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
2138 usvc.protocol != IPPROTO_SCTP) {
2137 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2139 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
2138 usvc.protocol, &usvc.addr.ip, 2140 usvc.protocol, &usvc.addr.ip,
2139 ntohs(usvc.port), usvc.sched_name); 2141 ntohs(usvc.port), usvc.sched_name);
@@ -2386,7 +2388,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2386 char buf[64]; 2388 char buf[64];
2387 2389
2388 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", 2390 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
2389 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 2391 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
2390 if (copy_to_user(user, buf, strlen(buf)+1) != 0) { 2392 if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
2391 ret = -EFAULT; 2393 ret = -EFAULT;
2392 goto out; 2394 goto out;
@@ -2399,7 +2401,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2399 { 2401 {
2400 struct ip_vs_getinfo info; 2402 struct ip_vs_getinfo info;
2401 info.version = IP_VS_VERSION_CODE; 2403 info.version = IP_VS_VERSION_CODE;
2402 info.size = IP_VS_CONN_TAB_SIZE; 2404 info.size = ip_vs_conn_tab_size;
2403 info.num_services = ip_vs_num_services; 2405 info.num_services = ip_vs_num_services;
2404 if (copy_to_user(user, &info, sizeof(info)) != 0) 2406 if (copy_to_user(user, &info, sizeof(info)) != 0)
2405 ret = -EFAULT; 2407 ret = -EFAULT;
@@ -3243,7 +3245,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3243 case IPVS_CMD_GET_INFO: 3245 case IPVS_CMD_GET_INFO:
3244 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); 3246 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3245 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, 3247 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3246 IP_VS_CONN_TAB_SIZE); 3248 ip_vs_conn_tab_size);
3247 break; 3249 break;
3248 } 3250 }
3249 3251
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index fe3e18834b91..95fd0d14200b 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -39,6 +39,7 @@
39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40 40
41#include <linux/ip.h> 41#include <linux/ip.h>
42#include <linux/slab.h>
42#include <linux/module.h> 43#include <linux/module.h>
43#include <linux/kernel.h> 44#include <linux/kernel.h>
44#include <linux/skbuff.h> 45#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 702b53ca937c..ff28801962e0 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/jiffies.h> 19#include <linux/jiffies.h>
20#include <linux/slab.h>
21#include <linux/types.h> 20#include <linux/types.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/sysctl.h> 22#include <linux/sysctl.h>
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33e2c799cba7..2c7f185dfae4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -32,6 +32,7 @@
32#include <linux/in.h> 32#include <linux/in.h>
33#include <linux/ip.h> 33#include <linux/ip.h>
34#include <linux/netfilter.h> 34#include <linux/netfilter.h>
35#include <linux/gfp.h>
35#include <net/protocol.h> 36#include <net/protocol.h>
36#include <net/tcp.h> 37#include <net/tcp.h>
37#include <asm/unaligned.h> 38#include <asm/unaligned.h>
@@ -208,7 +209,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
208 */ 209 */
209 from.ip = n_cp->vaddr.ip; 210 from.ip = n_cp->vaddr.ip;
210 port = n_cp->vport; 211 port = n_cp->vport;
211 sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), 212 sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
212 (ntohs(port)>>8)&255, ntohs(port)&255); 213 (ntohs(port)>>8)&255, ntohs(port)&255);
213 buf_len = strlen(buf); 214 buf_len = strlen(buf);
214 215
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1b9370db2305..94a45213faa6 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -43,6 +43,7 @@
43#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 43#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
44 44
45#include <linux/ip.h> 45#include <linux/ip.h>
46#include <linux/slab.h>
46#include <linux/module.h> 47#include <linux/module.h>
47#include <linux/kernel.h> 48#include <linux/kernel.h>
48#include <linux/skbuff.h> 49#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index f7476b95ab46..535dc2b419d8 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -45,6 +45,8 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/jiffies.h> 47#include <linux/jiffies.h>
48#include <linux/list.h>
49#include <linux/slab.h>
48 50
49/* for sysctl */ 51/* for sysctl */
50#include <linux/fs.h> 52#include <linux/fs.h>
@@ -85,25 +87,25 @@ static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
85/* 87/*
86 * IPVS destination set structure and operations 88 * IPVS destination set structure and operations
87 */ 89 */
88struct ip_vs_dest_list { 90struct ip_vs_dest_set_elem {
89 struct ip_vs_dest_list *next; /* list link */ 91 struct list_head list; /* list link */
90 struct ip_vs_dest *dest; /* destination server */ 92 struct ip_vs_dest *dest; /* destination server */
91}; 93};
92 94
93struct ip_vs_dest_set { 95struct ip_vs_dest_set {
94 atomic_t size; /* set size */ 96 atomic_t size; /* set size */
95 unsigned long lastmod; /* last modified time */ 97 unsigned long lastmod; /* last modified time */
96 struct ip_vs_dest_list *list; /* destination list */ 98 struct list_head list; /* destination list */
97 rwlock_t lock; /* lock for this list */ 99 rwlock_t lock; /* lock for this list */
98}; 100};
99 101
100 102
101static struct ip_vs_dest_list * 103static struct ip_vs_dest_set_elem *
102ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 104ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
103{ 105{
104 struct ip_vs_dest_list *e; 106 struct ip_vs_dest_set_elem *e;
105 107
106 for (e=set->list; e!=NULL; e=e->next) { 108 list_for_each_entry(e, &set->list, list) {
107 if (e->dest == dest) 109 if (e->dest == dest)
108 /* already existed */ 110 /* already existed */
109 return NULL; 111 return NULL;
@@ -118,9 +120,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
118 atomic_inc(&dest->refcnt); 120 atomic_inc(&dest->refcnt);
119 e->dest = dest; 121 e->dest = dest;
120 122
121 /* link it to the list */ 123 list_add(&e->list, &set->list);
122 e->next = set->list;
123 set->list = e;
124 atomic_inc(&set->size); 124 atomic_inc(&set->size);
125 125
126 set->lastmod = jiffies; 126 set->lastmod = jiffies;
@@ -130,34 +130,33 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
130static void 130static void
131ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 131ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
132{ 132{
133 struct ip_vs_dest_list *e, **ep; 133 struct ip_vs_dest_set_elem *e;
134 134
135 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 135 list_for_each_entry(e, &set->list, list) {
136 if (e->dest == dest) { 136 if (e->dest == dest) {
137 /* HIT */ 137 /* HIT */
138 *ep = e->next;
139 atomic_dec(&set->size); 138 atomic_dec(&set->size);
140 set->lastmod = jiffies; 139 set->lastmod = jiffies;
141 atomic_dec(&e->dest->refcnt); 140 atomic_dec(&e->dest->refcnt);
141 list_del(&e->list);
142 kfree(e); 142 kfree(e);
143 break; 143 break;
144 } 144 }
145 ep = &e->next;
146 } 145 }
147} 146}
148 147
149static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) 148static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
150{ 149{
151 struct ip_vs_dest_list *e, **ep; 150 struct ip_vs_dest_set_elem *e, *ep;
152 151
153 write_lock(&set->lock); 152 write_lock(&set->lock);
154 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 153 list_for_each_entry_safe(e, ep, &set->list, list) {
155 *ep = e->next;
156 /* 154 /*
157 * We don't kfree dest because it is refered either 155 * We don't kfree dest because it is refered either
158 * by its service or by the trash dest list. 156 * by its service or by the trash dest list.
159 */ 157 */
160 atomic_dec(&e->dest->refcnt); 158 atomic_dec(&e->dest->refcnt);
159 list_del(&e->list);
161 kfree(e); 160 kfree(e);
162 } 161 }
163 write_unlock(&set->lock); 162 write_unlock(&set->lock);
@@ -166,7 +165,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
166/* get weighted least-connection node in the destination set */ 165/* get weighted least-connection node in the destination set */
167static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) 166static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
168{ 167{
169 register struct ip_vs_dest_list *e; 168 register struct ip_vs_dest_set_elem *e;
170 struct ip_vs_dest *dest, *least; 169 struct ip_vs_dest *dest, *least;
171 int loh, doh; 170 int loh, doh;
172 171
@@ -174,7 +173,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
174 return NULL; 173 return NULL;
175 174
176 /* select the first destination server, whose weight > 0 */ 175 /* select the first destination server, whose weight > 0 */
177 for (e=set->list; e!=NULL; e=e->next) { 176 list_for_each_entry(e, &set->list, list) {
178 least = e->dest; 177 least = e->dest;
179 if (least->flags & IP_VS_DEST_F_OVERLOAD) 178 if (least->flags & IP_VS_DEST_F_OVERLOAD)
180 continue; 179 continue;
@@ -190,7 +189,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
190 189
191 /* find the destination with the weighted least load */ 190 /* find the destination with the weighted least load */
192 nextstage: 191 nextstage:
193 for (e=e->next; e!=NULL; e=e->next) { 192 list_for_each_entry(e, &set->list, list) {
194 dest = e->dest; 193 dest = e->dest;
195 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 194 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
196 continue; 195 continue;
@@ -220,7 +219,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
220/* get weighted most-connection node in the destination set */ 219/* get weighted most-connection node in the destination set */
221static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) 220static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
222{ 221{
223 register struct ip_vs_dest_list *e; 222 register struct ip_vs_dest_set_elem *e;
224 struct ip_vs_dest *dest, *most; 223 struct ip_vs_dest *dest, *most;
225 int moh, doh; 224 int moh, doh;
226 225
@@ -228,7 +227,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
228 return NULL; 227 return NULL;
229 228
230 /* select the first destination server, whose weight > 0 */ 229 /* select the first destination server, whose weight > 0 */
231 for (e=set->list; e!=NULL; e=e->next) { 230 list_for_each_entry(e, &set->list, list) {
232 most = e->dest; 231 most = e->dest;
233 if (atomic_read(&most->weight) > 0) { 232 if (atomic_read(&most->weight) > 0) {
234 moh = atomic_read(&most->activeconns) * 50 233 moh = atomic_read(&most->activeconns) * 50
@@ -240,7 +239,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
240 239
241 /* find the destination with the weighted most load */ 240 /* find the destination with the weighted most load */
242 nextstage: 241 nextstage:
243 for (e=e->next; e!=NULL; e=e->next) { 242 list_for_each_entry(e, &set->list, list) {
244 dest = e->dest; 243 dest = e->dest;
245 doh = atomic_read(&dest->activeconns) * 50 244 doh = atomic_read(&dest->activeconns) * 50
246 + atomic_read(&dest->inactconns); 245 + atomic_read(&dest->inactconns);
@@ -389,7 +388,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
389 388
390 /* initilize its dest set */ 389 /* initilize its dest set */
391 atomic_set(&(en->set.size), 0); 390 atomic_set(&(en->set.size), 0);
392 en->set.list = NULL; 391 INIT_LIST_HEAD(&en->set.list);
393 rwlock_init(&en->set.lock); 392 rwlock_init(&en->set.lock);
394 393
395 ip_vs_lblcr_hash(tbl, en); 394 ip_vs_lblcr_hash(tbl, en);
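
The ip_vs_lblcr.c hunks above replace the hand-rolled singly linked ip_vs_dest_list (with its *next pointer and the **ep back-pointer juggling in erase/eraseall) by the kernel's intrusive struct list_head, so removal becomes list_del() plus kfree() inside list_for_each_entry_safe(). A stripped-down userspace imitation of that pattern, with a minimal list_head and container_of written out locally (illustration only, not the kernel's implementation):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dest_elem {
        struct list_head list;  /* link, like ip_vs_dest_set_elem.list */
        int weight;
};

int main(void)
{
        struct list_head set;
        struct list_head *pos, *tmp;
        int i;

        list_init(&set);
        for (i = 0; i < 3; i++) {
                struct dest_elem *e = malloc(sizeof(*e));
                e->weight = i;
                list_add(&e->list, &set);
        }

        /* Safe iteration: 'tmp' lets us unlink and free the current element. */
        for (pos = set.next, tmp = pos->next; pos != &set; pos = tmp, tmp = pos->next) {
                struct dest_elem *e = container_of(pos, struct dest_elem, list);
                printf("freeing elem with weight %d\n", e->weight);
                list_del(&e->list);
                free(e);
        }
        return 0;
}

The "_safe" form keeps a second cursor so the element under the cursor can be unlinked and freed without derailing the walk, which is exactly what ip_vs_dest_set_eraseall() needs.
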
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 3e7671674549..7fc49f4cf5ad 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/gfp.h>
22#include <linux/in.h> 23#include <linux/in.h>
23#include <linux/ip.h> 24#include <linux/ip.h>
24#include <net/protocol.h> 25#include <net/protocol.h>
@@ -257,6 +258,9 @@ int __init ip_vs_protocol_init(void)
257#ifdef CONFIG_IP_VS_PROTO_UDP 258#ifdef CONFIG_IP_VS_PROTO_UDP
258 REGISTER_PROTOCOL(&ip_vs_protocol_udp); 259 REGISTER_PROTOCOL(&ip_vs_protocol_udp);
259#endif 260#endif
261#ifdef CONFIG_IP_VS_PROTO_SCTP
262 REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
263#endif
260#ifdef CONFIG_IP_VS_PROTO_AH 264#ifdef CONFIG_IP_VS_PROTO_AH
261 REGISTER_PROTOCOL(&ip_vs_protocol_ah); 265 REGISTER_PROTOCOL(&ip_vs_protocol_ah);
262#endif 266#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
new file mode 100644
index 000000000000..c9a3f7a21d53
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -0,0 +1,1183 @@
1#include <linux/kernel.h>
2#include <linux/ip.h>
3#include <linux/sctp.h>
4#include <net/ip.h>
5#include <net/ip6_checksum.h>
6#include <linux/netfilter.h>
7#include <linux/netfilter_ipv4.h>
8#include <net/sctp/checksum.h>
9#include <net/ip_vs.h>
10
11
12static struct ip_vs_conn *
13sctp_conn_in_get(int af,
14 const struct sk_buff *skb,
15 struct ip_vs_protocol *pp,
16 const struct ip_vs_iphdr *iph,
17 unsigned int proto_off,
18 int inverse)
19{
20 __be16 _ports[2], *pptr;
21
22 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
23 if (pptr == NULL)
24 return NULL;
25
26 if (likely(!inverse))
27 return ip_vs_conn_in_get(af, iph->protocol,
28 &iph->saddr, pptr[0],
29 &iph->daddr, pptr[1]);
30 else
31 return ip_vs_conn_in_get(af, iph->protocol,
32 &iph->daddr, pptr[1],
33 &iph->saddr, pptr[0]);
34}
35
36static struct ip_vs_conn *
37sctp_conn_out_get(int af,
38 const struct sk_buff *skb,
39 struct ip_vs_protocol *pp,
40 const struct ip_vs_iphdr *iph,
41 unsigned int proto_off,
42 int inverse)
43{
44 __be16 _ports[2], *pptr;
45
46 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
47 if (pptr == NULL)
48 return NULL;
49
50 if (likely(!inverse))
51 return ip_vs_conn_out_get(af, iph->protocol,
52 &iph->saddr, pptr[0],
53 &iph->daddr, pptr[1]);
54 else
55 return ip_vs_conn_out_get(af, iph->protocol,
56 &iph->daddr, pptr[1],
57 &iph->saddr, pptr[0]);
58}
59
60static int
61sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
62 int *verdict, struct ip_vs_conn **cpp)
63{
64 struct ip_vs_service *svc;
65 sctp_chunkhdr_t _schunkh, *sch;
66 sctp_sctphdr_t *sh, _sctph;
67 struct ip_vs_iphdr iph;
68
69 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
70
71 sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
72 if (sh == NULL)
73 return 0;
74
75 sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t),
76 sizeof(_schunkh), &_schunkh);
77 if (sch == NULL)
78 return 0;
79
80 if ((sch->type == SCTP_CID_INIT) &&
81 (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
82 &iph.daddr, sh->dest))) {
83 if (ip_vs_todrop()) {
84 /*
85 * It seems that we are very loaded.
86 * We have to drop this packet :(
87 */
88 ip_vs_service_put(svc);
89 *verdict = NF_DROP;
90 return 0;
91 }
92 /*
93 * Let the virtual server select a real server for the
94 * incoming connection, and create a connection entry.
95 */
96 *cpp = ip_vs_schedule(svc, skb);
97 if (!*cpp) {
98 *verdict = ip_vs_leave(svc, skb, pp);
99 return 0;
100 }
101 ip_vs_service_put(svc);
102 }
103
104 return 1;
105}
106
107static int
108sctp_snat_handler(struct sk_buff *skb,
109 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
110{
111 sctp_sctphdr_t *sctph;
112 unsigned int sctphoff;
113 __be32 crc32;
114
115#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6)
117 sctphoff = sizeof(struct ipv6hdr);
118 else
119#endif
120 sctphoff = ip_hdrlen(skb);
121
122 /* csum_check requires unshared skb */
123 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
124 return 0;
125
126 if (unlikely(cp->app != NULL)) {
127 /* Some checks before mangling */
128 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
129 return 0;
130
131 /* Call application helper if needed */
132 if (!ip_vs_app_pkt_out(cp, skb))
133 return 0;
134 }
135
136 sctph = (void *) skb_network_header(skb) + sctphoff;
137 sctph->source = cp->vport;
138
139 /* Calculate the checksum */
140 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
141 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
142 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
143 crc32);
144 crc32 = sctp_end_cksum(crc32);
145 sctph->checksum = crc32;
146
147 return 1;
148}
149
150static int
151sctp_dnat_handler(struct sk_buff *skb,
152 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
153{
154
155 sctp_sctphdr_t *sctph;
156 unsigned int sctphoff;
157 __be32 crc32;
158
159#ifdef CONFIG_IP_VS_IPV6
160 if (cp->af == AF_INET6)
161 sctphoff = sizeof(struct ipv6hdr);
162 else
163#endif
164 sctphoff = ip_hdrlen(skb);
165
166 /* csum_check requires unshared skb */
167 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
168 return 0;
169
170 if (unlikely(cp->app != NULL)) {
171 /* Some checks before mangling */
172 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
173 return 0;
174
175 /* Call application helper if needed */
176 if (!ip_vs_app_pkt_out(cp, skb))
177 return 0;
178 }
179
180 sctph = (void *) skb_network_header(skb) + sctphoff;
181 sctph->dest = cp->dport;
182
183 /* Calculate the checksum */
184 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
185 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
186 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
187 crc32);
188 crc32 = sctp_end_cksum(crc32);
189 sctph->checksum = crc32;
190
191 return 1;
192}
193
194static int
195sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
196{
197 struct sk_buff *list = skb_shinfo(skb)->frag_list;
198 unsigned int sctphoff;
199 struct sctphdr *sh, _sctph;
200 __le32 cmp;
201 __le32 val;
202 __u32 tmp;
203
204#ifdef CONFIG_IP_VS_IPV6
205 if (af == AF_INET6)
206 sctphoff = sizeof(struct ipv6hdr);
207 else
208#endif
209 sctphoff = ip_hdrlen(skb);
210
211 sh = skb_header_pointer(skb, sctphoff, sizeof(_sctph), &_sctph);
212 if (sh == NULL)
213 return 0;
214
215 cmp = sh->checksum;
216
217 tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
218 for (; list; list = list->next)
219 tmp = sctp_update_cksum((__u8 *) list->data,
220 skb_headlen(list), tmp);
221
222 val = sctp_end_cksum(tmp);
223
224 if (val != cmp) {
225 /* CRC failure, dump it. */
226 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
227 "Failed checksum for");
228 return 0;
229 }
230 return 1;
231}
232
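
sctp_snat_handler(), sctp_dnat_handler() and sctp_csum_check() above all recompute the SCTP checksum, which is CRC32c (RFC 4960/RFC 3309) rather than the one's-complement sum used by TCP and UDP; the kernel wraps it in sctp_start_cksum()/sctp_update_cksum()/sctp_end_cksum() so fragments in frag_list can be folded in piecewise. A self-contained bitwise CRC32c sketch, purely to show the underlying algorithm (the kernel versions are table- or instruction-driven):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise (slow but simple) CRC32c: reflected form of polynomial
 * 0x1EDC6F41, i.e. shift constant 0x82F63B78.  Illustration only. */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xFFFFFFFFu;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
}

int main(void)
{
        /* For a real SCTP packet the checksum field is zeroed first, the CRC
         * is taken over the whole datagram, and the result is written back. */
        uint8_t pkt[32];

        memset(pkt, 0, sizeof(pkt));
        printf("crc32c over 32 zero bytes = 0x%08x\n", crc32c(pkt, sizeof(pkt)));
        return 0;
}
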
233struct ipvs_sctp_nextstate {
234 int next_state;
235};
236enum ipvs_sctp_event_t {
237 IP_VS_SCTP_EVE_DATA_CLI,
238 IP_VS_SCTP_EVE_DATA_SER,
239 IP_VS_SCTP_EVE_INIT_CLI,
240 IP_VS_SCTP_EVE_INIT_SER,
241 IP_VS_SCTP_EVE_INIT_ACK_CLI,
242 IP_VS_SCTP_EVE_INIT_ACK_SER,
243 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
244 IP_VS_SCTP_EVE_COOKIE_ECHO_SER,
245 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
246 IP_VS_SCTP_EVE_COOKIE_ACK_SER,
247 IP_VS_SCTP_EVE_ABORT_CLI,
248 IP_VS_SCTP_EVE__ABORT_SER,
249 IP_VS_SCTP_EVE_SHUT_CLI,
250 IP_VS_SCTP_EVE_SHUT_SER,
251 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
252 IP_VS_SCTP_EVE_SHUT_ACK_SER,
253 IP_VS_SCTP_EVE_SHUT_COM_CLI,
254 IP_VS_SCTP_EVE_SHUT_COM_SER,
255 IP_VS_SCTP_EVE_LAST
256};
257
258static enum ipvs_sctp_event_t sctp_events[255] = {
259 IP_VS_SCTP_EVE_DATA_CLI,
260 IP_VS_SCTP_EVE_INIT_CLI,
261 IP_VS_SCTP_EVE_INIT_ACK_CLI,
262 IP_VS_SCTP_EVE_DATA_CLI,
263 IP_VS_SCTP_EVE_DATA_CLI,
264 IP_VS_SCTP_EVE_DATA_CLI,
265 IP_VS_SCTP_EVE_ABORT_CLI,
266 IP_VS_SCTP_EVE_SHUT_CLI,
267 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
268 IP_VS_SCTP_EVE_DATA_CLI,
269 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
270 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
271 IP_VS_SCTP_EVE_DATA_CLI,
272 IP_VS_SCTP_EVE_DATA_CLI,
273 IP_VS_SCTP_EVE_SHUT_COM_CLI,
274};
275
276static struct ipvs_sctp_nextstate
277 sctp_states_table[IP_VS_SCTP_S_LAST][IP_VS_SCTP_EVE_LAST] = {
278 /*
279 * STATE : IP_VS_SCTP_S_NONE
280 */
281 /*next state *//*event */
282 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
283 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
284 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
285 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
286 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
287 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
288 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
289 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
290 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
291 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
292 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
293 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
294 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
295 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
296 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
297 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
298 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
299 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ },
300 },
301 /*
302 * STATE : IP_VS_SCTP_S_INIT_CLI
303 * Client sent INIT and is waiting for a reply from the server (in ECHO_WAIT)
304 */
305 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
306 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
307 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
308 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
309 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
310 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
311 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ECHO_CLI */ },
312 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_ECHO_SER */ },
313 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
314 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
315 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
316 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
317 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
318 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
319 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
320 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
321 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
322 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
323 },
324 /*
325 * State : IP_VS_SCTP_S_INIT_SER
326 * Server sent INIT and waiting for INIT ACK from the client
327 */
328 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
329 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
330 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
331 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
332 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
333 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
334 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
335 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
336 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
337 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
338 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
339 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
340 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
341 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
342 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
343 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
344 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
345 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
346 },
347 /*
348 * State : IP_VS_SCTP_S_INIT_ACK_CLI
349 * Client sent INIT ACK and waiting for ECHO from the server
350 */
351 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
352 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
353 /*
354 * We have got an INIT from client. From the spec.“Upon receipt of
355 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
356 * an INIT ACK using the same parameters it sent in its original
357 * INIT chunk (including its Initiate Tag, unchanged”).
358 */
359 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
360 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
361 /*
362 * INIT_ACK has been resent by the client, let us stay in
363 * the same state
364 */
365 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
366 /*
367 * INIT_ACK sent by the server, close the connection
368 */
369 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
370 /*
371 * ECHO by client, it should not happen, close the connection
372 */
373 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
374 /*
375 * ECHO by server, this is what we are expecting, move to ECHO_SER
376 */
377 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
378 /*
379 * COOKIE ACK from client, it should not happen, close the connection
380 */
381 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
382 /*
383 * Unexpected COOKIE ACK from server, stay in the same state
384 */
385 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
386 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
387 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
388 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
389 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
390 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
391 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
392 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
393 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
394 },
395 /*
396 * State : IP_VS_SCTP_S_INIT_ACK_SER
397 * Server sent INIT ACK and waiting for ECHO from the client
398 */
399 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
400 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
401 /*
402 * We have got an INIT from client. From the spec.“Upon receipt of
403 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
404 * an INIT ACK using the same parameters it sent in its original
405 * INIT chunk (including its Initiate Tag, unchanged”).
406 */
407 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
408 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
409 /*
410 * Unexpected INIT_ACK by the client, let us close the connection
411 */
412 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
413 /*
414 * INIT_ACK resent by the server, let us move to same state
415 */
416 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
417 /*
418 * Client sent the ECHO, this is what we are expecting,
419 * move to ECHO_CLI
420 */
421 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
422 /*
423 * ECHO received from the server, Not sure what to do,
424 * let us close it
425 */
426 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
427 /*
428 * COOKIE ACK from client, let us stay in the same state
429 */
430 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
431 /*
432 * COOKIE ACK from server, hmm... this should not happen, let's close
433 * the connection.
434 */
435 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
436 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
437 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
438 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
439 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
440 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
441 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
442 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
443 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
444 },
445 /*
446 * State : IP_VS_SCTP_S_ECHO_CLI
447 * Client sent ECHO and is waiting for COOKIE ACK from the server
448 */
449 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
450 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
451 /*
452 * We have got an INIT from client. From the spec.“Upon receipt of
453 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
454 * an INIT ACK using the same parameters it sent in its original
455 * INIT chunk (including its Initiate Tag, unchanged”).
456 */
457 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
458 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
459 /*
460 * INIT_ACK has been sent by the client, let us close the connection
461 */
462 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
463 /*
464 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
465 * “If an INIT ACK is received by an endpoint in any state other
466 * than the COOKIE-WAIT state, the endpoint should discard the
467 * INIT ACK chunk”. Stay in the same state
468 */
469 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
470 /*
471 * Client resent the ECHO, let us stay in the same state
472 */
473 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
474 /*
475 * ECHO received from the server, Not sure what to do,
476 * let us close it
477 */
478 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
479 /*
480 * COOKIE ACK from client, this should not happen, let's close the
481 * connection
482 */
483 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
484 /*
485 * COOKIE ACK from server, this is what we are awaiting, let's move to
486 * ESTABLISHED.
487 */
488 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
489 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
490 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
491 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
492 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
493 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
494 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
495 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
496 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
497 },
498 /*
499 * State : IP_VS_SCTP_S_ECHO_SER
500 * Server sent ECHO and is waiting for COOKIE ACK from the client
501 */
502 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
503 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
504 /*
505 * We have got an INIT from client. From the spec.“Upon receipt of
506 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
507 * an INIT ACK using the same parameters it sent in its original
508 * INIT chunk (including its Initiate Tag, unchanged”).
509 */
510 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
511 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
512 /*
513 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
514 * “If an INIT ACK is received by an endpoint in any state other
515 * than the COOKIE-WAIT state, the endpoint should discard the
516 * INIT ACK chunk”. Stay in the same state
517 */
518 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
519 /*
520 * INIT_ACK has been sent by the server, let us close the connection
521 */
522 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
523 /*
524 * Client sent the ECHO, not sure what to do, let's close the
525 * connection.
526 */
527 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
528 /*
529 * ECHO resent by the server, stay in the same state
530 */
531 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
532 /*
533 * COOKIE ACK from client, this is what we are expecting, let's move
534 * to ESTABLISHED.
535 */
536 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
537 /*
538 * COOKIE ACK from server, this should not happen, let's close the
539 * connection.
540 */
541 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
542 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
543 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
544 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
545 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
546 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
547 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
548 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
549 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
550 },
551 /*
552 * State : IP_VS_SCTP_S_ESTABLISHED
553 * Association established
554 */
555 {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ },
556 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_SER */ },
557 /*
558 * We have got an INIT from client. From the spec.“Upon receipt of
559 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
560 * an INIT ACK using the same parameters it sent in its original
561 * INIT chunk (including its Initiate Tag, unchanged”).
562 */
563 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
564 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
565 /*
566 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
567 * “If an INIT ACK is received by an endpoint in any state other
568 * than the COOKIE-WAIT state, the endpoint should discard the
569 * INIT ACK chunk”. Stay in the same state
570 */
571 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
572 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
573 /*
574 * Client sent ECHO; the spec (sec 5.2.4) says it may be handled by the
575 * peer and the peer shall move to ESTABLISHED. If it doesn't handle
576 * it, it will send an ERROR chunk. So, stay in the same state
577 */
578 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
579 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
580 /*
581 * COOKIE ACK from client, not sure what to do, stay in the same state
582 */
583 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
584 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
585 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
586 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
587 /*
588 * SHUTDOWN from the client, move to SHUTDOWN_CLI
589 */
590 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
591 /*
592 * SHUTDOWN from the server, move to SHUTDOWN_SER
593 */
594 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
595 /*
596 * client sent SHUTDOWN_ACK, this should not happen, let's close
597 * the connection
598 */
599 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
600 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
601 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
602 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
603 },
604 /*
605 * State : IP_VS_SCTP_S_SHUT_CLI
606 * SHUTDOWN sent from the client, waiting for SHUTDOWN ACK from the server
607 */
608 /*
609 * We received a data chunk; keep the state unchanged. I assume
610 * that data chunks can still be received by both peers in the
611 * SHUTDOWN state
612 */
613
614 {{IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
615 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
616 /*
617 * We have got an INIT from client. From the spec.“Upon receipt of
618 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
619 * an INIT ACK using the same parameters it sent in its original
620 * INIT chunk (including its Initiate Tag, unchanged”).
621 */
622 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
623 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
624 /*
625 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
626 * “If an INIT ACK is received by an endpoint in any state other
627 * than the COOKIE-WAIT state, the endpoint should discard the
628 * INIT ACK chunk”. Stay in the same state
629 */
630 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
631 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
632 /*
633 * Client sent ECHO; the spec (sec 5.2.4) says it may be handled by the
634 * peer and the peer shall move to ESTABLISHED. If it doesn't handle
635 * it, it will send an ERROR chunk. So, stay in the same state
636 */
637 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
638 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
639 /*
640 * COOKIE ACK from client, not sure what to do, stay in the same state
641 */
642 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
643 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
644 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
645 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
646 /*
647 * SHUTDOWN resent from the client, move to SHUTDOWN_CLI
648 */
649 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
650 /*
651 * SHUTDOWN from the server, move to SHUTDOWN_SER
652 */
653 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
654 /*
655 * client sent SHUTDOWN_ACK, this should not happen, let's close
656 * the connection
657 */
658 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
659 /*
660 * Server sent SHUTDOWN ACK, this is what we are expecting, let's move
661 * to SHUTDOWN_ACK_SER
662 */
663 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
664 /*
665 * SHUTDOWN COM from client, this should not happen, let's close the
666 * connection
667 */
668 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
669 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
670 },
671 /*
672 * State : IP_VS_SCTP_S_SHUT_SER
673 * SHUTDOWN sent from the server, waiting for SHUTDOWN ACK from the client
674 */
675 /*
676 * We received a data chunk; keep the state unchanged. I assume
677 * that data chunks can still be received by both peers in the
678 * SHUTDOWN state
679 */
680
681 {{IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
682 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
683 /*
684 * We have got an INIT from client. From the spec.“Upon receipt of
685 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
686 * an INIT ACK using the same parameters it sent in its original
687 * INIT chunk (including its Initiate Tag, unchanged”).
688 */
689 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
690 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
691 /*
692 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
693 * “If an INIT ACK is received by an endpoint in any state other
694 * than the COOKIE-WAIT state, the endpoint should discard the
695 * INIT ACK chunk”. Stay in the same state
696 */
697 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
698 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
699 /*
700 * Client sent ECHO; the spec (sec 5.2.4) says it may be handled by the
701 * peer and the peer shall move to ESTABLISHED. If it doesn't handle
702 * it, it will send an ERROR chunk. So, stay in the same state
703 */
704 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
705 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
706 /*
707 * COOKIE ACK from client, not sure what to do, stay in the same state
708 */
709 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
710 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
711 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
712 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
713 /*
714 * SHUTDOWN resent from the client, move to SHUTDOWN_CLI
715 */
716 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
717 /*
718 * SHUTDOWN resent from the server, move to SHUTDOWN_SER
719 */
720 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
721 /*
722 * client sent SHUTDOWN_ACK, this is what we are expecting, let's
723 * move to SHUT_ACK_CLI
724 */
725 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
726 /*
727 * Server sent SHUTDOWN ACK, this should not happen, let's close the
728 * connection
729 */
730 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
731 /*
732 * SHUTDOWN COM from client, this should not happen, let's close the
733 * connection
734 */
735 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
736 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
737 },
738
739 /*
740 * State : IP_VS_SCTP_S_SHUT_ACK_CLI
741 * SHUTDOWN ACK from the client, awaiting SHUTDOWN COMPLETE from the server
742 */
743 /*
744 * We received a data chunk; keep the state unchanged. I assume
745 * that data chunks can still be received by both peers in the
746 * SHUTDOWN state
747 */
748
749 {{IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
750 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
751 /*
752 * We have got an INIT from client. From the spec.“Upon receipt of
753 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
754 * an INIT ACK using the same parameters it sent in its original
755 * INIT chunk (including its Initiate Tag, unchanged”).
756 */
757 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
758 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
759 /*
760 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
761 * “If an INIT ACK is received by an endpoint in any state other
762 * than the COOKIE-WAIT state, the endpoint should discard the
763 * INIT ACK chunk”. Stay in the same state
764 */
765 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
766 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
767 /*
768 * Client sent ECHO; the spec (sec 5.2.4) says it may be handled by the
769 * peer and the peer shall move to ESTABLISHED. If it doesn't handle
770 * it, it will send an ERROR chunk. So, stay in the same state
771 */
772 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
773 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
774 /*
775 * COOKIE ACK from client, not sure what to do, stay in the same state
776 */
777 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
778 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
779 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
780 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
781 /*
782 * SHUTDOWN sent from the client, move to SHUTDOWN_CLI
783 */
784 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
785 /*
786 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
787 */
788 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
789 /*
790 * client resent SHUTDOWN_ACK, let's stay in the same state
791 */
792 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
793 /*
794 * Server sent SHUTDOWN ACK, this should not happen, let's close the
795 * connection
796 */
797 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
798 /*
799 * SHUTDOWN COM from client, this should not happen, let's close the
800 * connection
801 */
802 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
803 /*
804 * SHUTDOWN COMPLETE from the server, this is what we are expecting.
805 */
806 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
807 },
808
809 /*
810 * State : IP_VS_SCTP_S_SHUT_ACK_SER
811 * SHUTDOWN ACK from the server, awaiting SHUTDOWN COMPLETE from the client
812 */
813 /*
814 * We received a data chunk; keep the state unchanged. I assume
815 * that data chunks can still be received by both peers in the
816 * SHUTDOWN state
817 */
818
819 {{IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
820 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
821 /*
822 * We have got an INIT from client. From the spec.“Upon receipt of
823 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
824 * an INIT ACK using the same parameters it sent in its original
825 * INIT chunk (including its Initiate Tag, unchanged”).
826 */
827 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
828 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
829 /*
830 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
831 * “If an INIT ACK is received by an endpoint in any state other
832 * than the COOKIE-WAIT state, the endpoint should discard the
833 * INIT ACK chunk”. Stay in the same state
834 */
835 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
836 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
837 /*
838 * Client sent ECHO; the spec (sec 5.2.4) says it may be handled by the
839 * peer and the peer shall move to ESTABLISHED. If it doesn't handle
840 * it, it will send an ERROR chunk. So, stay in the same state
841 */
842 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
843 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
844 /*
845 * COOKIE ACK from client, not sure what to do, stay in the same state
846 */
847 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
848 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
849 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
850 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
851 /*
852 * SHUTDOWN sent from the client, move to SHUTDOWN_CLI
853 */
854 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
855 /*
856 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
857 */
858 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
859 /*
860 * client sent SHUTDOWN_ACK, this should not happen, let's close
861 * the connection.
862 */
863 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
864 /*
865 * Server resent SHUTDOWN ACK, stay in the same state
866 */
867 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
868 /*
869 * SHUTDOWN COMPLETE from the client, this is what we are expecting, let's close
870 * the connection
871 */
872 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
873 /*
874 * SHUTDOWN COMPLETE from the server, this should not happen.
875 */
876 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
877 },
878 /*
879 * State : IP_VS_SCTP_S_CLOSED
880 */
881 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
882 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
883 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
884 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
885 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
886 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
887 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
888 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
889 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
890 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
891 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
892 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
893 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
894 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
895 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
896 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
897 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
898 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
899 }
900};
901
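
sctp_states_table[] above is consumed by set_sctp_state() further down: sctp_events[] maps the chunk type to a client-side event, the event index is bumped by one when the packet came from the real server (IP_VS_DIR_OUTPUT), and the next state is then a plain two-dimensional table lookup. A compact sketch of that table-driven dispatch over a made-up, much smaller state/event space:

#include <stdio.h>

enum state { S_CLOSED, S_INIT, S_ESTABLISHED, S_NSTATES };
enum event { EV_INIT_CLI, EV_INIT_SER, EV_COOKIE_ACK_CLI, EV_COOKIE_ACK_SER, EV_NEVENTS };

/* next_state[state][event]: client/server variants sit next to each other,
 * so "event + 1" selects the server-side entry, as in set_sctp_state(). */
static const enum state next_state[S_NSTATES][EV_NEVENTS] = {
        [S_CLOSED]      = { S_INIT, S_INIT, S_CLOSED,      S_CLOSED },
        [S_INIT]        = { S_INIT, S_INIT, S_ESTABLISHED, S_ESTABLISHED },
        [S_ESTABLISHED] = { S_INIT, S_INIT, S_ESTABLISHED, S_ESTABLISHED },
};

int main(void)
{
        enum state cur = S_CLOSED;
        enum event ev = EV_INIT_CLI;
        int from_server = 0;            /* would be IP_VS_DIR_OUTPUT in IPVS */

        if (from_server)
                ev++;                   /* pick the _SER variant of the event */
        cur = next_state[cur][ev];
        printf("new state = %d\n", cur);
        return 0;
}

Keeping the _CLI/_SER variants adjacent in the event enum is what makes the single "event++" direction adjustment work.
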
902/*
903 * Timeout table[state]
904 */
905static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
906 [IP_VS_SCTP_S_NONE] = 2 * HZ,
907 [IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ,
908 [IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ,
909 [IP_VS_SCTP_S_INIT_ACK_CLI] = 1 * 60 * HZ,
910 [IP_VS_SCTP_S_INIT_ACK_SER] = 1 * 60 * HZ,
911 [IP_VS_SCTP_S_ECHO_CLI] = 1 * 60 * HZ,
912 [IP_VS_SCTP_S_ECHO_SER] = 1 * 60 * HZ,
913 [IP_VS_SCTP_S_ESTABLISHED] = 15 * 60 * HZ,
914 [IP_VS_SCTP_S_SHUT_CLI] = 1 * 60 * HZ,
915 [IP_VS_SCTP_S_SHUT_SER] = 1 * 60 * HZ,
916 [IP_VS_SCTP_S_SHUT_ACK_CLI] = 1 * 60 * HZ,
917 [IP_VS_SCTP_S_SHUT_ACK_SER] = 1 * 60 * HZ,
918 [IP_VS_SCTP_S_CLOSED] = 10 * HZ,
919 [IP_VS_SCTP_S_LAST] = 2 * HZ,
920};
921
922static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = {
923 [IP_VS_SCTP_S_NONE] = "NONE",
924 [IP_VS_SCTP_S_INIT_CLI] = "INIT_CLI",
925 [IP_VS_SCTP_S_INIT_SER] = "INIT_SER",
926 [IP_VS_SCTP_S_INIT_ACK_CLI] = "INIT_ACK_CLI",
927 [IP_VS_SCTP_S_INIT_ACK_SER] = "INIT_ACK_SER",
928 [IP_VS_SCTP_S_ECHO_CLI] = "COOKIE_ECHO_CLI",
929 [IP_VS_SCTP_S_ECHO_SER] = "COOKIE_ECHO_SER",
930 [IP_VS_SCTP_S_ESTABLISHED] = "ESTABISHED",
931 [IP_VS_SCTP_S_SHUT_CLI] = "SHUTDOWN_CLI",
932 [IP_VS_SCTP_S_SHUT_SER] = "SHUTDOWN_SER",
933 [IP_VS_SCTP_S_SHUT_ACK_CLI] = "SHUTDOWN_ACK_CLI",
934 [IP_VS_SCTP_S_SHUT_ACK_SER] = "SHUTDOWN_ACK_SER",
935 [IP_VS_SCTP_S_CLOSED] = "CLOSED",
936 [IP_VS_SCTP_S_LAST] = "BUG!"
937};
938
939
940static const char *sctp_state_name(int state)
941{
942 if (state >= IP_VS_SCTP_S_LAST)
943 return "ERR!";
944 if (sctp_state_name_table[state])
945 return sctp_state_name_table[state];
946 return "?";
947}
948
949static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
950{
951}
952
953static int
954sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
955{
956
 957 return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
958 sctp_state_name_table, sname, to);
959}
960
961static inline int
962set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
963 int direction, const struct sk_buff *skb)
964{
965 sctp_chunkhdr_t _sctpch, *sch;
966 unsigned char chunk_type;
967 int event, next_state;
968 int ihl;
969
970#ifdef CONFIG_IP_VS_IPV6
971 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
972#else
973 ihl = ip_hdrlen(skb);
974#endif
975
976 sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
977 sizeof(_sctpch), &_sctpch);
978 if (sch == NULL)
979 return 0;
980
981 chunk_type = sch->type;
982 /*
983 * Section 3: Multiple chunks can be bundled into one SCTP packet
984 * up to the MTU size, except for the INIT, INIT ACK, and
985 * SHUTDOWN COMPLETE chunks. These chunks MUST NOT be bundled with
986 * any other chunk in a packet.
987 *
988 * Section 3.3.7: DATA chunks MUST NOT be bundled with ABORT. Control
989 * chunks (except for INIT, INIT ACK, and SHUTDOWN COMPLETE) MAY be
990 * bundled with an ABORT, but they MUST be placed before the ABORT
991 * in the SCTP packet or they will be ignored by the receiver.
992 */
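 /* Hence, when the leading chunk is COOKIE ECHO or COOKIE ACK, peek at the
  * chunk bundled after it; if that chunk is an ABORT, handle the packet as
  * an ABORT event. */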
993 if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
994 (sch->type == SCTP_CID_COOKIE_ACK)) {
995 sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
996 sch->length), sizeof(_sctpch), &_sctpch);
997 if (sch) {
998 if (sch->type == SCTP_CID_ABORT)
999 chunk_type = sch->type;
1000 }
1001 }
1002
1003 event = sctp_events[chunk_type];
1004
1005 /*
1006 * If the direction is IP_VS_DIR_OUTPUT, this event is from server
1007 */
1008 if (direction == IP_VS_DIR_OUTPUT)
1009 event++;
1010 /*
1011 * get next state
1012 */
1013 next_state = sctp_states_table[cp->state][event].next_state;
1014
1015 if (next_state != cp->state) {
1016 struct ip_vs_dest *dest = cp->dest;
1017
1018 IP_VS_DBG_BUF(8, "%s %s %s:%d->"
1019 "%s:%d state: %s->%s conn->refcnt:%d\n",
1020 pp->name,
1021 ((direction == IP_VS_DIR_OUTPUT) ?
1022 "output " : "input "),
1023 IP_VS_DBG_ADDR(cp->af, &cp->daddr),
1024 ntohs(cp->dport),
1025 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1026 ntohs(cp->cport),
1027 sctp_state_name(cp->state),
1028 sctp_state_name(next_state),
1029 atomic_read(&cp->refcnt));
1030 if (dest) {
1031 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
1032 (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
1033 atomic_dec(&dest->activeconns);
1034 atomic_inc(&dest->inactconns);
1035 cp->flags |= IP_VS_CONN_F_INACTIVE;
1036 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
1037 (next_state == IP_VS_SCTP_S_ESTABLISHED)) {
1038 atomic_inc(&dest->activeconns);
1039 atomic_dec(&dest->inactconns);
1040 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
1041 }
1042 }
1043 }
1044
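 /* Commit the new state and look up its per-state timeout in a single step. */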
1045 cp->timeout = pp->timeout_table[cp->state = next_state];
1046
1047 return 1;
1048}
1049
1050static int
1051sctp_state_transition(struct ip_vs_conn *cp, int direction,
1052 const struct sk_buff *skb, struct ip_vs_protocol *pp)
1053{
1054 int ret = 0;
1055
1056 spin_lock(&cp->lock);
1057 ret = set_sctp_state(pp, cp, direction, skb);
1058 spin_unlock(&cp->lock);
1059
1060 return ret;
1061}
1062
1063/*
1064 * Hash table for SCTP application incarnations
1065 */
1066#define SCTP_APP_TAB_BITS 4
1067#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
1068#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
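/* 16 buckets (1 << SCTP_APP_TAB_BITS) for registered SCTP application incarnations. */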
1069
1070static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
1071static DEFINE_SPINLOCK(sctp_app_lock);
1072
1073static inline __u16 sctp_app_hashkey(__be16 port)
1074{
1075 return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
1076 & SCTP_APP_TAB_MASK;
1077}
1078
1079static int sctp_register_app(struct ip_vs_app *inc)
1080{
1081 struct ip_vs_app *i;
1082 __u16 hash;
1083 __be16 port = inc->port;
1084 int ret = 0;
1085
1086 hash = sctp_app_hashkey(port);
1087
1088 spin_lock_bh(&sctp_app_lock);
1089 list_for_each_entry(i, &sctp_apps[hash], p_list) {
1090 if (i->port == port) {
1091 ret = -EEXIST;
1092 goto out;
1093 }
1094 }
1095 list_add(&inc->p_list, &sctp_apps[hash]);
1096 atomic_inc(&ip_vs_protocol_sctp.appcnt);
1097out:
1098 spin_unlock_bh(&sctp_app_lock);
1099
1100 return ret;
1101}
1102
1103static void sctp_unregister_app(struct ip_vs_app *inc)
1104{
1105 spin_lock_bh(&sctp_app_lock);
1106 atomic_dec(&ip_vs_protocol_sctp.appcnt);
1107 list_del(&inc->p_list);
1108 spin_unlock_bh(&sctp_app_lock);
1109}
1110
1111static int sctp_app_conn_bind(struct ip_vs_conn *cp)
1112{
1113 int hash;
1114 struct ip_vs_app *inc;
1115 int result = 0;
1116
1117 /* Default binding: bind app only for NAT */
1118 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
1119 return 0;
1120 /* Lookup application incarnations and bind the right one */
1121 hash = sctp_app_hashkey(cp->vport);
1122
1123 spin_lock(&sctp_app_lock);
1124 list_for_each_entry(inc, &sctp_apps[hash], p_list) {
1125 if (inc->port == cp->vport) {
1126 if (unlikely(!ip_vs_app_inc_get(inc)))
1127 break;
1128 spin_unlock(&sctp_app_lock);
1129
1130 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
1131 "%s:%u to app %s on port %u\n",
1132 __func__,
1133 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1134 ntohs(cp->cport),
1135 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1136 ntohs(cp->vport),
1137 inc->name, ntohs(inc->port));
1138 cp->app = inc;
1139 if (inc->init_conn)
1140 result = inc->init_conn(inc, cp);
1141 goto out;
1142 }
1143 }
1144 spin_unlock(&sctp_app_lock);
1145out:
1146 return result;
1147}
1148
1149static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
1150{
1151 IP_VS_INIT_HASH_TABLE(sctp_apps);
1152 pp->timeout_table = sctp_timeouts;
1153}
1154
1155
1156static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
1157{
1158
1159}
1160
1161struct ip_vs_protocol ip_vs_protocol_sctp = {
1162 .name = "SCTP",
1163 .protocol = IPPROTO_SCTP,
1164 .num_states = IP_VS_SCTP_S_LAST,
1165 .dont_defrag = 0,
1166 .appcnt = ATOMIC_INIT(0),
1167 .init = ip_vs_sctp_init,
1168 .exit = ip_vs_sctp_exit,
1169 .register_app = sctp_register_app,
1170 .unregister_app = sctp_unregister_app,
1171 .conn_schedule = sctp_conn_schedule,
1172 .conn_in_get = sctp_conn_in_get,
1173 .conn_out_get = sctp_conn_out_get,
1174 .snat_handler = sctp_snat_handler,
1175 .dnat_handler = sctp_dnat_handler,
1176 .csum_check = sctp_csum_check,
1177 .state_name = sctp_state_name,
1178 .state_transition = sctp_state_transition,
1179 .app_conn_bind = sctp_app_conn_bind,
1180 .debug_packet = ip_vs_tcpudp_debug_packet,
1181 .timeout_change = sctp_timeout_change,
1182 .set_state_timeout = sctp_set_state_timeout,
1183};
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 8e6cfd36e6f0..e6cc174fbc06 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -36,6 +36,7 @@
36#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 36#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
37 37
38#include <linux/ip.h> 38#include <linux/ip.h>
39#include <linux/slab.h>
39#include <linux/module.h> 40#include <linux/module.h>
40#include <linux/kernel.h> 41#include <linux/kernel.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e177f0dc2084..8fb0ae616761 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -400,6 +400,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
400 flags |= IP_VS_CONN_F_INACTIVE; 400 flags |= IP_VS_CONN_F_INACTIVE;
401 else 401 else
402 flags &= ~IP_VS_CONN_F_INACTIVE; 402 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } else if (s->protocol == IPPROTO_SCTP) {
404 if (state != IP_VS_SCTP_S_ESTABLISHED)
405 flags |= IP_VS_CONN_F_INACTIVE;
406 else
407 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } 408 }
404 cp = ip_vs_conn_new(AF_INET, s->protocol, 409 cp = ip_vs_conn_new(AF_INET, s->protocol,
405 (union nf_inet_addr *)&s->caddr, 410 (union nf_inet_addr *)&s->caddr,
@@ -434,6 +439,15 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
434 atomic_dec(&dest->inactconns); 439 atomic_dec(&dest->inactconns);
435 cp->flags &= ~IP_VS_CONN_F_INACTIVE; 440 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
436 } 441 }
442 } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
443 (cp->state != state)) {
444 dest = cp->dest;
445 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
446 (state != IP_VS_SCTP_S_ESTABLISHED)) {
447 atomic_dec(&dest->activeconns);
448 atomic_inc(&dest->inactconns);
449 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
450 }
437 } 451 }
438 452
439 if (opt) 453 if (opt)
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 3c115fc19784..30db633f88f1 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/net.h> 27#include <linux/net.h>
27#include <linux/gcd.h> 28#include <linux/gcd.h>
28 29
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29c..e450cd6f4eb5 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -17,6 +17,7 @@
17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/slab.h>
20#include <linux/tcp.h> /* for tcphdr */ 21#include <linux/tcp.h> /* for tcphdr */
21#include <net/ip.h> 22#include <net/ip.h>
22#include <net/tcp.h> /* for csum_tcpudp_magic */ 23#include <net/tcp.h> /* for csum_tcpudp_magic */
@@ -311,7 +312,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
311 mtu = dst_mtu(&rt->u.dst); 312 mtu = dst_mtu(&rt->u.dst);
312 if (skb->len > mtu) { 313 if (skb->len > mtu) {
313 dst_release(&rt->u.dst); 314 dst_release(&rt->u.dst);
314 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 315 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
315 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 316 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
316 goto tx_error; 317 goto tx_error;
317 } 318 }
@@ -454,7 +455,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
454 mtu = dst_mtu(&rt->u.dst); 455 mtu = dst_mtu(&rt->u.dst);
455 if (skb->len > mtu) { 456 if (skb->len > mtu) {
456 dst_release(&rt->u.dst); 457 dst_release(&rt->u.dst);
457 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 458 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
458 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 459 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
459 "ip_vs_nat_xmit_v6(): frag needed for"); 460 "ip_vs_nat_xmit_v6(): frag needed for");
460 goto tx_error; 461 goto tx_error;
@@ -672,7 +673,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
672 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 673 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
673 674
674 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { 675 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
675 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 676 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
676 dst_release(&rt->u.dst); 677 dst_release(&rt->u.dst);
677 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 678 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
678 goto tx_error; 679 goto tx_error;
@@ -814,7 +815,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
814 /* MTU checking */ 815 /* MTU checking */
815 mtu = dst_mtu(&rt->u.dst); 816 mtu = dst_mtu(&rt->u.dst);
816 if (skb->len > mtu) { 817 if (skb->len > mtu) {
817 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 818 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
818 dst_release(&rt->u.dst); 819 dst_release(&rt->u.dst);
819 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 820 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
820 goto tx_error; 821 goto tx_error;
@@ -965,7 +966,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
965 mtu = dst_mtu(&rt->u.dst); 966 mtu = dst_mtu(&rt->u.dst);
966 if (skb->len > mtu) { 967 if (skb->len > mtu) {
967 dst_release(&rt->u.dst); 968 dst_release(&rt->u.dst);
968 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 969 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
969 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 970 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
970 goto tx_error; 971 goto tx_error;
971 } 972 }
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 018f90db511c..ab81b380eae6 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/netfilter.h> 11#include <linux/netfilter.h>
12#include <linux/slab.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
14 15
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 07d9d8857e5d..372e80f07a81 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -16,6 +16,7 @@
16#include <linux/in.h> 16#include <linux/in.h>
17#include <linux/udp.h> 17#include <linux/udp.h>
18#include <linux/netfilter.h> 18#include <linux/netfilter.h>
19#include <linux/gfp.h>
19 20
20#include <net/netfilter/nf_conntrack.h> 21#include <net/netfilter/nf_conntrack.h>
21#include <net/netfilter/nf_conntrack_expect.h> 22#include <net/netfilter/nf_conntrack_expect.h>
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d79e3c1616c..0c9bbe93cc16 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -42,6 +42,7 @@
42#include <net/netfilter/nf_conntrack_extend.h> 42#include <net/netfilter/nf_conntrack_extend.h>
43#include <net/netfilter/nf_conntrack_acct.h> 43#include <net/netfilter/nf_conntrack_acct.h>
44#include <net/netfilter/nf_conntrack_ecache.h> 44#include <net/netfilter/nf_conntrack_ecache.h>
45#include <net/netfilter/nf_conntrack_zones.h>
45#include <net/netfilter/nf_nat.h> 46#include <net/netfilter/nf_nat.h>
46#include <net/netfilter/nf_nat_core.h> 47#include <net/netfilter/nf_nat_core.h>
47 48
@@ -68,7 +69,7 @@ static int nf_conntrack_hash_rnd_initted;
68static unsigned int nf_conntrack_hash_rnd; 69static unsigned int nf_conntrack_hash_rnd;
69 70
70static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 71static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
71 unsigned int size, unsigned int rnd) 72 u16 zone, unsigned int size, unsigned int rnd)
72{ 73{
73 unsigned int n; 74 unsigned int n;
74 u_int32_t h; 75 u_int32_t h;
@@ -79,16 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
79 */ 80 */
80 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); 81 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
81 h = jhash2((u32 *)tuple, n, 82 h = jhash2((u32 *)tuple, n,
82 rnd ^ (((__force __u16)tuple->dst.u.all << 16) | 83 zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
83 tuple->dst.protonum)); 84 tuple->dst.protonum));
84 85
85 return ((u64)h * size) >> 32; 86 return ((u64)h * size) >> 32;
86} 87}
87 88
88static inline u_int32_t hash_conntrack(const struct net *net, 89static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
89 const struct nf_conntrack_tuple *tuple) 90 const struct nf_conntrack_tuple *tuple)
90{ 91{
91 return __hash_conntrack(tuple, net->ct.htable_size, 92 return __hash_conntrack(tuple, zone, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 93 nf_conntrack_hash_rnd);
93} 94}
94 95
@@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
292 * - Caller must lock nf_conntrack_lock before calling this function 293 * - Caller must lock nf_conntrack_lock before calling this function
293 */ 294 */
294struct nf_conntrack_tuple_hash * 295struct nf_conntrack_tuple_hash *
295__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) 296__nf_conntrack_find(struct net *net, u16 zone,
297 const struct nf_conntrack_tuple *tuple)
296{ 298{
297 struct nf_conntrack_tuple_hash *h; 299 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 300 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(net, tuple); 301 unsigned int hash = hash_conntrack(net, zone, tuple);
300 302
301 /* Disable BHs the entire time since we normally need to disable them 303 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 304 * at least once for the stats anyway.
@@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
304 local_bh_disable(); 306 local_bh_disable();
305begin: 307begin:
306 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 308 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
307 if (nf_ct_tuple_equal(tuple, &h->tuple)) { 309 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
310 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
308 NF_CT_STAT_INC(net, found); 311 NF_CT_STAT_INC(net, found);
309 local_bh_enable(); 312 local_bh_enable();
310 return h; 313 return h;
@@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
326 329
327/* Find a connection corresponding to a tuple. */ 330/* Find a connection corresponding to a tuple. */
328struct nf_conntrack_tuple_hash * 331struct nf_conntrack_tuple_hash *
329nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 332nf_conntrack_find_get(struct net *net, u16 zone,
333 const struct nf_conntrack_tuple *tuple)
330{ 334{
331 struct nf_conntrack_tuple_hash *h; 335 struct nf_conntrack_tuple_hash *h;
332 struct nf_conn *ct; 336 struct nf_conn *ct;
333 337
334 rcu_read_lock(); 338 rcu_read_lock();
335begin: 339begin:
336 h = __nf_conntrack_find(net, tuple); 340 h = __nf_conntrack_find(net, zone, tuple);
337 if (h) { 341 if (h) {
338 ct = nf_ct_tuplehash_to_ctrack(h); 342 ct = nf_ct_tuplehash_to_ctrack(h);
339 if (unlikely(nf_ct_is_dying(ct) || 343 if (unlikely(nf_ct_is_dying(ct) ||
340 !atomic_inc_not_zero(&ct->ct_general.use))) 344 !atomic_inc_not_zero(&ct->ct_general.use)))
341 h = NULL; 345 h = NULL;
342 else { 346 else {
343 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { 347 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
348 nf_ct_zone(ct) != zone)) {
344 nf_ct_put(ct); 349 nf_ct_put(ct);
345 goto begin; 350 goto begin;
346 } 351 }
@@ -368,9 +373,11 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 373{
369 struct net *net = nf_ct_net(ct); 374 struct net *net = nf_ct_net(ct);
370 unsigned int hash, repl_hash; 375 unsigned int hash, repl_hash;
376 u16 zone;
371 377
372 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 378 zone = nf_ct_zone(ct);
373 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 379 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
380 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
374 381
375 __nf_conntrack_hash_insert(ct, hash, repl_hash); 382 __nf_conntrack_hash_insert(ct, hash, repl_hash);
376} 383}
@@ -387,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
387 struct hlist_nulls_node *n; 394 struct hlist_nulls_node *n;
388 enum ip_conntrack_info ctinfo; 395 enum ip_conntrack_info ctinfo;
389 struct net *net; 396 struct net *net;
397 u16 zone;
390 398
391 ct = nf_ct_get(skb, &ctinfo); 399 ct = nf_ct_get(skb, &ctinfo);
392 net = nf_ct_net(ct); 400 net = nf_ct_net(ct);
@@ -398,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 406 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
399 return NF_ACCEPT; 407 return NF_ACCEPT;
400 408
401 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 409 zone = nf_ct_zone(ct);
402 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 410 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
411 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
403 412
404 /* We're not in hash table, and we refuse to set up related 413 /* We're not in hash table, and we refuse to set up related
405 connections for unconfirmed conns. But packet copies and 414 connections for unconfirmed conns. But packet copies and
@@ -418,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
418 not in the hash. If there is, we lost race. */ 427 not in the hash. If there is, we lost race. */
419 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) 428 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
420 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 429 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
421 &h->tuple)) 430 &h->tuple) &&
431 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
422 goto out; 432 goto out;
423 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) 433 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
424 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 434 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
425 &h->tuple)) 435 &h->tuple) &&
436 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
426 goto out; 437 goto out;
427 438
428 /* Remove from unconfirmed list */ 439 /* Remove from unconfirmed list */
@@ -469,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
469 struct net *net = nf_ct_net(ignored_conntrack); 480 struct net *net = nf_ct_net(ignored_conntrack);
470 struct nf_conntrack_tuple_hash *h; 481 struct nf_conntrack_tuple_hash *h;
471 struct hlist_nulls_node *n; 482 struct hlist_nulls_node *n;
472 unsigned int hash = hash_conntrack(net, tuple); 483 struct nf_conn *ct;
484 u16 zone = nf_ct_zone(ignored_conntrack);
485 unsigned int hash = hash_conntrack(net, zone, tuple);
473 486
474 /* Disable BHs the entire time since we need to disable them at 487 /* Disable BHs the entire time since we need to disable them at
475 * least once for the stats anyway. 488 * least once for the stats anyway.
476 */ 489 */
477 rcu_read_lock_bh(); 490 rcu_read_lock_bh();
478 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 491 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
479 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 492 ct = nf_ct_tuplehash_to_ctrack(h);
480 nf_ct_tuple_equal(tuple, &h->tuple)) { 493 if (ct != ignored_conntrack &&
494 nf_ct_tuple_equal(tuple, &h->tuple) &&
495 nf_ct_zone(ct) == zone) {
481 NF_CT_STAT_INC(net, found); 496 NF_CT_STAT_INC(net, found);
482 rcu_read_unlock_bh(); 497 rcu_read_unlock_bh();
483 return 1; 498 return 1;
@@ -540,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
540 return dropped; 555 return dropped;
541} 556}
542 557
543struct nf_conn *nf_conntrack_alloc(struct net *net, 558struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
544 const struct nf_conntrack_tuple *orig, 559 const struct nf_conntrack_tuple *orig,
545 const struct nf_conntrack_tuple *repl, 560 const struct nf_conntrack_tuple *repl,
546 gfp_t gfp) 561 gfp_t gfp)
@@ -558,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
558 573
559 if (nf_conntrack_max && 574 if (nf_conntrack_max &&
560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 575 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
561 unsigned int hash = hash_conntrack(net, orig); 576 unsigned int hash = hash_conntrack(net, zone, orig);
562 if (!early_drop(net, hash)) { 577 if (!early_drop(net, hash)) {
563 atomic_dec(&net->ct.count); 578 atomic_dec(&net->ct.count);
564 if (net_ratelimit()) 579 if (net_ratelimit())
@@ -595,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
595#ifdef CONFIG_NET_NS 610#ifdef CONFIG_NET_NS
596 ct->ct_net = net; 611 ct->ct_net = net;
597#endif 612#endif
598 613#ifdef CONFIG_NF_CONNTRACK_ZONES
614 if (zone) {
615 struct nf_conntrack_zone *nf_ct_zone;
616
617 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
618 if (!nf_ct_zone)
619 goto out_free;
620 nf_ct_zone->id = zone;
621 }
622#endif
599 /* 623 /*
600 * changes to lookup keys must be done before setting refcnt to 1 624 * changes to lookup keys must be done before setting refcnt to 1
601 */ 625 */
602 smp_wmb(); 626 smp_wmb();
603 atomic_set(&ct->ct_general.use, 1); 627 atomic_set(&ct->ct_general.use, 1);
604 return ct; 628 return ct;
629
630#ifdef CONFIG_NF_CONNTRACK_ZONES
631out_free:
632 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
633 return ERR_PTR(-ENOMEM);
634#endif
605} 635}
606EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 636EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
607 637
@@ -619,7 +649,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
619/* Allocate a new conntrack: we return -ENOMEM if classification 649/* Allocate a new conntrack: we return -ENOMEM if classification
620 failed due to stress. Otherwise it really is unclassifiable. */ 650 failed due to stress. Otherwise it really is unclassifiable. */
621static struct nf_conntrack_tuple_hash * 651static struct nf_conntrack_tuple_hash *
622init_conntrack(struct net *net, 652init_conntrack(struct net *net, struct nf_conn *tmpl,
623 const struct nf_conntrack_tuple *tuple, 653 const struct nf_conntrack_tuple *tuple,
624 struct nf_conntrack_l3proto *l3proto, 654 struct nf_conntrack_l3proto *l3proto,
625 struct nf_conntrack_l4proto *l4proto, 655 struct nf_conntrack_l4proto *l4proto,
@@ -629,14 +659,16 @@ init_conntrack(struct net *net,
629 struct nf_conn *ct; 659 struct nf_conn *ct;
630 struct nf_conn_help *help; 660 struct nf_conn_help *help;
631 struct nf_conntrack_tuple repl_tuple; 661 struct nf_conntrack_tuple repl_tuple;
662 struct nf_conntrack_ecache *ecache;
632 struct nf_conntrack_expect *exp; 663 struct nf_conntrack_expect *exp;
664 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
633 665
634 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 666 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
635 pr_debug("Can't invert tuple.\n"); 667 pr_debug("Can't invert tuple.\n");
636 return NULL; 668 return NULL;
637 } 669 }
638 670
639 ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); 671 ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
640 if (IS_ERR(ct)) { 672 if (IS_ERR(ct)) {
641 pr_debug("Can't allocate conntrack.\n"); 673 pr_debug("Can't allocate conntrack.\n");
642 return (struct nf_conntrack_tuple_hash *)ct; 674 return (struct nf_conntrack_tuple_hash *)ct;
@@ -649,10 +681,14 @@ init_conntrack(struct net *net,
649 } 681 }
650 682
651 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 683 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
652 nf_ct_ecache_ext_add(ct, GFP_ATOMIC); 684
685 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
686 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
687 ecache ? ecache->expmask : 0,
688 GFP_ATOMIC);
653 689
654 spin_lock_bh(&nf_conntrack_lock); 690 spin_lock_bh(&nf_conntrack_lock);
655 exp = nf_ct_find_expectation(net, tuple); 691 exp = nf_ct_find_expectation(net, zone, tuple);
656 if (exp) { 692 if (exp) {
657 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 693 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
658 ct, exp); 694 ct, exp);
@@ -674,7 +710,7 @@ init_conntrack(struct net *net,
674 nf_conntrack_get(&ct->master->ct_general); 710 nf_conntrack_get(&ct->master->ct_general);
675 NF_CT_STAT_INC(net, expect_new); 711 NF_CT_STAT_INC(net, expect_new);
676 } else { 712 } else {
677 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 713 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
678 NF_CT_STAT_INC(net, new); 714 NF_CT_STAT_INC(net, new);
679 } 715 }
680 716
@@ -695,7 +731,7 @@ init_conntrack(struct net *net,
695 731
696/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ 732/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
697static inline struct nf_conn * 733static inline struct nf_conn *
698resolve_normal_ct(struct net *net, 734resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
699 struct sk_buff *skb, 735 struct sk_buff *skb,
700 unsigned int dataoff, 736 unsigned int dataoff,
701 u_int16_t l3num, 737 u_int16_t l3num,
@@ -708,6 +744,7 @@ resolve_normal_ct(struct net *net,
708 struct nf_conntrack_tuple tuple; 744 struct nf_conntrack_tuple tuple;
709 struct nf_conntrack_tuple_hash *h; 745 struct nf_conntrack_tuple_hash *h;
710 struct nf_conn *ct; 746 struct nf_conn *ct;
747 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
711 748
712 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 749 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
713 dataoff, l3num, protonum, &tuple, l3proto, 750 dataoff, l3num, protonum, &tuple, l3proto,
@@ -717,9 +754,10 @@ resolve_normal_ct(struct net *net,
717 } 754 }
718 755
719 /* look for tuple match */ 756 /* look for tuple match */
720 h = nf_conntrack_find_get(net, &tuple); 757 h = nf_conntrack_find_get(net, zone, &tuple);
721 if (!h) { 758 if (!h) {
722 h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff); 759 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
760 skb, dataoff);
723 if (!h) 761 if (!h)
724 return NULL; 762 return NULL;
725 if (IS_ERR(h)) 763 if (IS_ERR(h))
@@ -756,7 +794,7 @@ unsigned int
756nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, 794nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
757 struct sk_buff *skb) 795 struct sk_buff *skb)
758{ 796{
759 struct nf_conn *ct; 797 struct nf_conn *ct, *tmpl = NULL;
760 enum ip_conntrack_info ctinfo; 798 enum ip_conntrack_info ctinfo;
761 struct nf_conntrack_l3proto *l3proto; 799 struct nf_conntrack_l3proto *l3proto;
762 struct nf_conntrack_l4proto *l4proto; 800 struct nf_conntrack_l4proto *l4proto;
@@ -765,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
765 int set_reply = 0; 803 int set_reply = 0;
766 int ret; 804 int ret;
767 805
768 /* Previously seen (loopback or untracked)? Ignore. */
769 if (skb->nfct) { 806 if (skb->nfct) {
770 NF_CT_STAT_INC_ATOMIC(net, ignore); 807 /* Previously seen (loopback or untracked)? Ignore. */
771 return NF_ACCEPT; 808 tmpl = (struct nf_conn *)skb->nfct;
809 if (!nf_ct_is_template(tmpl)) {
810 NF_CT_STAT_INC_ATOMIC(net, ignore);
811 return NF_ACCEPT;
812 }
813 skb->nfct = NULL;
772 } 814 }
773 815
774 /* rcu_read_lock()ed by nf_hook_slow */ 816 /* rcu_read_lock()ed by nf_hook_slow */
@@ -779,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
779 pr_debug("not prepared to track yet or error occured\n"); 821 pr_debug("not prepared to track yet or error occured\n");
780 NF_CT_STAT_INC_ATOMIC(net, error); 822 NF_CT_STAT_INC_ATOMIC(net, error);
781 NF_CT_STAT_INC_ATOMIC(net, invalid); 823 NF_CT_STAT_INC_ATOMIC(net, invalid);
782 return -ret; 824 ret = -ret;
825 goto out;
783 } 826 }
784 827
785 l4proto = __nf_ct_l4proto_find(pf, protonum); 828 l4proto = __nf_ct_l4proto_find(pf, protonum);
@@ -788,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
788 * inverse of the return code tells to the netfilter 831 * inverse of the return code tells to the netfilter
789 * core what to do with the packet. */ 832 * core what to do with the packet. */
790 if (l4proto->error != NULL) { 833 if (l4proto->error != NULL) {
791 ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum); 834 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
835 pf, hooknum);
792 if (ret <= 0) { 836 if (ret <= 0) {
793 NF_CT_STAT_INC_ATOMIC(net, error); 837 NF_CT_STAT_INC_ATOMIC(net, error);
794 NF_CT_STAT_INC_ATOMIC(net, invalid); 838 NF_CT_STAT_INC_ATOMIC(net, invalid);
795 return -ret; 839 ret = -ret;
840 goto out;
796 } 841 }
797 } 842 }
798 843
799 ct = resolve_normal_ct(net, skb, dataoff, pf, protonum, 844 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
800 l3proto, l4proto, &set_reply, &ctinfo); 845 l3proto, l4proto, &set_reply, &ctinfo);
801 if (!ct) { 846 if (!ct) {
802 /* Not valid part of a connection */ 847 /* Not valid part of a connection */
803 NF_CT_STAT_INC_ATOMIC(net, invalid); 848 NF_CT_STAT_INC_ATOMIC(net, invalid);
804 return NF_ACCEPT; 849 ret = NF_ACCEPT;
850 goto out;
805 } 851 }
806 852
807 if (IS_ERR(ct)) { 853 if (IS_ERR(ct)) {
808 /* Too stressed to deal. */ 854 /* Too stressed to deal. */
809 NF_CT_STAT_INC_ATOMIC(net, drop); 855 NF_CT_STAT_INC_ATOMIC(net, drop);
810 return NF_DROP; 856 ret = NF_DROP;
857 goto out;
811 } 858 }
812 859
813 NF_CT_ASSERT(skb->nfct); 860 NF_CT_ASSERT(skb->nfct);
@@ -822,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
822 NF_CT_STAT_INC_ATOMIC(net, invalid); 869 NF_CT_STAT_INC_ATOMIC(net, invalid);
823 if (ret == -NF_DROP) 870 if (ret == -NF_DROP)
824 NF_CT_STAT_INC_ATOMIC(net, drop); 871 NF_CT_STAT_INC_ATOMIC(net, drop);
825 return -ret; 872 ret = -ret;
873 goto out;
826 } 874 }
827 875
828 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 876 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
829 nf_conntrack_event_cache(IPCT_STATUS, ct); 877 nf_conntrack_event_cache(IPCT_REPLY, ct);
878out:
879 if (tmpl)
880 nf_ct_put(tmpl);
830 881
831 return ret; 882 return ret;
832} 883}
@@ -865,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
865 return; 916 return;
866 917
867 rcu_read_lock(); 918 rcu_read_lock();
868 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 919 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
869 rcu_read_unlock(); 920 rcu_read_unlock();
870} 921}
871EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 922EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -939,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
939} 990}
940EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); 991EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
941 992
993#ifdef CONFIG_NF_CONNTRACK_ZONES
994static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
995 .len = sizeof(struct nf_conntrack_zone),
996 .align = __alignof__(struct nf_conntrack_zone),
997 .id = NF_CT_EXT_ZONE,
998};
999#endif
1000
942#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1001#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
943 1002
944#include <linux/netfilter/nfnetlink.h> 1003#include <linux/netfilter/nfnetlink.h>
@@ -1120,6 +1179,9 @@ static void nf_conntrack_cleanup_init_net(void)
1120 1179
1121 nf_conntrack_helper_fini(); 1180 nf_conntrack_helper_fini();
1122 nf_conntrack_proto_fini(); 1181 nf_conntrack_proto_fini();
1182#ifdef CONFIG_NF_CONNTRACK_ZONES
1183 nf_ct_extend_unregister(&nf_ct_zone_extend);
1184#endif
1123} 1185}
1124 1186
1125static void nf_conntrack_cleanup_net(struct net *net) 1187static void nf_conntrack_cleanup_net(struct net *net)
@@ -1195,6 +1257,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1195 unsigned int hashsize, old_size; 1257 unsigned int hashsize, old_size;
1196 struct hlist_nulls_head *hash, *old_hash; 1258 struct hlist_nulls_head *hash, *old_hash;
1197 struct nf_conntrack_tuple_hash *h; 1259 struct nf_conntrack_tuple_hash *h;
1260 struct nf_conn *ct;
1198 1261
1199 if (current->nsproxy->net_ns != &init_net) 1262 if (current->nsproxy->net_ns != &init_net)
1200 return -EOPNOTSUPP; 1263 return -EOPNOTSUPP;
@@ -1221,8 +1284,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1221 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1284 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1222 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1285 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1223 struct nf_conntrack_tuple_hash, hnnode); 1286 struct nf_conntrack_tuple_hash, hnnode);
1287 ct = nf_ct_tuplehash_to_ctrack(h);
1224 hlist_nulls_del_rcu(&h->hnnode); 1288 hlist_nulls_del_rcu(&h->hnnode);
1225 bucket = __hash_conntrack(&h->tuple, hashsize, 1289 bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1290 hashsize,
1226 nf_conntrack_hash_rnd); 1291 nf_conntrack_hash_rnd);
1227 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1292 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1228 } 1293 }
@@ -1280,6 +1345,11 @@ static int nf_conntrack_init_init_net(void)
1280 if (ret < 0) 1345 if (ret < 0)
1281 goto err_helper; 1346 goto err_helper;
1282 1347
1348#ifdef CONFIG_NF_CONNTRACK_ZONES
1349 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1350 if (ret < 0)
1351 goto err_extend;
1352#endif
1283 /* Set up fake conntrack: to never be deleted, not in any hashes */ 1353 /* Set up fake conntrack: to never be deleted, not in any hashes */
1284#ifdef CONFIG_NET_NS 1354#ifdef CONFIG_NET_NS
1285 nf_conntrack_untracked.ct_net = &init_net; 1355 nf_conntrack_untracked.ct_net = &init_net;
@@ -1290,6 +1360,10 @@ static int nf_conntrack_init_init_net(void)
1290 1360
1291 return 0; 1361 return 0;
1292 1362
1363#ifdef CONFIG_NF_CONNTRACK_ZONES
1364err_extend:
1365 nf_conntrack_helper_fini();
1366#endif
1293err_helper: 1367err_helper:
1294 nf_conntrack_proto_fini(); 1368 nf_conntrack_proto_fini();
1295err_proto: 1369err_proto:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index d5a9bcd7d61b..f516961a83b4 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -18,6 +18,7 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/slab.h>
21 22
22#include <net/netfilter/nf_conntrack.h> 23#include <net/netfilter/nf_conntrack.h>
23#include <net/netfilter/nf_conntrack_core.h> 24#include <net/netfilter/nf_conntrack_core.h>
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 2f25ff610982..acb29ccaa41f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -27,6 +27,7 @@
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_helper.h> 28#include <net/netfilter/nf_conntrack_helper.h>
29#include <net/netfilter/nf_conntrack_tuple.h> 29#include <net/netfilter/nf_conntrack_tuple.h>
30#include <net/netfilter/nf_conntrack_zones.h>
30 31
31unsigned int nf_ct_expect_hsize __read_mostly; 32unsigned int nf_ct_expect_hsize __read_mostly;
32EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); 33EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
@@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
84} 85}
85 86
86struct nf_conntrack_expect * 87struct nf_conntrack_expect *
87__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple) 88__nf_ct_expect_find(struct net *net, u16 zone,
89 const struct nf_conntrack_tuple *tuple)
88{ 90{
89 struct nf_conntrack_expect *i; 91 struct nf_conntrack_expect *i;
90 struct hlist_node *n; 92 struct hlist_node *n;
@@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
95 97
96 h = nf_ct_expect_dst_hash(tuple); 98 h = nf_ct_expect_dst_hash(tuple);
97 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { 99 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
98 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 100 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
101 nf_ct_zone(i->master) == zone)
99 return i; 102 return i;
100 } 103 }
101 return NULL; 104 return NULL;
@@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
104 107
105/* Just find a expectation corresponding to a tuple. */ 108/* Just find a expectation corresponding to a tuple. */
106struct nf_conntrack_expect * 109struct nf_conntrack_expect *
107nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 110nf_ct_expect_find_get(struct net *net, u16 zone,
111 const struct nf_conntrack_tuple *tuple)
108{ 112{
109 struct nf_conntrack_expect *i; 113 struct nf_conntrack_expect *i;
110 114
111 rcu_read_lock(); 115 rcu_read_lock();
112 i = __nf_ct_expect_find(net, tuple); 116 i = __nf_ct_expect_find(net, zone, tuple);
113 if (i && !atomic_inc_not_zero(&i->use)) 117 if (i && !atomic_inc_not_zero(&i->use))
114 i = NULL; 118 i = NULL;
115 rcu_read_unlock(); 119 rcu_read_unlock();
@@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
121/* If an expectation for this connection is found, it gets delete from 125/* If an expectation for this connection is found, it gets delete from
122 * global list then returned. */ 126 * global list then returned. */
123struct nf_conntrack_expect * 127struct nf_conntrack_expect *
124nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) 128nf_ct_find_expectation(struct net *net, u16 zone,
129 const struct nf_conntrack_tuple *tuple)
125{ 130{
126 struct nf_conntrack_expect *i, *exp = NULL; 131 struct nf_conntrack_expect *i, *exp = NULL;
127 struct hlist_node *n; 132 struct hlist_node *n;
@@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
133 h = nf_ct_expect_dst_hash(tuple); 138 h = nf_ct_expect_dst_hash(tuple);
134 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 139 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
135 if (!(i->flags & NF_CT_EXPECT_INACTIVE) && 140 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
136 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { 141 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
142 nf_ct_zone(i->master) == zone) {
137 exp = i; 143 exp = i;
138 break; 144 break;
139 } 145 }
@@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
204{ 210{
205 return a->master == b->master && a->class == b->class && 211 return a->master == b->master && a->class == b->class &&
206 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 212 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
207 nf_ct_tuple_mask_equal(&a->mask, &b->mask); 213 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
214 nf_ct_zone(a->master) == nf_ct_zone(b->master);
208} 215}
209 216
210/* Generally a bad idea to call this: could have matched already. */ 217/* Generally a bad idea to call this: could have matched already. */
@@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
232 239
233 new->master = me; 240 new->master = me;
234 atomic_set(&new->use, 1); 241 atomic_set(&new->use, 1);
235 INIT_RCU_HEAD(&new->rcu);
236 return new; 242 return new;
237} 243}
238EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 244EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v)
500static int exp_seq_show(struct seq_file *s, void *v) 506static int exp_seq_show(struct seq_file *s, void *v)
501{ 507{
502 struct nf_conntrack_expect *expect; 508 struct nf_conntrack_expect *expect;
509 struct nf_conntrack_helper *helper;
503 struct hlist_node *n = v; 510 struct hlist_node *n = v;
504 char *delim = ""; 511 char *delim = "";
505 512
@@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
525 if (expect->flags & NF_CT_EXPECT_INACTIVE) 532 if (expect->flags & NF_CT_EXPECT_INACTIVE)
526 seq_printf(s, "%sINACTIVE", delim); 533 seq_printf(s, "%sINACTIVE", delim);
527 534
535 helper = rcu_dereference(nfct_help(expect->master)->helper);
536 if (helper) {
537 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
538 if (helper->expect_policy[expect->class].name)
539 seq_printf(s, "/%s",
540 helper->expect_policy[expect->class].name);
541 }
542
528 return seq_putc(s, '\n'); 543 return seq_putc(s, '\n');
529} 544}
530 545
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fef95be334bd..fdc8fb4ae10f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
59 if (!*ext) 59 if (!*ext)
60 return NULL; 60 return NULL;
61 61
62 INIT_RCU_HEAD(&(*ext)->rcu);
63 (*ext)->offset[id] = off; 62 (*ext)->offset[id] = off;
64 (*ext)->len = len; 63 (*ext)->len = len;
65 64
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index f0732aa18e4f..2ae3169e7633 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -13,6 +13,7 @@
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/netfilter.h> 14#include <linux/netfilter.h>
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <linux/slab.h>
16#include <linux/ipv6.h> 17#include <linux/ipv6.h>
17#include <linux/ctype.h> 18#include <linux/ctype.h>
18#include <linux/inet.h> 19#include <linux/inet.h>
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 66369490230e..a487c8038044 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -17,6 +17,7 @@
17#include <linux/inet.h> 17#include <linux/inet.h>
18#include <linux/in.h> 18#include <linux/in.h>
19#include <linux/ip.h> 19#include <linux/ip.h>
20#include <linux/slab.h>
20#include <linux/udp.h> 21#include <linux/udp.h>
21#include <linux/tcp.h> 22#include <linux/tcp.h>
22#include <linux/skbuff.h> 23#include <linux/skbuff.h>
@@ -29,6 +30,7 @@
29#include <net/netfilter/nf_conntrack_expect.h> 30#include <net/netfilter/nf_conntrack_expect.h>
30#include <net/netfilter/nf_conntrack_ecache.h> 31#include <net/netfilter/nf_conntrack_ecache.h>
31#include <net/netfilter/nf_conntrack_helper.h> 32#include <net/netfilter/nf_conntrack_helper.h>
33#include <net/netfilter/nf_conntrack_zones.h>
32#include <linux/netfilter/nf_conntrack_h323.h> 34#include <linux/netfilter/nf_conntrack_h323.h>
33 35
34/* Parameters */ 36/* Parameters */
@@ -1216,7 +1218,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1216 tuple.dst.u.tcp.port = port; 1218 tuple.dst.u.tcp.port = port;
1217 tuple.dst.protonum = IPPROTO_TCP; 1219 tuple.dst.protonum = IPPROTO_TCP;
1218 1220
1219 exp = __nf_ct_expect_find(net, &tuple); 1221 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
1220 if (exp && exp->master == ct) 1222 if (exp && exp->master == ct)
1221 return exp; 1223 return exp;
1222 return NULL; 1224 return NULL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4b1a56bd074c..59e1a4cd4e8b 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -15,7 +15,6 @@
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/stddef.h> 17#include <linux/stddef.h>
18#include <linux/slab.h>
19#include <linux/random.h> 18#include <linux/random.h>
20#include <linux/err.h> 19#include <linux/err.h>
21#include <linux/kernel.h> 20#include <linux/kernel.h>
@@ -65,7 +64,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
65} 64}
66 65
67struct nf_conntrack_helper * 66struct nf_conntrack_helper *
68__nf_conntrack_helper_find_byname(const char *name) 67__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
69{ 68{
70 struct nf_conntrack_helper *h; 69 struct nf_conntrack_helper *h;
71 struct hlist_node *n; 70 struct hlist_node *n;
@@ -73,13 +72,34 @@ __nf_conntrack_helper_find_byname(const char *name)
73 72
74 for (i = 0; i < nf_ct_helper_hsize; i++) { 73 for (i = 0; i < nf_ct_helper_hsize; i++) {
75 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { 74 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
76 if (!strcmp(h->name, name)) 75 if (!strcmp(h->name, name) &&
76 h->tuple.src.l3num == l3num &&
77 h->tuple.dst.protonum == protonum)
77 return h; 78 return h;
78 } 79 }
79 } 80 }
80 return NULL; 81 return NULL;
81} 82}
82EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); 83EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
84
85struct nf_conntrack_helper *
86nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
87{
88 struct nf_conntrack_helper *h;
89
90 h = __nf_conntrack_helper_find(name, l3num, protonum);
91#ifdef CONFIG_MODULES
92 if (h == NULL) {
93 if (request_module("nfct-helper-%s", name) == 0)
94 h = __nf_conntrack_helper_find(name, l3num, protonum);
95 }
96#endif
97 if (h != NULL && !try_module_get(h->me))
98 h = NULL;
99
100 return h;
101}
102EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
83 103
84struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) 104struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
85{ 105{
@@ -94,13 +114,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
94} 114}
95EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); 115EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
96 116
97int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags) 117int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
118 gfp_t flags)
98{ 119{
120 struct nf_conntrack_helper *helper = NULL;
121 struct nf_conn_help *help;
99 int ret = 0; 122 int ret = 0;
100 struct nf_conntrack_helper *helper;
101 struct nf_conn_help *help = nfct_help(ct);
102 123
103 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 124 if (tmpl != NULL) {
125 help = nfct_help(tmpl);
126 if (help != NULL)
127 helper = help->helper;
128 }
129
130 help = nfct_help(ct);
131 if (helper == NULL)
132 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
104 if (helper == NULL) { 133 if (helper == NULL) {
105 if (help) 134 if (help)
106 rcu_assign_pointer(help->helper, NULL); 135 rcu_assign_pointer(help->helper, NULL);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 8bd98c84f77e..7673930ca342 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -15,6 +15,7 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <linux/tcp.h> 16#include <linux/tcp.h>
17#include <linux/netfilter.h> 17#include <linux/netfilter.h>
18#include <linux/slab.h>
18 19
19#include <net/netfilter/nf_conntrack.h> 20#include <net/netfilter/nf_conntrack.h>
20#include <net/netfilter/nf_conntrack_expect.h> 21#include <net/netfilter/nf_conntrack_expect.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0ffe689dfe97..afc52f2ee4ac 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -27,9 +27,11 @@
27#include <linux/netlink.h> 27#include <linux/netlink.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/slab.h>
30 31
31#include <linux/netfilter.h> 32#include <linux/netfilter.h>
32#include <net/netlink.h> 33#include <net/netlink.h>
34#include <net/sock.h>
33#include <net/netfilter/nf_conntrack.h> 35#include <net/netfilter/nf_conntrack.h>
34#include <net/netfilter/nf_conntrack_core.h> 36#include <net/netfilter/nf_conntrack_core.h>
35#include <net/netfilter/nf_conntrack_expect.h> 37#include <net/netfilter/nf_conntrack_expect.h>
@@ -38,6 +40,7 @@
38#include <net/netfilter/nf_conntrack_l4proto.h> 40#include <net/netfilter/nf_conntrack_l4proto.h>
39#include <net/netfilter/nf_conntrack_tuple.h> 41#include <net/netfilter/nf_conntrack_tuple.h>
40#include <net/netfilter/nf_conntrack_acct.h> 42#include <net/netfilter/nf_conntrack_acct.h>
43#include <net/netfilter/nf_conntrack_zones.h>
41#ifdef CONFIG_NF_NAT_NEEDED 44#ifdef CONFIG_NF_NAT_NEEDED
42#include <net/netfilter/nf_nat_core.h> 45#include <net/netfilter/nf_nat_core.h>
43#include <net/netfilter/nf_nat_protocol.h> 46#include <net/netfilter/nf_nat_protocol.h>
@@ -378,6 +381,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
378 goto nla_put_failure; 381 goto nla_put_failure;
379 nla_nest_end(skb, nest_parms); 382 nla_nest_end(skb, nest_parms);
380 383
384 if (nf_ct_zone(ct))
385 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
386
381 if (ctnetlink_dump_status(skb, ct) < 0 || 387 if (ctnetlink_dump_status(skb, ct) < 0 ||
382 ctnetlink_dump_timeout(skb, ct) < 0 || 388 ctnetlink_dump_timeout(skb, ct) < 0 ||
383 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 389 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
@@ -456,6 +462,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
456static int 462static int
457ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) 463ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
458{ 464{
465 struct net *net;
459 struct nlmsghdr *nlh; 466 struct nlmsghdr *nlh;
460 struct nfgenmsg *nfmsg; 467 struct nfgenmsg *nfmsg;
461 struct nlattr *nest_parms; 468 struct nlattr *nest_parms;
@@ -482,7 +489,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
482 } else 489 } else
483 return 0; 490 return 0;
484 491
485 if (!item->report && !nfnetlink_has_listeners(group)) 492 net = nf_ct_net(ct);
493 if (!item->report && !nfnetlink_has_listeners(net, group))
486 return 0; 494 return 0;
487 495
488 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); 496 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
@@ -514,6 +522,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
514 goto nla_put_failure; 522 goto nla_put_failure;
515 nla_nest_end(skb, nest_parms); 523 nla_nest_end(skb, nest_parms);
516 524
525 if (nf_ct_zone(ct))
526 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
527
517 if (ctnetlink_dump_id(skb, ct) < 0) 528 if (ctnetlink_dump_id(skb, ct) < 0)
518 goto nla_put_failure; 529 goto nla_put_failure;
519 530
@@ -559,7 +570,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
559 rcu_read_unlock(); 570 rcu_read_unlock();
560 571
561 nlmsg_end(skb, nlh); 572 nlmsg_end(skb, nlh);
562 err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC); 573 err = nfnetlink_send(skb, net, item->pid, group, item->report,
574 GFP_ATOMIC);
563 if (err == -ENOBUFS || err == -EAGAIN) 575 if (err == -ENOBUFS || err == -EAGAIN)
564 return -ENOBUFS; 576 return -ENOBUFS;
565 577
@@ -571,7 +583,9 @@ nla_put_failure:
571nlmsg_failure: 583nlmsg_failure:
572 kfree_skb(skb); 584 kfree_skb(skb);
573errout: 585errout:
574 nfnetlink_set_err(0, group, -ENOBUFS); 586 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
587 return -ENOBUFS;
588
575 return 0; 589 return 0;
576} 590}
577#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 591#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -586,6 +600,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
586static int 600static int
587ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 601ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
588{ 602{
603 struct net *net = sock_net(skb->sk);
589 struct nf_conn *ct, *last; 604 struct nf_conn *ct, *last;
590 struct nf_conntrack_tuple_hash *h; 605 struct nf_conntrack_tuple_hash *h;
591 struct hlist_nulls_node *n; 606 struct hlist_nulls_node *n;
@@ -594,9 +609,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 609
595 rcu_read_lock(); 610 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 611 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { 612 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
598restart: 613restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 614 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
600 hnnode) { 615 hnnode) {
601 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 616 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
602 continue; 617 continue;
@@ -703,6 +718,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
703 return ret; 718 return ret;
704} 719}
705 720
721static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
722 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
723 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
724};
725
706static int 726static int
707ctnetlink_parse_tuple(const struct nlattr * const cda[], 727ctnetlink_parse_tuple(const struct nlattr * const cda[],
708 struct nf_conntrack_tuple *tuple, 728 struct nf_conntrack_tuple *tuple,
@@ -713,7 +733,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
713 733
714 memset(tuple, 0, sizeof(*tuple)); 734 memset(tuple, 0, sizeof(*tuple));
715 735
716 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL); 736 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
717 737
718 if (!tb[CTA_TUPLE_IP]) 738 if (!tb[CTA_TUPLE_IP])
719 return -EINVAL; 739 return -EINVAL;
@@ -740,12 +760,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
740 return 0; 760 return 0;
741} 761}
742 762
763static int
764ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
765{
766 if (attr)
767#ifdef CONFIG_NF_CONNTRACK_ZONES
768 *zone = ntohs(nla_get_be16(attr));
769#else
770 return -EOPNOTSUPP;
771#endif
772 else
773 *zone = 0;
774
775 return 0;
776}
777
778static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
779 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
780};
781
743static inline int 782static inline int
744ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) 783ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
745{ 784{
746 struct nlattr *tb[CTA_HELP_MAX+1]; 785 struct nlattr *tb[CTA_HELP_MAX+1];
747 786
748 nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL); 787 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
749 788
750 if (!tb[CTA_HELP_NAME]) 789 if (!tb[CTA_HELP_NAME])
751 return -EINVAL; 790 return -EINVAL;
@@ -756,11 +795,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
756} 795}
757 796
758static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { 797static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
798 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
799 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
759 [CTA_STATUS] = { .type = NLA_U32 }, 800 [CTA_STATUS] = { .type = NLA_U32 },
801 [CTA_PROTOINFO] = { .type = NLA_NESTED },
802 [CTA_HELP] = { .type = NLA_NESTED },
803 [CTA_NAT_SRC] = { .type = NLA_NESTED },
760 [CTA_TIMEOUT] = { .type = NLA_U32 }, 804 [CTA_TIMEOUT] = { .type = NLA_U32 },
761 [CTA_MARK] = { .type = NLA_U32 }, 805 [CTA_MARK] = { .type = NLA_U32 },
762 [CTA_USE] = { .type = NLA_U32 },
763 [CTA_ID] = { .type = NLA_U32 }, 806 [CTA_ID] = { .type = NLA_U32 },
807 [CTA_NAT_DST] = { .type = NLA_NESTED },
808 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
809 [CTA_ZONE] = { .type = NLA_U16 },
764}; 810};
765 811
766static int 812static int
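For reference, the zone handling added above reads the 16-bit conntrack zone from the CTA_ZONE attribute in network byte order, falls back to zone 0 when the attribute is absent, and refuses the attribute when the kernel was built without zone support. A minimal userspace sketch of that behaviour, with hypothetical structure and function names (this is not kernel code):

#include <arpa/inet.h>   /* ntohs, htons */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a netlink attribute carrying a big-endian u16. */
struct attr_be16 {
	int present;        /* 0 if the attribute was not sent */
	uint16_t value_be;  /* payload in network byte order */
};

/* Absent attribute -> zone 0; present attribute -> host-order value,
 * or -EOPNOTSUPP when zones are compiled out. */
static int parse_zone(const struct attr_be16 *attr, uint16_t *zone,
		      int zones_enabled)
{
	if (attr->present) {
		if (!zones_enabled)
			return -EOPNOTSUPP;
		*zone = ntohs(attr->value_be);
	} else {
		*zone = 0;
	}
	return 0;
}

int main(void)
{
	struct attr_be16 a = { .present = 1, .value_be = htons(7) };
	uint16_t zone;

	if (parse_zone(&a, &zone, 1) == 0)
		printf("zone %u\n", (unsigned)zone);   /* prints "zone 7" */
	return 0;
}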
@@ -768,12 +814,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
768 const struct nlmsghdr *nlh, 814 const struct nlmsghdr *nlh,
769 const struct nlattr * const cda[]) 815 const struct nlattr * const cda[])
770{ 816{
817 struct net *net = sock_net(ctnl);
771 struct nf_conntrack_tuple_hash *h; 818 struct nf_conntrack_tuple_hash *h;
772 struct nf_conntrack_tuple tuple; 819 struct nf_conntrack_tuple tuple;
773 struct nf_conn *ct; 820 struct nf_conn *ct;
774 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 821 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
775 u_int8_t u3 = nfmsg->nfgen_family; 822 u_int8_t u3 = nfmsg->nfgen_family;
776 int err = 0; 823 u16 zone;
824 int err;
825
826 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
827 if (err < 0)
828 return err;
777 829
778 if (cda[CTA_TUPLE_ORIG]) 830 if (cda[CTA_TUPLE_ORIG])
779 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 831 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@@ -781,7 +833,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
781 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); 833 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
782 else { 834 else {
783 /* Flush the whole table */ 835 /* Flush the whole table */
784 nf_conntrack_flush_report(&init_net, 836 nf_conntrack_flush_report(net,
785 NETLINK_CB(skb).pid, 837 NETLINK_CB(skb).pid,
786 nlmsg_report(nlh)); 838 nlmsg_report(nlh));
787 return 0; 839 return 0;
@@ -790,7 +842,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
790 if (err < 0) 842 if (err < 0)
791 return err; 843 return err;
792 844
793 h = nf_conntrack_find_get(&init_net, &tuple); 845 h = nf_conntrack_find_get(net, zone, &tuple);
794 if (!h) 846 if (!h)
795 return -ENOENT; 847 return -ENOENT;
796 848
@@ -828,18 +880,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
828 const struct nlmsghdr *nlh, 880 const struct nlmsghdr *nlh,
829 const struct nlattr * const cda[]) 881 const struct nlattr * const cda[])
830{ 882{
883 struct net *net = sock_net(ctnl);
831 struct nf_conntrack_tuple_hash *h; 884 struct nf_conntrack_tuple_hash *h;
832 struct nf_conntrack_tuple tuple; 885 struct nf_conntrack_tuple tuple;
833 struct nf_conn *ct; 886 struct nf_conn *ct;
834 struct sk_buff *skb2 = NULL; 887 struct sk_buff *skb2 = NULL;
835 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 888 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
836 u_int8_t u3 = nfmsg->nfgen_family; 889 u_int8_t u3 = nfmsg->nfgen_family;
837 int err = 0; 890 u16 zone;
891 int err;
838 892
839 if (nlh->nlmsg_flags & NLM_F_DUMP) 893 if (nlh->nlmsg_flags & NLM_F_DUMP)
840 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 894 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
841 ctnetlink_done); 895 ctnetlink_done);
842 896
897 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
898 if (err < 0)
899 return err;
900
843 if (cda[CTA_TUPLE_ORIG]) 901 if (cda[CTA_TUPLE_ORIG])
844 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 902 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
845 else if (cda[CTA_TUPLE_REPLY]) 903 else if (cda[CTA_TUPLE_REPLY])
@@ -850,7 +908,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
850 if (err < 0) 908 if (err < 0)
851 return err; 909 return err;
852 910
853 h = nf_conntrack_find_get(&init_net, &tuple); 911 h = nf_conntrack_find_get(net, zone, &tuple);
854 if (!h) 912 if (!h)
855 return -ENOENT; 913 return -ENOENT;
856 914
@@ -994,7 +1052,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
994 return 0; 1052 return 0;
995 } 1053 }
996 1054
997 helper = __nf_conntrack_helper_find_byname(helpname); 1055 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1056 nf_ct_protonum(ct));
998 if (helper == NULL) { 1057 if (helper == NULL) {
999#ifdef CONFIG_MODULES 1058#ifdef CONFIG_MODULES
1000 spin_unlock_bh(&nf_conntrack_lock); 1059 spin_unlock_bh(&nf_conntrack_lock);
@@ -1005,7 +1064,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1005 } 1064 }
1006 1065
1007 spin_lock_bh(&nf_conntrack_lock); 1066 spin_lock_bh(&nf_conntrack_lock);
1008 helper = __nf_conntrack_helper_find_byname(helpname); 1067 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1068 nf_ct_protonum(ct));
1009 if (helper) 1069 if (helper)
1010 return -EAGAIN; 1070 return -EAGAIN;
1011#endif 1071#endif
@@ -1020,9 +1080,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1020 /* need to zero data of old helper */ 1080 /* need to zero data of old helper */
1021 memset(&help->help, 0, sizeof(help->help)); 1081 memset(&help->help, 0, sizeof(help->help));
1022 } else { 1082 } else {
1023 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1083 /* we cannot set a helper for an existing conntrack */
1024 if (help == NULL) 1084 return -EOPNOTSUPP;
1025 return -ENOMEM;
1026 } 1085 }
1027 1086
1028 rcu_assign_pointer(help->helper, helper); 1087 rcu_assign_pointer(help->helper, helper);
@@ -1044,6 +1103,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1044 return 0; 1103 return 0;
1045} 1104}
1046 1105
1106static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1107 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1108 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1109 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1110};
1111
1047static inline int 1112static inline int
1048ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) 1113ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1049{ 1114{
@@ -1052,7 +1117,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1052 struct nf_conntrack_l4proto *l4proto; 1117 struct nf_conntrack_l4proto *l4proto;
1053 int err = 0; 1118 int err = 0;
1054 1119
1055 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 1120 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1056 1121
1057 rcu_read_lock(); 1122 rcu_read_lock();
1058 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 1123 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@@ -1064,12 +1129,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1064} 1129}
1065 1130
1066#ifdef CONFIG_NF_NAT_NEEDED 1131#ifdef CONFIG_NF_NAT_NEEDED
1132static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1133 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1134 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1135 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1136};
1137
1067static inline int 1138static inline int
1068change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) 1139change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1069{ 1140{
1070 struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; 1141 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1071 1142
1072 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL); 1143 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1073 1144
1074 if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) 1145 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1075 return -EINVAL; 1146 return -EINVAL;
@@ -1175,7 +1246,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
1175} 1246}
1176 1247
1177static struct nf_conn * 1248static struct nf_conn *
1178ctnetlink_create_conntrack(const struct nlattr * const cda[], 1249ctnetlink_create_conntrack(struct net *net, u16 zone,
1250 const struct nlattr * const cda[],
1179 struct nf_conntrack_tuple *otuple, 1251 struct nf_conntrack_tuple *otuple,
1180 struct nf_conntrack_tuple *rtuple, 1252 struct nf_conntrack_tuple *rtuple,
1181 u8 u3) 1253 u8 u3)
@@ -1184,7 +1256,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1184 int err = -EINVAL; 1256 int err = -EINVAL;
1185 struct nf_conntrack_helper *helper; 1257 struct nf_conntrack_helper *helper;
1186 1258
1187 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1259 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1188 if (IS_ERR(ct)) 1260 if (IS_ERR(ct))
1189 return ERR_PTR(-ENOMEM); 1261 return ERR_PTR(-ENOMEM);
1190 1262
@@ -1193,7 +1265,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1193 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1265 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1194 1266
1195 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 1267 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1196 ct->status |= IPS_CONFIRMED;
1197 1268
1198 rcu_read_lock(); 1269 rcu_read_lock();
1199 if (cda[CTA_HELP]) { 1270 if (cda[CTA_HELP]) {
@@ -1203,7 +1274,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1203 if (err < 0) 1274 if (err < 0)
1204 goto err2; 1275 goto err2;
1205 1276
1206 helper = __nf_conntrack_helper_find_byname(helpname); 1277 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1278 nf_ct_protonum(ct));
1207 if (helper == NULL) { 1279 if (helper == NULL) {
1208 rcu_read_unlock(); 1280 rcu_read_unlock();
1209#ifdef CONFIG_MODULES 1281#ifdef CONFIG_MODULES
@@ -1213,7 +1285,9 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1213 } 1285 }
1214 1286
1215 rcu_read_lock(); 1287 rcu_read_lock();
1216 helper = __nf_conntrack_helper_find_byname(helpname); 1288 helper = __nf_conntrack_helper_find(helpname,
1289 nf_ct_l3num(ct),
1290 nf_ct_protonum(ct));
1217 if (helper) { 1291 if (helper) {
1218 err = -EAGAIN; 1292 err = -EAGAIN;
1219 goto err2; 1293 goto err2;
@@ -1236,19 +1310,24 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1236 } 1310 }
1237 } else { 1311 } else {
1238 /* try an implicit helper assignation */ 1312 /* try an implicit helper assignation */
1239 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 1313 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1240 if (err < 0) 1314 if (err < 0)
1241 goto err2; 1315 goto err2;
1242 } 1316 }
1243 1317
1244 if (cda[CTA_STATUS]) { 1318 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1245 err = ctnetlink_change_status(ct, cda); 1319 err = ctnetlink_change_nat(ct, cda);
1246 if (err < 0) 1320 if (err < 0)
1247 goto err2; 1321 goto err2;
1248 } 1322 }
1249 1323
1250 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1324 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1251 err = ctnetlink_change_nat(ct, cda); 1325 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1326 /* we must add conntrack extensions before confirmation. */
1327 ct->status |= IPS_CONFIRMED;
1328
1329 if (cda[CTA_STATUS]) {
1330 err = ctnetlink_change_status(ct, cda);
1252 if (err < 0) 1331 if (err < 0)
1253 goto err2; 1332 goto err2;
1254 } 1333 }
@@ -1267,9 +1346,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1267 goto err2; 1346 goto err2;
1268 } 1347 }
1269 1348
1270 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1271 nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
1272
1273#if defined(CONFIG_NF_CONNTRACK_MARK) 1349#if defined(CONFIG_NF_CONNTRACK_MARK)
1274 if (cda[CTA_MARK]) 1350 if (cda[CTA_MARK])
1275 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1351 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
@@ -1285,7 +1361,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1285 if (err < 0) 1361 if (err < 0)
1286 goto err2; 1362 goto err2;
1287 1363
1288 master_h = nf_conntrack_find_get(&init_net, &master); 1364 master_h = nf_conntrack_find_get(net, zone, &master);
1289 if (master_h == NULL) { 1365 if (master_h == NULL) {
1290 err = -ENOENT; 1366 err = -ENOENT;
1291 goto err2; 1367 goto err2;
@@ -1313,11 +1389,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1313 const struct nlmsghdr *nlh, 1389 const struct nlmsghdr *nlh,
1314 const struct nlattr * const cda[]) 1390 const struct nlattr * const cda[])
1315{ 1391{
1392 struct net *net = sock_net(ctnl);
1316 struct nf_conntrack_tuple otuple, rtuple; 1393 struct nf_conntrack_tuple otuple, rtuple;
1317 struct nf_conntrack_tuple_hash *h = NULL; 1394 struct nf_conntrack_tuple_hash *h = NULL;
1318 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1395 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1319 u_int8_t u3 = nfmsg->nfgen_family; 1396 u_int8_t u3 = nfmsg->nfgen_family;
1320 int err = 0; 1397 u16 zone;
1398 int err;
1399
1400 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1401 if (err < 0)
1402 return err;
1321 1403
1322 if (cda[CTA_TUPLE_ORIG]) { 1404 if (cda[CTA_TUPLE_ORIG]) {
1323 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3); 1405 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
@@ -1333,9 +1415,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1333 1415
1334 spin_lock_bh(&nf_conntrack_lock); 1416 spin_lock_bh(&nf_conntrack_lock);
1335 if (cda[CTA_TUPLE_ORIG]) 1417 if (cda[CTA_TUPLE_ORIG])
1336 h = __nf_conntrack_find(&init_net, &otuple); 1418 h = __nf_conntrack_find(net, zone, &otuple);
1337 else if (cda[CTA_TUPLE_REPLY]) 1419 else if (cda[CTA_TUPLE_REPLY])
1338 h = __nf_conntrack_find(&init_net, &rtuple); 1420 h = __nf_conntrack_find(net, zone, &rtuple);
1339 1421
1340 if (h == NULL) { 1422 if (h == NULL) {
1341 err = -ENOENT; 1423 err = -ENOENT;
@@ -1343,7 +1425,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1343 struct nf_conn *ct; 1425 struct nf_conn *ct;
1344 enum ip_conntrack_events events; 1426 enum ip_conntrack_events events;
1345 1427
1346 ct = ctnetlink_create_conntrack(cda, &otuple, 1428 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1347 &rtuple, u3); 1429 &rtuple, u3);
1348 if (IS_ERR(ct)) { 1430 if (IS_ERR(ct)) {
1349 err = PTR_ERR(ct); 1431 err = PTR_ERR(ct);
@@ -1357,7 +1439,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1357 else 1439 else
1358 events = IPCT_NEW; 1440 events = IPCT_NEW;
1359 1441
1360 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1442 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1443 (1 << IPCT_ASSURED) |
1361 (1 << IPCT_HELPER) | 1444 (1 << IPCT_HELPER) |
1362 (1 << IPCT_PROTOINFO) | 1445 (1 << IPCT_PROTOINFO) |
1363 (1 << IPCT_NATSEQADJ) | 1446 (1 << IPCT_NATSEQADJ) |
@@ -1382,7 +1465,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1382 if (err == 0) { 1465 if (err == 0) {
1383 nf_conntrack_get(&ct->ct_general); 1466 nf_conntrack_get(&ct->ct_general);
1384 spin_unlock_bh(&nf_conntrack_lock); 1467 spin_unlock_bh(&nf_conntrack_lock);
1385 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1468 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1469 (1 << IPCT_ASSURED) |
1386 (1 << IPCT_HELPER) | 1470 (1 << IPCT_HELPER) |
1387 (1 << IPCT_PROTOINFO) | 1471 (1 << IPCT_PROTOINFO) |
1388 (1 << IPCT_NATSEQADJ) | 1472 (1 << IPCT_NATSEQADJ) |
@@ -1469,6 +1553,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1469 const struct nf_conntrack_expect *exp) 1553 const struct nf_conntrack_expect *exp)
1470{ 1554{
1471 struct nf_conn *master = exp->master; 1555 struct nf_conn *master = exp->master;
1556 struct nf_conntrack_helper *helper;
1472 long timeout = (exp->timeout.expires - jiffies) / HZ; 1557 long timeout = (exp->timeout.expires - jiffies) / HZ;
1473 1558
1474 if (timeout < 0) 1559 if (timeout < 0)
@@ -1485,6 +1570,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1485 1570
1486 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1571 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
1487 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1572 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
1573 helper = rcu_dereference(nfct_help(master)->helper);
1574 if (helper)
1575 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
1488 1576
1489 return 0; 1577 return 0;
1490 1578
@@ -1526,9 +1614,10 @@ nla_put_failure:
1526static int 1614static int
1527ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) 1615ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1528{ 1616{
1617 struct nf_conntrack_expect *exp = item->exp;
1618 struct net *net = nf_ct_exp_net(exp);
1529 struct nlmsghdr *nlh; 1619 struct nlmsghdr *nlh;
1530 struct nfgenmsg *nfmsg; 1620 struct nfgenmsg *nfmsg;
1531 struct nf_conntrack_expect *exp = item->exp;
1532 struct sk_buff *skb; 1621 struct sk_buff *skb;
1533 unsigned int type; 1622 unsigned int type;
1534 int flags = 0; 1623 int flags = 0;
@@ -1540,7 +1629,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1540 return 0; 1629 return 0;
1541 1630
1542 if (!item->report && 1631 if (!item->report &&
1543 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1632 !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
1544 return 0; 1633 return 0;
1545 1634
1546 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1635 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -1563,7 +1652,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1563 rcu_read_unlock(); 1652 rcu_read_unlock();
1564 1653
1565 nlmsg_end(skb, nlh); 1654 nlmsg_end(skb, nlh);
1566 nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, 1655 nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
1567 item->report, GFP_ATOMIC); 1656 item->report, GFP_ATOMIC);
1568 return 0; 1657 return 0;
1569 1658
@@ -1573,7 +1662,7 @@ nla_put_failure:
1573nlmsg_failure: 1662nlmsg_failure:
1574 kfree_skb(skb); 1663 kfree_skb(skb);
1575errout: 1664errout:
1576 nfnetlink_set_err(0, 0, -ENOBUFS); 1665 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
1577 return 0; 1666 return 0;
1578} 1667}
1579#endif 1668#endif
@@ -1587,7 +1676,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb)
1587static int 1676static int
1588ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 1677ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1589{ 1678{
1590 struct net *net = &init_net; 1679 struct net *net = sock_net(skb->sk);
1591 struct nf_conntrack_expect *exp, *last; 1680 struct nf_conntrack_expect *exp, *last;
1592 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1681 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1593 struct hlist_node *n; 1682 struct hlist_node *n;
@@ -1631,8 +1720,12 @@ out:
1631} 1720}
1632 1721
1633static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 1722static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1723 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1724 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1725 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1634 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 1726 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1635 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 1727 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1728 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1636}; 1729};
1637 1730
1638static int 1731static int
@@ -1640,12 +1733,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1640 const struct nlmsghdr *nlh, 1733 const struct nlmsghdr *nlh,
1641 const struct nlattr * const cda[]) 1734 const struct nlattr * const cda[])
1642{ 1735{
1736 struct net *net = sock_net(ctnl);
1643 struct nf_conntrack_tuple tuple; 1737 struct nf_conntrack_tuple tuple;
1644 struct nf_conntrack_expect *exp; 1738 struct nf_conntrack_expect *exp;
1645 struct sk_buff *skb2; 1739 struct sk_buff *skb2;
1646 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1740 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1647 u_int8_t u3 = nfmsg->nfgen_family; 1741 u_int8_t u3 = nfmsg->nfgen_family;
1648 int err = 0; 1742 u16 zone;
1743 int err;
1649 1744
1650 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1745 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1651 return netlink_dump_start(ctnl, skb, nlh, 1746 return netlink_dump_start(ctnl, skb, nlh,
@@ -1653,6 +1748,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1653 ctnetlink_exp_done); 1748 ctnetlink_exp_done);
1654 } 1749 }
1655 1750
1751 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1752 if (err < 0)
1753 return err;
1754
1656 if (cda[CTA_EXPECT_MASTER]) 1755 if (cda[CTA_EXPECT_MASTER])
1657 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); 1756 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
1658 else 1757 else
@@ -1661,7 +1760,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1661 if (err < 0) 1760 if (err < 0)
1662 return err; 1761 return err;
1663 1762
1664 exp = nf_ct_expect_find_get(&init_net, &tuple); 1763 exp = nf_ct_expect_find_get(net, zone, &tuple);
1665 if (!exp) 1764 if (!exp)
1666 return -ENOENT; 1765 return -ENOENT;
1667 1766
@@ -1701,23 +1800,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1701 const struct nlmsghdr *nlh, 1800 const struct nlmsghdr *nlh,
1702 const struct nlattr * const cda[]) 1801 const struct nlattr * const cda[])
1703{ 1802{
1803 struct net *net = sock_net(ctnl);
1704 struct nf_conntrack_expect *exp; 1804 struct nf_conntrack_expect *exp;
1705 struct nf_conntrack_tuple tuple; 1805 struct nf_conntrack_tuple tuple;
1706 struct nf_conntrack_helper *h;
1707 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1806 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1708 struct hlist_node *n, *next; 1807 struct hlist_node *n, *next;
1709 u_int8_t u3 = nfmsg->nfgen_family; 1808 u_int8_t u3 = nfmsg->nfgen_family;
1710 unsigned int i; 1809 unsigned int i;
1810 u16 zone;
1711 int err; 1811 int err;
1712 1812
1713 if (cda[CTA_EXPECT_TUPLE]) { 1813 if (cda[CTA_EXPECT_TUPLE]) {
1714 /* delete a single expect by tuple */ 1814 /* delete a single expect by tuple */
1815 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1816 if (err < 0)
1817 return err;
1818
1715 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1819 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1716 if (err < 0) 1820 if (err < 0)
1717 return err; 1821 return err;
1718 1822
1719 /* bump usage count to 2 */ 1823 /* bump usage count to 2 */
1720 exp = nf_ct_expect_find_get(&init_net, &tuple); 1824 exp = nf_ct_expect_find_get(net, zone, &tuple);
1721 if (!exp) 1825 if (!exp)
1722 return -ENOENT; 1826 return -ENOENT;
1723 1827
@@ -1740,18 +1844,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1740 1844
1741 /* delete all expectations for this helper */ 1845 /* delete all expectations for this helper */
1742 spin_lock_bh(&nf_conntrack_lock); 1846 spin_lock_bh(&nf_conntrack_lock);
1743 h = __nf_conntrack_helper_find_byname(name);
1744 if (!h) {
1745 spin_unlock_bh(&nf_conntrack_lock);
1746 return -EOPNOTSUPP;
1747 }
1748 for (i = 0; i < nf_ct_expect_hsize; i++) { 1847 for (i = 0; i < nf_ct_expect_hsize; i++) {
1749 hlist_for_each_entry_safe(exp, n, next, 1848 hlist_for_each_entry_safe(exp, n, next,
1750 &init_net.ct.expect_hash[i], 1849 &net->ct.expect_hash[i],
1751 hnode) { 1850 hnode) {
1752 m_help = nfct_help(exp->master); 1851 m_help = nfct_help(exp->master);
1753 if (m_help->helper == h 1852 if (!strcmp(m_help->helper->name, name) &&
1754 && del_timer(&exp->timeout)) { 1853 del_timer(&exp->timeout)) {
1755 nf_ct_unlink_expect(exp); 1854 nf_ct_unlink_expect(exp);
1756 nf_ct_expect_put(exp); 1855 nf_ct_expect_put(exp);
1757 } 1856 }
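The expectation flush above now compares the recorded helper name with strcmp() instead of requiring a live helper registration, so expectations whose helper has already been unregistered can still be removed. A small standalone sketch of that match-by-name pattern (hypothetical types, not the kernel data structures):

#include <stdio.h>
#include <string.h>

struct expect { const char *helper_name; int active; };

/* Flush by comparing the stored helper name, not a helper pointer,
 * so entries whose helper is already gone are still dropped. */
static void flush_by_helper(struct expect *tbl, size_t n, const char *name)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].active && !strcmp(tbl[i].helper_name, name))
			tbl[i].active = 0;
}

int main(void)
{
	struct expect tbl[] = { { "ftp", 1 }, { "sip", 1 }, { "ftp", 1 } };

	flush_by_helper(tbl, 3, "ftp");
	for (size_t i = 0; i < 3; i++)
		printf("%s: %s\n", tbl[i].helper_name,
		       tbl[i].active ? "kept" : "flushed");
	return 0;
}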
@@ -1763,7 +1862,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1763 spin_lock_bh(&nf_conntrack_lock); 1862 spin_lock_bh(&nf_conntrack_lock);
1764 for (i = 0; i < nf_ct_expect_hsize; i++) { 1863 for (i = 0; i < nf_ct_expect_hsize; i++) {
1765 hlist_for_each_entry_safe(exp, n, next, 1864 hlist_for_each_entry_safe(exp, n, next,
1766 &init_net.ct.expect_hash[i], 1865 &net->ct.expect_hash[i],
1767 hnode) { 1866 hnode) {
1768 if (del_timer(&exp->timeout)) { 1867 if (del_timer(&exp->timeout)) {
1769 nf_ct_unlink_expect(exp); 1868 nf_ct_unlink_expect(exp);
@@ -1784,7 +1883,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
1784} 1883}
1785 1884
1786static int 1885static int
1787ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, 1886ctnetlink_create_expect(struct net *net, u16 zone,
1887 const struct nlattr * const cda[],
1888 u_int8_t u3,
1788 u32 pid, int report) 1889 u32 pid, int report)
1789{ 1890{
1790 struct nf_conntrack_tuple tuple, mask, master_tuple; 1891 struct nf_conntrack_tuple tuple, mask, master_tuple;
@@ -1806,7 +1907,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
1806 return err; 1907 return err;
1807 1908
1808 /* Look for master conntrack of this expectation */ 1909 /* Look for master conntrack of this expectation */
1809 h = nf_conntrack_find_get(&init_net, &master_tuple); 1910 h = nf_conntrack_find_get(net, zone, &master_tuple);
1810 if (!h) 1911 if (!h)
1811 return -ENOENT; 1912 return -ENOENT;
1812 ct = nf_ct_tuplehash_to_ctrack(h); 1913 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1846,29 +1947,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1846 const struct nlmsghdr *nlh, 1947 const struct nlmsghdr *nlh,
1847 const struct nlattr * const cda[]) 1948 const struct nlattr * const cda[])
1848{ 1949{
1950 struct net *net = sock_net(ctnl);
1849 struct nf_conntrack_tuple tuple; 1951 struct nf_conntrack_tuple tuple;
1850 struct nf_conntrack_expect *exp; 1952 struct nf_conntrack_expect *exp;
1851 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1953 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1852 u_int8_t u3 = nfmsg->nfgen_family; 1954 u_int8_t u3 = nfmsg->nfgen_family;
1853 int err = 0; 1955 u16 zone;
1956 int err;
1854 1957
1855 if (!cda[CTA_EXPECT_TUPLE] 1958 if (!cda[CTA_EXPECT_TUPLE]
1856 || !cda[CTA_EXPECT_MASK] 1959 || !cda[CTA_EXPECT_MASK]
1857 || !cda[CTA_EXPECT_MASTER]) 1960 || !cda[CTA_EXPECT_MASTER])
1858 return -EINVAL; 1961 return -EINVAL;
1859 1962
1963 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1964 if (err < 0)
1965 return err;
1966
1860 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1967 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1861 if (err < 0) 1968 if (err < 0)
1862 return err; 1969 return err;
1863 1970
1864 spin_lock_bh(&nf_conntrack_lock); 1971 spin_lock_bh(&nf_conntrack_lock);
1865 exp = __nf_ct_expect_find(&init_net, &tuple); 1972 exp = __nf_ct_expect_find(net, zone, &tuple);
1866 1973
1867 if (!exp) { 1974 if (!exp) {
1868 spin_unlock_bh(&nf_conntrack_lock); 1975 spin_unlock_bh(&nf_conntrack_lock);
1869 err = -ENOENT; 1976 err = -ENOENT;
1870 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1977 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1871 err = ctnetlink_create_expect(cda, 1978 err = ctnetlink_create_expect(net, zone, cda,
1872 u3, 1979 u3,
1873 NETLINK_CB(skb).pid, 1980 NETLINK_CB(skb).pid,
1874 nlmsg_report(nlh)); 1981 nlmsg_report(nlh));
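One recurring change in the ctnetlink diff above is that helper lookups now key on the tuple's layer-3 family and layer-4 protocol as well as the helper name, so same-named helpers registered for different protocols no longer collide. A hedged userspace sketch of a three-key lookup of that shape (the table and field names are illustrative only):

#include <stdio.h>
#include <string.h>

/* Hypothetical registry entry; the real kernel structure differs. */
struct helper {
	const char *name;
	unsigned char l3num;    /* address family, e.g. 2 for AF_INET */
	unsigned char protonum; /* transport protocol, e.g. 6 for TCP */
};

static const struct helper helpers[] = {
	{ "sip", 2, 17 },   /* IPv4/UDP variant */
	{ "sip", 2,  6 },   /* IPv4/TCP variant */
};

/* Match on (name, l3num, protonum) rather than on the name alone. */
static const struct helper *helper_find(const char *name,
					unsigned char l3num,
					unsigned char protonum)
{
	for (size_t i = 0; i < sizeof(helpers) / sizeof(helpers[0]); i++) {
		if (!strcmp(helpers[i].name, name) &&
		    helpers[i].l3num == l3num &&
		    helpers[i].protonum == protonum)
			return &helpers[i];
	}
	return NULL;
}

int main(void)
{
	const struct helper *h = helper_find("sip", 2, 6);

	printf("%s\n", h ? "found TCP sip helper" : "not found");
	return 0;
}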
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 3807ac7faf4c..088944824e13 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack.h> 28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_helper.h> 30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_zones.h>
31#include <linux/netfilter/nf_conntrack_proto_gre.h> 32#include <linux/netfilter/nf_conntrack_proto_gre.h>
32#include <linux/netfilter/nf_conntrack_pptp.h> 33#include <linux/netfilter/nf_conntrack_pptp.h>
33 34
@@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct,
123 pr_debug("trying to unexpect other dir: "); 124 pr_debug("trying to unexpect other dir: ");
124 nf_ct_dump_tuple(&inv_t); 125 nf_ct_dump_tuple(&inv_t);
125 126
126 exp_other = nf_ct_expect_find_get(net, &inv_t); 127 exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
127 if (exp_other) { 128 if (exp_other) {
128 /* delete other expectation. */ 129 /* delete other expectation. */
129 pr_debug("found\n"); 130 pr_debug("found\n");
@@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct,
136 rcu_read_unlock(); 137 rcu_read_unlock();
137} 138}
138 139
139static int destroy_sibling_or_exp(struct net *net, 140static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
140 const struct nf_conntrack_tuple *t) 141 const struct nf_conntrack_tuple *t)
141{ 142{
142 const struct nf_conntrack_tuple_hash *h; 143 const struct nf_conntrack_tuple_hash *h;
143 struct nf_conntrack_expect *exp; 144 struct nf_conntrack_expect *exp;
144 struct nf_conn *sibling; 145 struct nf_conn *sibling;
146 u16 zone = nf_ct_zone(ct);
145 147
146 pr_debug("trying to timeout ct or exp for tuple "); 148 pr_debug("trying to timeout ct or exp for tuple ");
147 nf_ct_dump_tuple(t); 149 nf_ct_dump_tuple(t);
148 150
149 h = nf_conntrack_find_get(net, t); 151 h = nf_conntrack_find_get(net, zone, t);
150 if (h) { 152 if (h) {
151 sibling = nf_ct_tuplehash_to_ctrack(h); 153 sibling = nf_ct_tuplehash_to_ctrack(h);
152 pr_debug("setting timeout of conntrack %p to 0\n", sibling); 154 pr_debug("setting timeout of conntrack %p to 0\n", sibling);
@@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net,
157 nf_ct_put(sibling); 159 nf_ct_put(sibling);
158 return 1; 160 return 1;
159 } else { 161 } else {
160 exp = nf_ct_expect_find_get(net, t); 162 exp = nf_ct_expect_find_get(net, zone, t);
161 if (exp) { 163 if (exp) {
162 pr_debug("unexpect_related of expect %p\n", exp); 164 pr_debug("unexpect_related of expect %p\n", exp);
163 nf_ct_unexpect_related(exp); 165 nf_ct_unexpect_related(exp);
@@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
182 t.dst.protonum = IPPROTO_GRE; 184 t.dst.protonum = IPPROTO_GRE;
183 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 185 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
184 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 186 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
185 if (!destroy_sibling_or_exp(net, &t)) 187 if (!destroy_sibling_or_exp(net, ct, &t))
186 pr_debug("failed to timeout original pns->pac ct/exp\n"); 188 pr_debug("failed to timeout original pns->pac ct/exp\n");
187 189
188 /* try reply (pac->pns) tuple */ 190 /* try reply (pac->pns) tuple */
@@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
190 t.dst.protonum = IPPROTO_GRE; 192 t.dst.protonum = IPPROTO_GRE;
191 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 193 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
192 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 194 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
193 if (!destroy_sibling_or_exp(net, &t)) 195 if (!destroy_sibling_or_exp(net, ct, &t))
194 pr_debug("failed to timeout reply pac->pns ct/exp\n"); 196 pr_debug("failed to timeout reply pac->pns ct/exp\n");
195} 197}
196 198
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 1a4568bf7ea5..a44fa75b5178 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/netfilter.h> 13#include <linux/netfilter.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index dd375500dccc..5292560d6d4a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -15,6 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/dccp.h> 17#include <linux/dccp.h>
18#include <linux/slab.h>
18 19
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/netns/generic.h> 21#include <net/netns/generic.h>
@@ -561,8 +562,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
561 return NF_ACCEPT; 562 return NF_ACCEPT;
562} 563}
563 564
564static int dccp_error(struct net *net, struct sk_buff *skb, 565static int dccp_error(struct net *net, struct nf_conn *tmpl,
565 unsigned int dataoff, enum ip_conntrack_info *ctinfo, 566 struct sk_buff *skb, unsigned int dataoff,
567 enum ip_conntrack_info *ctinfo,
566 u_int8_t pf, unsigned int hooknum) 568 u_int8_t pf, unsigned int hooknum)
567{ 569{
568 struct dccp_hdr _dh, *dh; 570 struct dccp_hdr _dh, *dh;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c99cfba64ddc..cf616e55ca41 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -31,6 +31,7 @@
31#include <linux/in.h> 31#include <linux/in.h>
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/slab.h>
34#include <net/dst.h> 35#include <net/dst.h>
35#include <net/net_namespace.h> 36#include <net/net_namespace.h>
36#include <net/netns/generic.h> 37#include <net/netns/generic.h>
@@ -241,7 +242,7 @@ static int gre_packet(struct nf_conn *ct,
241 ct->proto.gre.stream_timeout); 242 ct->proto.gre.stream_timeout);
242 /* Also, more likely to be important, and not a probe. */ 243 /* Also, more likely to be important, and not a probe. */
243 set_bit(IPS_ASSURED_BIT, &ct->status); 244 set_bit(IPS_ASSURED_BIT, &ct->status);
244 nf_conntrack_event_cache(IPCT_STATUS, ct); 245 nf_conntrack_event_cache(IPCT_ASSURED, ct);
245 } else 246 } else
246 nf_ct_refresh_acct(ct, ctinfo, skb, 247 nf_ct_refresh_acct(ct, ctinfo, skb,
247 ct->proto.gre.timeout); 248 ct->proto.gre.timeout);
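The GRE tracker above (and the SCTP, TCP, UDP and UDPlite trackers further down) now caches a dedicated IPCT_ASSURED event when a connection first becomes assured, instead of the generic IPCT_STATUS event; some of them pair it with test_and_set_bit() so the event is queued only on the first 0 to 1 transition of the flag. A userspace sketch of that report-once pattern, with hypothetical names and a non-atomic stand-in for test_and_set_bit():

#include <stdio.h>

#define ASSURED_BIT  (1u << 0)
#define EV_ASSURED   (1u << 3)   /* hypothetical event id */

struct conn {
	unsigned int status;   /* connection flags */
	unsigned int events;   /* pending events for listeners */
};

/* Returns the previous value of the flag, like test_and_set_bit(). */
static int test_and_set(unsigned int *word, unsigned int bit)
{
	int was_set = (*word & bit) != 0;

	*word |= bit;
	return was_set;
}

static void mark_assured(struct conn *ct)
{
	/* Queue the event only when the flag flips from clear to set. */
	if (!test_and_set(&ct->status, ASSURED_BIT))
		ct->events |= EV_ASSURED;
}

int main(void)
{
	struct conn ct = { 0, 0 };

	mark_assured(&ct);
	mark_assured(&ct);   /* second call queues nothing new */
	printf("events queued: %u\n", ct.events);
	return 0;
}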
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index f9d930f80276..b68ff15ed979 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct,
377 new_state == SCTP_CONNTRACK_ESTABLISHED) { 377 new_state == SCTP_CONNTRACK_ESTABLISHED) {
378 pr_debug("Setting assured bit\n"); 378 pr_debug("Setting assured bit\n");
379 set_bit(IPS_ASSURED_BIT, &ct->status); 379 set_bit(IPS_ASSURED_BIT, &ct->status);
380 nf_conntrack_event_cache(IPCT_STATUS, ct); 380 nf_conntrack_event_cache(IPCT_ASSURED, ct);
381 } 381 }
382 382
383 return NF_ACCEPT; 383 return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3c96437b45ad..9dd8cd4fb6e6 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
760}; 760};
761 761
762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ 762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
763static int tcp_error(struct net *net, 763static int tcp_error(struct net *net, struct nf_conn *tmpl,
764 struct sk_buff *skb, 764 struct sk_buff *skb,
765 unsigned int dataoff, 765 unsigned int dataoff,
766 enum ip_conntrack_info *ctinfo, 766 enum ip_conntrack_info *ctinfo,
@@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct,
1045 after SYN_RECV or a valid answer for a picked up 1045 after SYN_RECV or a valid answer for a picked up
1046 connection. */ 1046 connection. */
1047 set_bit(IPS_ASSURED_BIT, &ct->status); 1047 set_bit(IPS_ASSURED_BIT, &ct->status);
1048 nf_conntrack_event_cache(IPCT_STATUS, ct); 1048 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1049 } 1049 }
1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout); 1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1051 1051
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5c5518bedb4b..8289088b8218 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct,
77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); 77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
78 /* Also, more likely to be important, and not a probe */ 78 /* Also, more likely to be important, and not a probe */
79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
80 nf_conntrack_event_cache(IPCT_STATUS, ct); 80 nf_conntrack_event_cache(IPCT_ASSURED, ct);
81 } else 81 } else
82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); 82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
83 83
@@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
91 return true; 91 return true;
92} 92}
93 93
94static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 94static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
95 enum ip_conntrack_info *ctinfo, 95 unsigned int dataoff, enum ip_conntrack_info *ctinfo,
96 u_int8_t pf, 96 u_int8_t pf,
97 unsigned int hooknum) 97 unsigned int hooknum)
98{ 98{
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 458655bb2106..263b5a72588d 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct,
75 nf_ct_udplite_timeout_stream); 75 nf_ct_udplite_timeout_stream);
76 /* Also, more likely to be important, and not a probe */ 76 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_STATUS, ct); 78 nf_conntrack_event_cache(IPCT_ASSURED, ct);
79 } else 79 } else
80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); 80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
81 81
@@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
89 return true; 89 return true;
90} 90}
91 91
92static int udplite_error(struct net *net, 92static int udplite_error(struct net *net, struct nf_conn *tmpl,
93 struct sk_buff *skb, 93 struct sk_buff *skb,
94 unsigned int dataoff, 94 unsigned int dataoff,
95 enum ip_conntrack_info *ctinfo, 95 enum ip_conntrack_info *ctinfo,
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index dcfecbb81c46..d9e27734b2a2 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -20,6 +20,7 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/slab.h>
23#include <linux/in.h> 24#include <linux/in.h>
24#include <linux/tcp.h> 25#include <linux/tcp.h>
25#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 023966b569bf..c6cd1b84eddd 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -16,12 +16,14 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/udp.h> 18#include <linux/udp.h>
19#include <linux/tcp.h>
19#include <linux/netfilter.h> 20#include <linux/netfilter.h>
20 21
21#include <net/netfilter/nf_conntrack.h> 22#include <net/netfilter/nf_conntrack.h>
22#include <net/netfilter/nf_conntrack_core.h> 23#include <net/netfilter/nf_conntrack_core.h>
23#include <net/netfilter/nf_conntrack_expect.h> 24#include <net/netfilter/nf_conntrack_expect.h>
24#include <net/netfilter/nf_conntrack_helper.h> 25#include <net/netfilter/nf_conntrack_helper.h>
26#include <net/netfilter/nf_conntrack_zones.h>
25#include <linux/netfilter/nf_conntrack_sip.h> 27#include <linux/netfilter/nf_conntrack_sip.h>
26 28
27MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
@@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600);
50MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " 52MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
51 "endpoints only (default 1)"); 53 "endpoints only (default 1)");
52 54
53unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 55unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
54 const char **dptr, 56 const char **dptr,
55 unsigned int *datalen) __read_mostly; 57 unsigned int *datalen) __read_mostly;
56EXPORT_SYMBOL_GPL(nf_nat_sip_hook); 58EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
57 59
60void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
61EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
62
58unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 63unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
64 unsigned int dataoff,
59 const char **dptr, 65 const char **dptr,
60 unsigned int *datalen, 66 unsigned int *datalen,
61 struct nf_conntrack_expect *exp, 67 struct nf_conntrack_expect *exp,
@@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
63 unsigned int matchlen) __read_mostly; 69 unsigned int matchlen) __read_mostly;
64EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); 70EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
65 71
66unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, 72unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
67 const char **dptr, 73 const char **dptr,
68 unsigned int dataoff,
69 unsigned int *datalen, 74 unsigned int *datalen,
75 unsigned int sdpoff,
70 enum sdp_header_types type, 76 enum sdp_header_types type,
71 enum sdp_header_types term, 77 enum sdp_header_types term,
72 const union nf_inet_addr *addr) 78 const union nf_inet_addr *addr)
73 __read_mostly; 79 __read_mostly;
74EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); 80EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
75 81
76unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, 82unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
77 const char **dptr, 83 const char **dptr,
78 unsigned int *datalen, 84 unsigned int *datalen,
79 unsigned int matchoff, 85 unsigned int matchoff,
@@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
82EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); 88EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
83 89
84unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, 90unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
85 const char **dptr,
86 unsigned int dataoff, 91 unsigned int dataoff,
92 const char **dptr,
87 unsigned int *datalen, 93 unsigned int *datalen,
94 unsigned int sdpoff,
88 const union nf_inet_addr *addr) 95 const union nf_inet_addr *addr)
89 __read_mostly; 96 __read_mostly;
90EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); 97EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
91 98
92unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, 99unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
93 const char **dptr, 100 const char **dptr,
94 unsigned int *datalen, 101 unsigned int *datalen,
95 struct nf_conntrack_expect *rtp_exp, 102 struct nf_conntrack_expect *rtp_exp,
@@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct,
236 return 0; 243 return 0;
237 244
238 /* Find SIP URI */ 245 /* Find SIP URI */
239 limit -= strlen("sip:"); 246 for (; dptr < limit - strlen("sip:"); dptr++) {
240 for (; dptr < limit; dptr++) {
241 if (*dptr == '\r' || *dptr == '\n') 247 if (*dptr == '\r' || *dptr == '\n')
242 return -1; 248 return -1;
243 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) 249 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
250 dptr += strlen("sip:");
244 break; 251 break;
252 }
245 } 253 }
246 if (!skp_epaddr_len(ct, dptr, limit, &shift)) 254 if (!skp_epaddr_len(ct, dptr, limit, &shift))
247 return 0; 255 return 0;
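The request-line change above tightens the URI scan: the loop bound now accounts for the length of "sip:" up front, and on a match the cursor is advanced past the scheme so the following endpoint-address parser starts at the address itself. A standalone sketch of the corrected scan, using strncasecmp in place of the kernel's strnicmp (the helper name is hypothetical):

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strncasecmp */

/* Scan [p, limit) for a "sip:" scheme token on the request line and
 * return a pointer just past it, or NULL on CR/LF or no match. */
static const char *find_sip_uri(const char *p, const char *limit)
{
	for (; p < limit - strlen("sip:"); p++) {
		if (*p == '\r' || *p == '\n')
			return NULL;
		if (strncasecmp(p, "sip:", strlen("sip:")) == 0)
			return p + strlen("sip:");  /* start of the address */
	}
	return NULL;
}

int main(void)
{
	const char msg[] = "INVITE sip:user@192.0.2.1 SIP/2.0\r\n";
	const char *addr = find_sip_uri(msg, msg + strlen(msg));

	if (addr)
		printf("address starts at: %.14s\n", addr);
	return 0;
}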
@@ -276,7 +284,7 @@ EXPORT_SYMBOL_GPL(ct_sip_parse_request);
276 * tabs, spaces and continuation lines, which are treated as a single whitespace 284 * tabs, spaces and continuation lines, which are treated as a single whitespace
277 * character. 285 * character.
278 * 286 *
279 * Some headers may appear multiple times. A comma seperated list of values is 287 * Some headers may appear multiple times. A comma separated list of values is
280 * equivalent to multiple headers. 288 * equivalent to multiple headers.
281 */ 289 */
282static const struct sip_header ct_sip_hdrs[] = { 290static const struct sip_header ct_sip_hdrs[] = {
@@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = {
284 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len), 292 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
285 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len), 293 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
286 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len), 294 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
287 [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len), 295 [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
296 [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len),
288 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len), 297 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
289 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len), 298 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
290}; 299};
@@ -412,7 +421,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
412} 421}
413EXPORT_SYMBOL_GPL(ct_sip_get_header); 422EXPORT_SYMBOL_GPL(ct_sip_get_header);
414 423
415/* Get next header field in a list of comma seperated values */ 424/* Get next header field in a list of comma separated values */
416static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, 425static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
417 unsigned int dataoff, unsigned int datalen, 426 unsigned int dataoff, unsigned int datalen,
418 enum sip_header_types type, 427 enum sip_header_types type,
@@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
516} 525}
517EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); 526EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
518 527
528static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
529 unsigned int dataoff, unsigned int datalen,
530 const char *name,
531 unsigned int *matchoff, unsigned int *matchlen)
532{
533 const char *limit = dptr + datalen;
534 const char *start;
535 const char *end;
536
537 limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
538 if (!limit)
539 limit = dptr + datalen;
540
541 start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
542 if (!start)
543 return 0;
544 start += strlen(name);
545
546 end = ct_sip_header_search(start, limit, ";", strlen(";"));
547 if (!end)
548 end = limit;
549
550 *matchoff = start - dptr;
551 *matchlen = end - start;
552 return 1;
553}
554
519/* Parse address from header parameter and return address, offset and length */ 555/* Parse address from header parameter and return address, offset and length */
520int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, 556int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
521 unsigned int dataoff, unsigned int datalen, 557 unsigned int dataoff, unsigned int datalen,
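The ct_sip_parse_param() helper added above locates a named parameter inside the current comma-separated header value: it bounds the search at the next ',' (or the end of the data), finds the parameter name, and takes everything up to the next ';' (or the bound) as the value. A userspace sketch of the same logic with a simple bounded case-insensitive search standing in for the kernel-internal header search (all names hypothetical):

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Bounded, case-insensitive substring search over [start, limit). */
static const char *search(const char *start, const char *limit,
			  const char *needle)
{
	size_t n = strlen(needle);

	for (; start + n <= limit; start++)
		if (strncasecmp(start, needle, n) == 0)
			return start;
	return NULL;
}

/* Find "name" inside the current comma-separated header value and return
 * the span of its value, which runs until the next ';' or the bound. */
static int parse_param(const char *data, size_t len, const char *name,
		       size_t *off, size_t *plen)
{
	const char *limit = data + len;
	const char *start, *end, *comma;

	comma = search(data, limit, ",");
	if (comma)
		limit = comma;          /* stay inside this header value */

	start = search(data, limit, name);
	if (!start)
		return 0;
	start += strlen(name);

	end = search(start, limit, ";");
	if (!end)
		end = limit;

	*off  = (size_t)(start - data);
	*plen = (size_t)(end - start);
	return 1;
}

int main(void)
{
	const char hdr[] = "sip:user@host;transport=tcp;expires=3600";
	size_t off, len;

	if (parse_param(hdr, strlen(hdr), "transport=", &off, &len))
		printf("transport value: %.*s\n", (int)len, hdr + off);
	return 0;
}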
@@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
574} 610}
575EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); 611EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
576 612
613static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
614 unsigned int dataoff, unsigned int datalen,
615 u8 *proto)
616{
617 unsigned int matchoff, matchlen;
618
619 if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
620 &matchoff, &matchlen)) {
621 if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
622 *proto = IPPROTO_TCP;
623 else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
624 *proto = IPPROTO_UDP;
625 else
626 return 0;
627
628 if (*proto != nf_ct_protonum(ct))
629 return 0;
630 } else
631 *proto = nf_ct_protonum(ct);
632
633 return 1;
634}
635
577/* SDP header parsing: a SDP session description contains an ordered set of 636/* SDP header parsing: a SDP session description contains an ordered set of
578 * headers, starting with a section containing general session parameters, 637 * headers, starting with a section containing general session parameters,
579 * optionally followed by multiple media descriptions. 638 * optionally followed by multiple media descriptions.
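The ct_sip_parse_transport() helper added above maps an optional "transport=" parameter to a layer-4 protocol, rejects transports other than TCP and UDP, insists that the advertised transport matches the protocol the connection actually uses, and falls back to the connection's own protocol when the parameter is absent. A self-contained sketch of that decision, again with strncasecmp in place of strnicmp and a hypothetical function name:

#include <netinet/in.h>   /* IPPROTO_TCP, IPPROTO_UDP */
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Map an optional "transport=" value to a protocol number; with no
 * parameter, fall back to the protocol the connection itself uses.
 * Returns 0 when the transport is unknown or does not match. */
static int parse_transport(const char *value, unsigned char conn_proto,
			   unsigned char *proto)
{
	if (value) {
		if (!strncasecmp(value, "TCP", 3))
			*proto = IPPROTO_TCP;
		else if (!strncasecmp(value, "UDP", 3))
			*proto = IPPROTO_UDP;
		else
			return 0;
		if (*proto != conn_proto)
			return 0;  /* signalling must match the connection */
	} else {
		*proto = conn_proto;
	}
	return 1;
}

int main(void)
{
	unsigned char proto;

	if (parse_transport("tcp", IPPROTO_TCP, &proto))
		printf("transport ok, proto %u\n", proto);
	if (!parse_transport("tcp", IPPROTO_UDP, &proto))
		printf("mismatch rejected\n");
	return 0;
}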
@@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
682 741
683static int refresh_signalling_expectation(struct nf_conn *ct, 742static int refresh_signalling_expectation(struct nf_conn *ct,
684 union nf_inet_addr *addr, 743 union nf_inet_addr *addr,
685 __be16 port, 744 u8 proto, __be16 port,
686 unsigned int expires) 745 unsigned int expires)
687{ 746{
688 struct nf_conn_help *help = nfct_help(ct); 747 struct nf_conn_help *help = nfct_help(ct);
@@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
694 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 753 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
695 if (exp->class != SIP_EXPECT_SIGNALLING || 754 if (exp->class != SIP_EXPECT_SIGNALLING ||
696 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || 755 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
756 exp->tuple.dst.protonum != proto ||
697 exp->tuple.dst.u.udp.port != port) 757 exp->tuple.dst.u.udp.port != port)
698 continue; 758 continue;
699 if (!del_timer(&exp->timeout)) 759 if (!del_timer(&exp->timeout))
@@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
728 spin_unlock_bh(&nf_conntrack_lock); 788 spin_unlock_bh(&nf_conntrack_lock);
729} 789}
730 790
731static int set_expected_rtp_rtcp(struct sk_buff *skb, 791static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
732 const char **dptr, unsigned int *datalen, 792 const char **dptr, unsigned int *datalen,
733 union nf_inet_addr *daddr, __be16 port, 793 union nf_inet_addr *daddr, __be16 port,
734 enum sip_expectation_classes class, 794 enum sip_expectation_classes class,
@@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
777 837
778 rcu_read_lock(); 838 rcu_read_lock();
779 do { 839 do {
780 exp = __nf_ct_expect_find(net, &tuple); 840 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
781 841
782 if (!exp || exp->master == ct || 842 if (!exp || exp->master == ct ||
783 nfct_help(exp->master)->helper != nfct_help(ct)->helper || 843 nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
@@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
805 if (direct_rtp) { 865 if (direct_rtp) {
806 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); 866 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
807 if (nf_nat_sdp_port && 867 if (nf_nat_sdp_port &&
808 !nf_nat_sdp_port(skb, dptr, datalen, 868 !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
809 mediaoff, medialen, ntohs(rtp_port))) 869 mediaoff, medialen, ntohs(rtp_port)))
810 goto err1; 870 goto err1;
811 } 871 }
@@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
827 887
828 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); 888 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
829 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) 889 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
830 ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp, 890 ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
891 rtp_exp, rtcp_exp,
831 mediaoff, medialen, daddr); 892 mediaoff, medialen, daddr);
832 else { 893 else {
833 if (nf_ct_expect_related(rtp_exp) == 0) { 894 if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -847,6 +908,7 @@ err1:
847static const struct sdp_media_type sdp_media_types[] = { 908static const struct sdp_media_type sdp_media_types[] = {
848 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO), 909 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
849 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO), 910 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
911 SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
850}; 912};
851 913
852static const struct sdp_media_type *sdp_media_type(const char *dptr, 914static const struct sdp_media_type *sdp_media_type(const char *dptr,
@@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
866 return NULL; 928 return NULL;
867} 929}
868 930
869static int process_sdp(struct sk_buff *skb, 931static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
870 const char **dptr, unsigned int *datalen, 932 const char **dptr, unsigned int *datalen,
871 unsigned int cseq) 933 unsigned int cseq)
872{ 934{
873 enum ip_conntrack_info ctinfo; 935 enum ip_conntrack_info ctinfo;
874 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 936 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
875 struct nf_conn_help *help = nfct_help(ct);
876 unsigned int matchoff, matchlen; 937 unsigned int matchoff, matchlen;
877 unsigned int mediaoff, medialen; 938 unsigned int mediaoff, medialen;
878 unsigned int sdpoff; 939 unsigned int sdpoff;
@@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb,
941 else 1002 else
942 return NF_DROP; 1003 return NF_DROP;
943 1004
944 ret = set_expected_rtp_rtcp(skb, dptr, datalen, 1005 ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
945 &rtp_addr, htons(port), t->class, 1006 &rtp_addr, htons(port), t->class,
946 mediaoff, medialen); 1007 mediaoff, medialen);
947 if (ret != NF_ACCEPT) 1008 if (ret != NF_ACCEPT)
@@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb,
949 1010
950 /* Update media connection address if present */ 1011 /* Update media connection address if present */
951 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { 1012 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
952 ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen, 1013 ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
953 c_hdr, SDP_HDR_MEDIA, &rtp_addr); 1014 mediaoff, c_hdr, SDP_HDR_MEDIA,
1015 &rtp_addr);
954 if (ret != NF_ACCEPT) 1016 if (ret != NF_ACCEPT)
955 return ret; 1017 return ret;
956 } 1018 }
@@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff *skb,
960 /* Update session connection and owner addresses */ 1022 /* Update session connection and owner addresses */
961 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); 1023 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
962 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) 1024 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
963 ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr); 1025 ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
964 1026 &rtp_addr);
965 if (ret == NF_ACCEPT && i > 0)
966 help->help.ct_sip_info.invite_cseq = cseq;
967 1027
968 return ret; 1028 return ret;
969} 1029}
970static int process_invite_response(struct sk_buff *skb, 1030static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
971 const char **dptr, unsigned int *datalen, 1031 const char **dptr, unsigned int *datalen,
972 unsigned int cseq, unsigned int code) 1032 unsigned int cseq, unsigned int code)
973{ 1033{
@@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb,
977 1037
978 if ((code >= 100 && code <= 199) || 1038 if ((code >= 100 && code <= 199) ||
979 (code >= 200 && code <= 299)) 1039 (code >= 200 && code <= 299))
980 return process_sdp(skb, dptr, datalen, cseq); 1040 return process_sdp(skb, dataoff, dptr, datalen, cseq);
981 else if (help->help.ct_sip_info.invite_cseq == cseq) 1041 else if (help->help.ct_sip_info.invite_cseq == cseq)
982 flush_expectations(ct, true); 1042 flush_expectations(ct, true);
983 return NF_ACCEPT; 1043 return NF_ACCEPT;
984} 1044}
985 1045
986static int process_update_response(struct sk_buff *skb, 1046static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
987 const char **dptr, unsigned int *datalen, 1047 const char **dptr, unsigned int *datalen,
988 unsigned int cseq, unsigned int code) 1048 unsigned int cseq, unsigned int code)
989{ 1049{
@@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb,
993 1053
994 if ((code >= 100 && code <= 199) || 1054 if ((code >= 100 && code <= 199) ||
995 (code >= 200 && code <= 299)) 1055 (code >= 200 && code <= 299))
996 return process_sdp(skb, dptr, datalen, cseq); 1056 return process_sdp(skb, dataoff, dptr, datalen, cseq);
997 else if (help->help.ct_sip_info.invite_cseq == cseq) 1057 else if (help->help.ct_sip_info.invite_cseq == cseq)
998 flush_expectations(ct, true); 1058 flush_expectations(ct, true);
999 return NF_ACCEPT; 1059 return NF_ACCEPT;
1000} 1060}
1001 1061
1002static int process_prack_response(struct sk_buff *skb, 1062static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
1003 const char **dptr, unsigned int *datalen, 1063 const char **dptr, unsigned int *datalen,
1004 unsigned int cseq, unsigned int code) 1064 unsigned int cseq, unsigned int code)
1005{ 1065{
@@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb,
1009 1069
1010 if ((code >= 100 && code <= 199) || 1070 if ((code >= 100 && code <= 199) ||
1011 (code >= 200 && code <= 299)) 1071 (code >= 200 && code <= 299))
1012 return process_sdp(skb, dptr, datalen, cseq); 1072 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1013 else if (help->help.ct_sip_info.invite_cseq == cseq) 1073 else if (help->help.ct_sip_info.invite_cseq == cseq)
1014 flush_expectations(ct, true); 1074 flush_expectations(ct, true);
1015 return NF_ACCEPT; 1075 return NF_ACCEPT;
1016} 1076}
1017 1077
1018static int process_bye_request(struct sk_buff *skb, 1078static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
1079 const char **dptr, unsigned int *datalen,
1080 unsigned int cseq)
1081{
1082 enum ip_conntrack_info ctinfo;
1083 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1084 struct nf_conn_help *help = nfct_help(ct);
1085 unsigned int ret;
1086
1087 flush_expectations(ct, true);
1088 ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
1089 if (ret == NF_ACCEPT)
1090 help->help.ct_sip_info.invite_cseq = cseq;
1091 return ret;
1092}
1093
1094static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
1019 const char **dptr, unsigned int *datalen, 1095 const char **dptr, unsigned int *datalen,
1020 unsigned int cseq) 1096 unsigned int cseq)
1021{ 1097{
@@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb,
1030 * signalling connections. The expectation is marked inactive and is activated 1106 * signalling connections. The expectation is marked inactive and is activated
1031 * when receiving a response indicating success from the registrar. 1107 * when receiving a response indicating success from the registrar.
1032 */ 1108 */
1033static int process_register_request(struct sk_buff *skb, 1109static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1034 const char **dptr, unsigned int *datalen, 1110 const char **dptr, unsigned int *datalen,
1035 unsigned int cseq) 1111 unsigned int cseq)
1036{ 1112{
@@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb,
1042 struct nf_conntrack_expect *exp; 1118 struct nf_conntrack_expect *exp;
1043 union nf_inet_addr *saddr, daddr; 1119 union nf_inet_addr *saddr, daddr;
1044 __be16 port; 1120 __be16 port;
1121 u8 proto;
1045 unsigned int expires = 0; 1122 unsigned int expires = 0;
1046 int ret; 1123 int ret;
1047 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect; 1124 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
@@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb,
1074 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) 1151 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
1075 return NF_ACCEPT; 1152 return NF_ACCEPT;
1076 1153
1154 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
1155 &proto) == 0)
1156 return NF_ACCEPT;
1157
1077 if (ct_sip_parse_numerical_param(ct, *dptr, 1158 if (ct_sip_parse_numerical_param(ct, *dptr,
1078 matchoff + matchlen, *datalen, 1159 matchoff + matchlen, *datalen,
1079 "expires=", NULL, NULL, &expires) < 0) 1160 "expires=", NULL, NULL, &expires) < 0)
@@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb,
1093 saddr = &ct->tuplehash[!dir].tuple.src.u3; 1174 saddr = &ct->tuplehash[!dir].tuple.src.u3;
1094 1175
1095 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), 1176 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
1096 saddr, &daddr, IPPROTO_UDP, NULL, &port); 1177 saddr, &daddr, proto, NULL, &port);
1097 exp->timeout.expires = sip_timeout * HZ; 1178 exp->timeout.expires = sip_timeout * HZ;
1098 exp->helper = nfct_help(ct)->helper; 1179 exp->helper = nfct_help(ct)->helper;
1099 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; 1180 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
1100 1181
1101 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); 1182 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
1102 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) 1183 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
1103 ret = nf_nat_sip_expect(skb, dptr, datalen, exp, 1184 ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
1104 matchoff, matchlen); 1185 matchoff, matchlen);
1105 else { 1186 else {
1106 if (nf_ct_expect_related(exp) != 0) 1187 if (nf_ct_expect_related(exp) != 0)
@@ -1116,7 +1197,7 @@ store_cseq:
1116 return ret; 1197 return ret;
1117} 1198}
1118 1199
1119static int process_register_response(struct sk_buff *skb, 1200static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
1120 const char **dptr, unsigned int *datalen, 1201 const char **dptr, unsigned int *datalen,
1121 unsigned int cseq, unsigned int code) 1202 unsigned int cseq, unsigned int code)
1122{ 1203{
@@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb,
1126 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1207 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1127 union nf_inet_addr addr; 1208 union nf_inet_addr addr;
1128 __be16 port; 1209 __be16 port;
1129 unsigned int matchoff, matchlen, dataoff = 0; 1210 u8 proto;
1211 unsigned int matchoff, matchlen, coff = 0;
1130 unsigned int expires = 0; 1212 unsigned int expires = 0;
1131 int in_contact = 0, ret; 1213 int in_contact = 0, ret;
1132 1214
@@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb,
1153 while (1) { 1235 while (1) {
1154 unsigned int c_expires = expires; 1236 unsigned int c_expires = expires;
1155 1237
1156 ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 1238 ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
1157 SIP_HDR_CONTACT, &in_contact, 1239 SIP_HDR_CONTACT, &in_contact,
1158 &matchoff, &matchlen, 1240 &matchoff, &matchlen,
1159 &addr, &port); 1241 &addr, &port);
@@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb,
1166 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) 1248 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
1167 continue; 1249 continue;
1168 1250
1251 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
1252 *datalen, &proto) == 0)
1253 continue;
1254
1169 ret = ct_sip_parse_numerical_param(ct, *dptr, 1255 ret = ct_sip_parse_numerical_param(ct, *dptr,
1170 matchoff + matchlen, 1256 matchoff + matchlen,
1171 *datalen, "expires=", 1257 *datalen, "expires=",
@@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb,
1174 return NF_DROP; 1260 return NF_DROP;
1175 if (c_expires == 0) 1261 if (c_expires == 0)
1176 break; 1262 break;
1177 if (refresh_signalling_expectation(ct, &addr, port, c_expires)) 1263 if (refresh_signalling_expectation(ct, &addr, proto, port,
1264 c_expires))
1178 return NF_ACCEPT; 1265 return NF_ACCEPT;
1179 } 1266 }
1180 1267
@@ -1184,7 +1271,7 @@ flush:
1184} 1271}
1185 1272
1186static const struct sip_handler sip_handlers[] = { 1273static const struct sip_handler sip_handlers[] = {
1187 SIP_HANDLER("INVITE", process_sdp, process_invite_response), 1274 SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
1188 SIP_HANDLER("UPDATE", process_sdp, process_update_response), 1275 SIP_HANDLER("UPDATE", process_sdp, process_update_response),
1189 SIP_HANDLER("ACK", process_sdp, NULL), 1276 SIP_HANDLER("ACK", process_sdp, NULL),
1190 SIP_HANDLER("PRACK", process_sdp, process_prack_response), 1277 SIP_HANDLER("PRACK", process_sdp, process_prack_response),
@@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = {
1192 SIP_HANDLER("REGISTER", process_register_request, process_register_response), 1279 SIP_HANDLER("REGISTER", process_register_request, process_register_response),
1193}; 1280};
1194 1281
1195static int process_sip_response(struct sk_buff *skb, 1282static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
1196 const char **dptr, unsigned int *datalen) 1283 const char **dptr, unsigned int *datalen)
1197{ 1284{
1198 enum ip_conntrack_info ctinfo; 1285 enum ip_conntrack_info ctinfo;
1199 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1286 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1200 unsigned int matchoff, matchlen; 1287 unsigned int matchoff, matchlen, matchend;
1201 unsigned int code, cseq, dataoff, i; 1288 unsigned int code, cseq, i;
1202 1289
1203 if (*datalen < strlen("SIP/2.0 200")) 1290 if (*datalen < strlen("SIP/2.0 200"))
1204 return NF_ACCEPT; 1291 return NF_ACCEPT;
@@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb,
1212 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1299 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1213 if (!cseq) 1300 if (!cseq)
1214 return NF_DROP; 1301 return NF_DROP;
1215 dataoff = matchoff + matchlen + 1; 1302 matchend = matchoff + matchlen + 1;
1216 1303
1217 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1304 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1218 const struct sip_handler *handler; 1305 const struct sip_handler *handler;
@@ -1220,15 +1307,16 @@ static int process_sip_response(struct sk_buff *skb,
1220 handler = &sip_handlers[i]; 1307 handler = &sip_handlers[i];
1221 if (handler->response == NULL) 1308 if (handler->response == NULL)
1222 continue; 1309 continue;
1223 if (*datalen < dataoff + handler->len || 1310 if (*datalen < matchend + handler->len ||
1224 strnicmp(*dptr + dataoff, handler->method, handler->len)) 1311 strnicmp(*dptr + matchend, handler->method, handler->len))
1225 continue; 1312 continue;
1226 return handler->response(skb, dptr, datalen, cseq, code); 1313 return handler->response(skb, dataoff, dptr, datalen,
1314 cseq, code);
1227 } 1315 }
1228 return NF_ACCEPT; 1316 return NF_ACCEPT;
1229} 1317}
1230 1318
1231static int process_sip_request(struct sk_buff *skb, 1319static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
1232 const char **dptr, unsigned int *datalen) 1320 const char **dptr, unsigned int *datalen)
1233{ 1321{
1234 enum ip_conntrack_info ctinfo; 1322 enum ip_conntrack_info ctinfo;
@@ -1253,69 +1341,157 @@ static int process_sip_request(struct sk_buff *skb,
1253 if (!cseq) 1341 if (!cseq)
1254 return NF_DROP; 1342 return NF_DROP;
1255 1343
1256 return handler->request(skb, dptr, datalen, cseq); 1344 return handler->request(skb, dataoff, dptr, datalen, cseq);
1257 } 1345 }
1258 return NF_ACCEPT; 1346 return NF_ACCEPT;
1259} 1347}
1260 1348
1261static int sip_help(struct sk_buff *skb, 1349static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
1262 unsigned int protoff, 1350 unsigned int dataoff, const char **dptr,
1263 struct nf_conn *ct, 1351 unsigned int *datalen)
1264 enum ip_conntrack_info ctinfo) 1352{
1353 typeof(nf_nat_sip_hook) nf_nat_sip;
1354 int ret;
1355
1356 if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
1357 ret = process_sip_request(skb, dataoff, dptr, datalen);
1358 else
1359 ret = process_sip_response(skb, dataoff, dptr, datalen);
1360
1361 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1362 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
1363 if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
1364 ret = NF_DROP;
1365 }
1366
1367 return ret;
1368}
1369
1370static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1371 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1265{ 1372{
1373 struct tcphdr *th, _tcph;
1266 unsigned int dataoff, datalen; 1374 unsigned int dataoff, datalen;
1267 const char *dptr; 1375 unsigned int matchoff, matchlen, clen;
1376 unsigned int msglen, origlen;
1377 const char *dptr, *end;
1378 s16 diff, tdiff = 0;
1268 int ret; 1379 int ret;
1269 typeof(nf_nat_sip_hook) nf_nat_sip; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1381
1382 if (ctinfo != IP_CT_ESTABLISHED &&
1383 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
1384 return NF_ACCEPT;
1270 1385
1271 /* No Data ? */ 1386 /* No Data ? */
1272 dataoff = protoff + sizeof(struct udphdr); 1387 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
1388 if (th == NULL)
1389 return NF_ACCEPT;
1390 dataoff = protoff + th->doff * 4;
1273 if (dataoff >= skb->len) 1391 if (dataoff >= skb->len)
1274 return NF_ACCEPT; 1392 return NF_ACCEPT;
1275 1393
1276 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1277 1395
1278 if (!skb_is_nonlinear(skb)) 1396 if (skb_is_nonlinear(skb)) {
1279 dptr = skb->data + dataoff;
1280 else {
1281 pr_debug("Copy of skbuff not supported yet.\n"); 1397 pr_debug("Copy of skbuff not supported yet.\n");
1282 return NF_ACCEPT; 1398 return NF_ACCEPT;
1283 } 1399 }
1284 1400
1401 dptr = skb->data + dataoff;
1285 datalen = skb->len - dataoff; 1402 datalen = skb->len - dataoff;
1286 if (datalen < strlen("SIP/2.0 200")) 1403 if (datalen < strlen("SIP/2.0 200"))
1287 return NF_ACCEPT; 1404 return NF_ACCEPT;
1288 1405
1289 if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) 1406 while (1) {
1290 ret = process_sip_request(skb, &dptr, &datalen); 1407 if (ct_sip_get_header(ct, dptr, 0, datalen,
1291 else 1408 SIP_HDR_CONTENT_LENGTH,
1292 ret = process_sip_response(skb, &dptr, &datalen); 1409 &matchoff, &matchlen) <= 0)
1410 break;
1411
1412 clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
1413 if (dptr + matchoff == end)
1414 break;
1415
1416 if (end + strlen("\r\n\r\n") > dptr + datalen)
1417 break;
1418 if (end[0] != '\r' || end[1] != '\n' ||
1419 end[2] != '\r' || end[3] != '\n')
1420 break;
1421 end += strlen("\r\n\r\n") + clen;
1422
1423 msglen = origlen = end - dptr;
1424
1425 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
1426 if (ret != NF_ACCEPT)
1427 break;
1428 diff = msglen - origlen;
1429 tdiff += diff;
1430
1431 dataoff += msglen;
1432 dptr += msglen;
1433 datalen = datalen + diff - msglen;
1434 }
1293 1435
1294 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1436 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1295 nf_nat_sip = rcu_dereference(nf_nat_sip_hook); 1437 nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
1296 if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen)) 1438 if (nf_nat_sip_seq_adjust)
1297 ret = NF_DROP; 1439 nf_nat_sip_seq_adjust(skb, tdiff);
1298 } 1440 }
1299 1441
1300 return ret; 1442 return ret;
1301} 1443}
1302 1444
1303static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; 1445static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
1304static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; 1446 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1447{
1448 unsigned int dataoff, datalen;
1449 const char *dptr;
1450
1451 /* No Data ? */
1452 dataoff = protoff + sizeof(struct udphdr);
1453 if (dataoff >= skb->len)
1454 return NF_ACCEPT;
1455
1456 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1457
1458 if (skb_is_nonlinear(skb)) {
1459 pr_debug("Copy of skbuff not supported yet.\n");
1460 return NF_ACCEPT;
1461 }
1462
1463 dptr = skb->data + dataoff;
1464 datalen = skb->len - dataoff;
1465 if (datalen < strlen("SIP/2.0 200"))
1466 return NF_ACCEPT;
1467
1468 return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
1469}
1470
1471static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
1472static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
1305 1473
1306static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { 1474static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
1307 [SIP_EXPECT_SIGNALLING] = { 1475 [SIP_EXPECT_SIGNALLING] = {
1476 .name = "signalling",
1308 .max_expected = 1, 1477 .max_expected = 1,
1309 .timeout = 3 * 60, 1478 .timeout = 3 * 60,
1310 }, 1479 },
1311 [SIP_EXPECT_AUDIO] = { 1480 [SIP_EXPECT_AUDIO] = {
1481 .name = "audio",
1312 .max_expected = 2 * IP_CT_DIR_MAX, 1482 .max_expected = 2 * IP_CT_DIR_MAX,
1313 .timeout = 3 * 60, 1483 .timeout = 3 * 60,
1314 }, 1484 },
1315 [SIP_EXPECT_VIDEO] = { 1485 [SIP_EXPECT_VIDEO] = {
1486 .name = "video",
1316 .max_expected = 2 * IP_CT_DIR_MAX, 1487 .max_expected = 2 * IP_CT_DIR_MAX,
1317 .timeout = 3 * 60, 1488 .timeout = 3 * 60,
1318 }, 1489 },
1490 [SIP_EXPECT_IMAGE] = {
1491 .name = "image",
1492 .max_expected = IP_CT_DIR_MAX,
1493 .timeout = 3 * 60,
1494 },
1319}; 1495};
1320 1496
1321static void nf_conntrack_sip_fini(void) 1497static void nf_conntrack_sip_fini(void)
@@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void)
1323 int i, j; 1499 int i, j;
1324 1500
1325 for (i = 0; i < ports_c; i++) { 1501 for (i = 0; i < ports_c; i++) {
1326 for (j = 0; j < 2; j++) { 1502 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1327 if (sip[i][j].me == NULL) 1503 if (sip[i][j].me == NULL)
1328 continue; 1504 continue;
1329 nf_conntrack_helper_unregister(&sip[i][j]); 1505 nf_conntrack_helper_unregister(&sip[i][j]);
@@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void)
1343 memset(&sip[i], 0, sizeof(sip[i])); 1519 memset(&sip[i], 0, sizeof(sip[i]));
1344 1520
1345 sip[i][0].tuple.src.l3num = AF_INET; 1521 sip[i][0].tuple.src.l3num = AF_INET;
1346 sip[i][1].tuple.src.l3num = AF_INET6; 1522 sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
1347 for (j = 0; j < 2; j++) { 1523 sip[i][0].help = sip_help_udp;
1348 sip[i][j].tuple.dst.protonum = IPPROTO_UDP; 1524 sip[i][1].tuple.src.l3num = AF_INET;
1525 sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
1526 sip[i][1].help = sip_help_tcp;
1527
1528 sip[i][2].tuple.src.l3num = AF_INET6;
1529 sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
1530 sip[i][2].help = sip_help_udp;
1531 sip[i][3].tuple.src.l3num = AF_INET6;
1532 sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
1533 sip[i][3].help = sip_help_tcp;
1534
1535 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1349 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 1536 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
1350 sip[i][j].expect_policy = sip_exp_policy; 1537 sip[i][j].expect_policy = sip_exp_policy;
1351 sip[i][j].expect_class_max = SIP_EXPECT_MAX; 1538 sip[i][j].expect_class_max = SIP_EXPECT_MAX;
1352 sip[i][j].me = THIS_MODULE; 1539 sip[i][j].me = THIS_MODULE;
1353 sip[i][j].help = sip_help;
1354 1540
1355 tmpname = &sip_names[i][j][0]; 1541 tmpname = &sip_names[i][j][0];
1356 if (ports[i] == SIP_PORT) 1542 if (ports[i] == SIP_PORT)
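The nf_conntrack_sip changes above thread a dataoff argument through the SDP and NAT hooks, add expectations for image media streams, make REGISTER handling transport aware, and add a TCP helper: sip_help_tcp() walks the TCP payload and cuts it into individual SIP messages by reading the Content-Length header and skipping past the blank line plus body before processing the next message. Below is a rough userspace sketch of that framing loop, assuming a linear, NUL-terminated buffer; the function names are illustrative, and the kernel additionally requires the blank line to follow the Content-Length value directly and tracks NAT-induced size changes, which this sketch does not.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the length of the first complete SIP message in buf, or 0. */
static size_t sip_msg_len(const char *buf, size_t len)
{
        const char *clh;
        char *end;
        unsigned long clen;

        clh = strstr(buf, "Content-Length:");
        if (clh == NULL)
                return 0;
        clen = strtoul(clh + strlen("Content-Length:"), &end, 10);

        /* The body starts after the blank line terminating the headers. */
        end = strstr(end, "\r\n\r\n");
        if (end == NULL)
                return 0;
        end += strlen("\r\n\r\n") + clen;
        if (end > buf + len)
                return 0;               /* message not complete yet */
        return (size_t)(end - buf);
}

int main(void)
{
        const char *stream =
                "BYE sip:a@example.org SIP/2.0\r\nContent-Length: 0\r\n\r\n"
                "INVITE sip:b@example.org SIP/2.0\r\nContent-Length: 4\r\n\r\nv=0\n";
        const char *p = stream;
        size_t left = strlen(stream);
        size_t n;

        while ((n = sip_msg_len(p, left)) != 0) {
                printf("SIP message of %zu bytes starting with \"%.6s...\"\n", n, p);
                p += n;
                left -= n;
        }
        return 0;
}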
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e310f1561bb2..faa8eb3722b9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/netfilter.h> 10#include <linux/netfilter.h>
11#include <linux/slab.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/skbuff.h> 13#include <linux/skbuff.h>
13#include <linux/proc_fs.h> 14#include <linux/proc_fs.h>
@@ -26,6 +27,7 @@
26#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
27#include <net/netfilter/nf_conntrack_helper.h> 28#include <net/netfilter/nf_conntrack_helper.h>
28#include <net/netfilter/nf_conntrack_acct.h> 29#include <net/netfilter/nf_conntrack_acct.h>
30#include <net/netfilter/nf_conntrack_zones.h>
29 31
30MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
31 33
@@ -171,6 +173,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
171 goto release; 173 goto release;
172#endif 174#endif
173 175
176#ifdef CONFIG_NF_CONNTRACK_ZONES
177 if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
178 goto release;
179#endif
180
174 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 181 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
175 goto release; 182 goto release;
176 183
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 3a6fd77f7761..c49ef219899e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -1,4 +1,5 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/slab.h>
2#include <linux/init.h> 3#include <linux/init.h>
3#include <linux/module.h> 4#include <linux/module.h>
4#include <linux/proc_fs.h> 5#include <linux/proc_fs.h>
@@ -265,7 +266,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
265 local_bh_disable(); 266 local_bh_disable();
266 entry->okfn(skb); 267 entry->okfn(skb);
267 local_bh_enable(); 268 local_bh_enable();
268 case NF_STOLEN:
269 break; 269 break;
270 case NF_QUEUE: 270 case NF_QUEUE:
271 if (!__nf_queue(skb, elem, entry->pf, entry->hook, 271 if (!__nf_queue(skb, elem, entry->pf, entry->hook,
@@ -273,6 +273,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
273 verdict >> NF_VERDICT_BITS)) 273 verdict >> NF_VERDICT_BITS))
274 goto next_hook; 274 goto next_hook;
275 break; 275 break;
276 case NF_STOLEN:
276 default: 277 default:
277 kfree_skb(skb); 278 kfree_skb(skb);
278 } 279 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index eedc0c1ac7a4..6afa3d52ea5f 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
40 40
41static char __initdata nfversion[] = "0.30"; 41static char __initdata nfversion[] = "0.30";
42 42
43static struct sock *nfnl = NULL;
44static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; 43static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
45static DEFINE_MUTEX(nfnl_mutex); 44static DEFINE_MUTEX(nfnl_mutex);
46 45
@@ -101,34 +100,35 @@ nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
101 return &ss->cb[cb_id]; 100 return &ss->cb[cb_id];
102} 101}
103 102
104int nfnetlink_has_listeners(unsigned int group) 103int nfnetlink_has_listeners(struct net *net, unsigned int group)
105{ 104{
106 return netlink_has_listeners(nfnl, group); 105 return netlink_has_listeners(net->nfnl, group);
107} 106}
108EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 107EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
109 108
110int nfnetlink_send(struct sk_buff *skb, u32 pid, 109int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
111 unsigned group, int echo, gfp_t flags) 110 unsigned group, int echo, gfp_t flags)
112{ 111{
113 return nlmsg_notify(nfnl, skb, pid, group, echo, flags); 112 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
114} 113}
115EXPORT_SYMBOL_GPL(nfnetlink_send); 114EXPORT_SYMBOL_GPL(nfnetlink_send);
116 115
117void nfnetlink_set_err(u32 pid, u32 group, int error) 116int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
118{ 117{
119 netlink_set_err(nfnl, pid, group, error); 118 return netlink_set_err(net->nfnl, pid, group, error);
120} 119}
121EXPORT_SYMBOL_GPL(nfnetlink_set_err); 120EXPORT_SYMBOL_GPL(nfnetlink_set_err);
122 121
123int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) 122int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags)
124{ 123{
125 return netlink_unicast(nfnl, skb, pid, flags); 124 return netlink_unicast(net->nfnl, skb, pid, flags);
126} 125}
127EXPORT_SYMBOL_GPL(nfnetlink_unicast); 126EXPORT_SYMBOL_GPL(nfnetlink_unicast);
128 127
129/* Process one complete nfnetlink message. */ 128/* Process one complete nfnetlink message. */
130static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 129static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
131{ 130{
131 struct net *net = sock_net(skb->sk);
132 const struct nfnl_callback *nc; 132 const struct nfnl_callback *nc;
133 const struct nfnetlink_subsystem *ss; 133 const struct nfnetlink_subsystem *ss;
134 int type, err; 134 int type, err;
@@ -170,7 +170,7 @@ replay:
170 if (err < 0) 170 if (err < 0)
171 return err; 171 return err;
172 172
173 err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda); 173 err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda);
174 if (err == -EAGAIN) 174 if (err == -EAGAIN)
175 goto replay; 175 goto replay;
176 return err; 176 return err;
@@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb)
184 nfnl_unlock(); 184 nfnl_unlock();
185} 185}
186 186
187static void __exit nfnetlink_exit(void) 187static int __net_init nfnetlink_net_init(struct net *net)
188{ 188{
189 printk("Removing netfilter NETLINK layer.\n"); 189 struct sock *nfnl;
190 netlink_kernel_release(nfnl); 190
191 return; 191 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
192 nfnetlink_rcv, NULL, THIS_MODULE);
193 if (!nfnl)
194 return -ENOMEM;
195 net->nfnl_stash = nfnl;
196 rcu_assign_pointer(net->nfnl, nfnl);
197 return 0;
192} 198}
193 199
194static int __init nfnetlink_init(void) 200static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
195{ 201{
196 printk("Netfilter messages via NETLINK v%s.\n", nfversion); 202 struct net *net;
197 203
198 nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX, 204 list_for_each_entry(net, net_exit_list, exit_list)
199 nfnetlink_rcv, NULL, THIS_MODULE); 205 rcu_assign_pointer(net->nfnl, NULL);
200 if (!nfnl) { 206 synchronize_net();
201 printk(KERN_ERR "cannot initialize nfnetlink!\n"); 207 list_for_each_entry(net, net_exit_list, exit_list)
202 return -ENOMEM; 208 netlink_kernel_release(net->nfnl_stash);
203 } 209}
204 210
205 return 0; 211static struct pernet_operations nfnetlink_net_ops = {
212 .init = nfnetlink_net_init,
213 .exit_batch = nfnetlink_net_exit_batch,
214};
215
216static int __init nfnetlink_init(void)
217{
218 printk("Netfilter messages via NETLINK v%s.\n", nfversion);
219 return register_pernet_subsys(&nfnetlink_net_ops);
206} 220}
207 221
222static void __exit nfnetlink_exit(void)
223{
224 printk("Removing netfilter NETLINK layer.\n");
225 unregister_pernet_subsys(&nfnetlink_net_ops);
226}
208module_init(nfnetlink_init); 227module_init(nfnetlink_init);
209module_exit(nfnetlink_exit); 228module_exit(nfnetlink_exit);
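The nfnetlink change above removes the single global nfnl socket and gives each network namespace its own socket, created by a pernet init callback and released in a batched exit callback; all senders now take a struct net argument. The following is a loose, userspace-only analogy of that ownership change, with invented names and no attempt to model RCU publication or the batched teardown.

#include <stdio.h>
#include <stdlib.h>

struct demo_net {
        int *nfnl;      /* stands in for the per-namespace netlink socket */
};

static int demo_net_init(struct demo_net *net)
{
        net->nfnl = malloc(sizeof(*net->nfnl));
        if (net->nfnl == NULL)
                return -1;
        *net->nfnl = 0;
        return 0;
}

static void demo_net_exit(struct demo_net *net)
{
        free(net->nfnl);
        net->nfnl = NULL;
}

/* Callers now pass the namespace they act in instead of using one global. */
static void demo_send(struct demo_net *net, int msg)
{
        *net->nfnl += msg;
}

int main(void)
{
        struct demo_net a, b;

        if (demo_net_init(&a))
                return 1;
        if (demo_net_init(&b)) {
                demo_net_exit(&a);
                return 1;
        }
        demo_send(&a, 1);
        demo_send(&b, 2);
        printf("a=%d b=%d\n", *a.nfnl, *b.nfnl);
        demo_net_exit(&b);
        demo_net_exit(&a);
        return 0;
}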
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9de0470d557e..203643fb2c52 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/jhash.h> 29#include <linux/jhash.h>
30#include <linux/random.h> 30#include <linux/random.h>
31#include <linux/slab.h>
31#include <net/sock.h> 32#include <net/sock.h>
32#include <net/netfilter/nf_log.h> 33#include <net/netfilter/nf_log.h>
33#include <net/netfilter/nfnetlink_log.h> 34#include <net/netfilter/nfnetlink_log.h>
@@ -323,7 +324,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
323 NLMSG_DONE, 324 NLMSG_DONE,
324 sizeof(struct nfgenmsg)); 325 sizeof(struct nfgenmsg));
325 326
326 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); 327 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
328 MSG_DONTWAIT);
327 329
328 inst->qlen = 0; 330 inst->qlen = 0;
329 inst->skb = NULL; 331 inst->skb = NULL;
@@ -767,7 +769,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
767 } 769 }
768 770
769 instance_destroy(inst); 771 instance_destroy(inst);
770 goto out; 772 goto out_put;
771 default: 773 default:
772 ret = -ENOTSUPP; 774 ret = -ENOTSUPP;
773 break; 775 break;
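The small nfnetlink_log fix above turns a "goto out" into "goto out_put" so that the reference taken on the logging instance is also dropped on the instance-destroy command path. Below is a generic userspace sketch of the layered goto-cleanup idiom involved; the instance type and helpers are invented for illustration only.

#include <stdio.h>

struct instance {
        int use;
};

static struct instance *instance_get(struct instance *i)
{
        if (i == NULL)
                return NULL;
        i->use++;
        return i;
}

static void instance_put(struct instance *i)
{
        i->use--;
}

static int recv_config(struct instance *i, int destroy)
{
        int ret = 0;

        i = instance_get(i);
        if (i == NULL) {
                ret = -1;
                goto out;       /* nothing was taken, skip the put */
        }
        if (destroy) {
                /* tear the instance down, then drop our own reference too:
                 * jumping straight to "out" here would leak it */
                goto out_put;
        }
        /* ...apply the new configuration... */
out_put:
        instance_put(i);
out:
        return ret;
}

int main(void)
{
        struct instance inst = { .use = 1 };

        recv_config(&inst, 1);
        recv_config(&inst, 0);
        printf("use=%d (still 1 means no reference was leaked)\n", inst.use);
        return 0;
}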
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7e3fa410641e..e70a6ef1f4f2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/slab.h>
21#include <linux/notifier.h> 22#include <linux/notifier.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
23#include <linux/netfilter.h> 24#include <linux/netfilter.h>
@@ -112,7 +113,6 @@ instance_create(u_int16_t queue_num, int pid)
112 inst->copy_mode = NFQNL_COPY_NONE; 113 inst->copy_mode = NFQNL_COPY_NONE;
113 spin_lock_init(&inst->lock); 114 spin_lock_init(&inst->lock);
114 INIT_LIST_HEAD(&inst->queue_list); 115 INIT_LIST_HEAD(&inst->queue_list);
115 INIT_RCU_HEAD(&inst->rcu);
116 116
117 if (!try_module_get(THIS_MODULE)) { 117 if (!try_module_get(THIS_MODULE)) {
118 err = -EAGAIN; 118 err = -EAGAIN;
@@ -414,13 +414,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
414 queue->queue_dropped++; 414 queue->queue_dropped++;
415 if (net_ratelimit()) 415 if (net_ratelimit())
416 printk(KERN_WARNING "nf_queue: full at %d entries, " 416 printk(KERN_WARNING "nf_queue: full at %d entries, "
417 "dropping packets(s). Dropped: %d\n", 417 "dropping packets(s).\n",
418 queue->queue_total, queue->queue_dropped); 418 queue->queue_total);
419 goto err_out_free_nskb; 419 goto err_out_free_nskb;
420 } 420 }
421 421
422 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 422 /* nfnetlink_unicast will either free the nskb or add it to a socket */
423 err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); 423 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
424 if (err < 0) { 424 if (err < 0) {
425 queue->queue_user_dropped++; 425 queue->queue_user_dropped++;
426 goto err_out_unlock; 426 goto err_out_unlock;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index f01955cce314..665f5beef6ad 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -22,11 +22,14 @@
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/slab.h>
25#include <net/net_namespace.h> 26#include <net/net_namespace.h>
26 27
27#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
28#include <linux/netfilter_arp.h> 29#include <linux/netfilter_arp.h>
29 30#include <linux/netfilter_ipv4/ip_tables.h>
31#include <linux/netfilter_ipv6/ip6_tables.h>
32#include <linux/netfilter_arp/arp_tables.h>
30 33
31MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 35MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -37,7 +40,7 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
37struct compat_delta { 40struct compat_delta {
38 struct compat_delta *next; 41 struct compat_delta *next;
39 unsigned int offset; 42 unsigned int offset;
40 short delta; 43 int delta;
41}; 44};
42 45
43struct xt_af { 46struct xt_af {
@@ -364,8 +367,10 @@ int xt_check_match(struct xt_mtchk_param *par,
364 * ebt_among is exempt from centralized matchsize checking 367 * ebt_among is exempt from centralized matchsize checking
365 * because it uses a dynamic-size data set. 368 * because it uses a dynamic-size data set.
366 */ 369 */
367 pr_err("%s_tables: %s match: invalid size %Zu != %u\n", 370 pr_err("%s_tables: %s.%u match: invalid size "
371 "%u (kernel) != (user) %u\n",
368 xt_prefix[par->family], par->match->name, 372 xt_prefix[par->family], par->match->name,
373 par->match->revision,
369 XT_ALIGN(par->match->matchsize), size); 374 XT_ALIGN(par->match->matchsize), size);
370 return -EINVAL; 375 return -EINVAL;
371 } 376 }
@@ -435,10 +440,10 @@ void xt_compat_flush_offsets(u_int8_t af)
435} 440}
436EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 441EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
437 442
438short xt_compat_calc_jump(u_int8_t af, unsigned int offset) 443int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
439{ 444{
440 struct compat_delta *tmp; 445 struct compat_delta *tmp;
441 short delta; 446 int delta;
442 447
443 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) 448 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
444 if (tmp->offset < offset) 449 if (tmp->offset < offset)
@@ -481,8 +486,8 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
481} 486}
482EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 487EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
483 488
484int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 489int xt_compat_match_to_user(const struct xt_entry_match *m,
485 unsigned int *size) 490 void __user **dstptr, unsigned int *size)
486{ 491{
487 const struct xt_match *match = m->u.kernel.match; 492 const struct xt_match *match = m->u.kernel.match;
488 struct compat_xt_entry_match __user *cm = *dstptr; 493 struct compat_xt_entry_match __user *cm = *dstptr;
@@ -514,8 +519,10 @@ int xt_check_target(struct xt_tgchk_param *par,
514 unsigned int size, u_int8_t proto, bool inv_proto) 519 unsigned int size, u_int8_t proto, bool inv_proto)
515{ 520{
516 if (XT_ALIGN(par->target->targetsize) != size) { 521 if (XT_ALIGN(par->target->targetsize) != size) {
517 pr_err("%s_tables: %s target: invalid size %Zu != %u\n", 522 pr_err("%s_tables: %s.%u target: invalid size "
523 "%u (kernel) != (user) %u\n",
518 xt_prefix[par->family], par->target->name, 524 xt_prefix[par->family], par->target->name,
525 par->target->revision,
519 XT_ALIGN(par->target->targetsize), size); 526 XT_ALIGN(par->target->targetsize), size);
520 return -EINVAL; 527 return -EINVAL;
521 } 528 }
@@ -582,8 +589,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
582} 589}
583EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 590EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
584 591
585int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 592int xt_compat_target_to_user(const struct xt_entry_target *t,
586 unsigned int *size) 593 void __user **dstptr, unsigned int *size)
587{ 594{
588 const struct xt_target *target = t->u.kernel.target; 595 const struct xt_target *target = t->u.kernel.target;
589 struct compat_xt_entry_target __user *ct = *dstptr; 596 struct compat_xt_entry_target __user *ct = *dstptr;
@@ -1091,6 +1098,60 @@ static const struct file_operations xt_target_ops = {
1091 1098
1092#endif /* CONFIG_PROC_FS */ 1099#endif /* CONFIG_PROC_FS */
1093 1100
1101/**
1102 * xt_hook_link - set up hooks for a new table
1103 * @table: table with metadata needed to set up hooks
1104 * @fn: Hook function
1105 *
1106 * This function will take care of creating and registering the necessary
1107 * Netfilter hooks for XT tables.
1108 */
1109struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1110{
1111 unsigned int hook_mask = table->valid_hooks;
1112 uint8_t i, num_hooks = hweight32(hook_mask);
1113 uint8_t hooknum;
1114 struct nf_hook_ops *ops;
1115 int ret;
1116
1117 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1118 if (ops == NULL)
1119 return ERR_PTR(-ENOMEM);
1120
1121 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1122 hook_mask >>= 1, ++hooknum) {
1123 if (!(hook_mask & 1))
1124 continue;
1125 ops[i].hook = fn;
1126 ops[i].owner = table->me;
1127 ops[i].pf = table->af;
1128 ops[i].hooknum = hooknum;
1129 ops[i].priority = table->priority;
1130 ++i;
1131 }
1132
1133 ret = nf_register_hooks(ops, num_hooks);
1134 if (ret < 0) {
1135 kfree(ops);
1136 return ERR_PTR(ret);
1137 }
1138
1139 return ops;
1140}
1141EXPORT_SYMBOL_GPL(xt_hook_link);
1142
1143/**
1144 * xt_hook_unlink - remove hooks for a table
1145 * @ops: nf_hook_ops array as returned by nf_hook_link
1146 * @hook_mask: the very same mask that was passed to nf_hook_link
1147 */
1148void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1149{
1150 nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1151 kfree(ops);
1152}
1153EXPORT_SYMBOL_GPL(xt_hook_unlink);
1154
1094int xt_proto_init(struct net *net, u_int8_t af) 1155int xt_proto_init(struct net *net, u_int8_t af)
1095{ 1156{
1096#ifdef CONFIG_PROC_FS 1157#ifdef CONFIG_PROC_FS
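The new xt_hook_link() above allocates one nf_hook_ops slot per bit set in the table's valid_hooks mask and registers them all in one call, so that tables no longer need open-coded hook arrays. A minimal userspace sketch of that mask walk follows, with __builtin_popcount() standing in for hweight32() and a trivial struct in place of nf_hook_ops.

#include <stdio.h>
#include <stdlib.h>

struct demo_ops {
        unsigned int hooknum;
};

static struct demo_ops *link_hooks(unsigned int hook_mask, unsigned int *count)
{
        unsigned int num_hooks = __builtin_popcount(hook_mask);
        unsigned int i, hooknum;
        struct demo_ops *ops;

        ops = calloc(num_hooks, sizeof(*ops));
        if (ops == NULL)
                return NULL;
        for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
             hook_mask >>= 1, ++hooknum) {
                if (!(hook_mask & 1))
                        continue;
                ops[i].hooknum = hooknum;       /* fill one slot per set bit */
                ++i;
        }
        *count = num_hooks;
        return ops;
}

int main(void)
{
        /* e.g. a table valid at hooknums 1, 2 and 3 */
        unsigned int n = 0, i;
        struct demo_ops *ops = link_hooks((1 << 1) | (1 << 2) | (1 << 3), &n);

        for (i = 0; ops != NULL && i < n; i++)
                printf("register hook for hooknum %u\n", ops[i].hooknum);
        free(ops);
        return 0;
}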
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
new file mode 100644
index 000000000000..ee18b231b950
--- /dev/null
+++ b/net/netfilter/xt_CT.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2010 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/gfp.h>
11#include <linux/skbuff.h>
12#include <linux/selinux.h>
13#include <linux/netfilter_ipv4/ip_tables.h>
14#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter/xt_CT.h>
17#include <net/netfilter/nf_conntrack.h>
18#include <net/netfilter/nf_conntrack_helper.h>
19#include <net/netfilter/nf_conntrack_ecache.h>
20#include <net/netfilter/nf_conntrack_zones.h>
21
22static unsigned int xt_ct_target(struct sk_buff *skb,
23 const struct xt_target_param *par)
24{
25 const struct xt_ct_target_info *info = par->targinfo;
26 struct nf_conn *ct = info->ct;
27
28 /* Previously seen (loopback)? Ignore. */
29 if (skb->nfct != NULL)
30 return XT_CONTINUE;
31
32 atomic_inc(&ct->ct_general.use);
33 skb->nfct = &ct->ct_general;
34 skb->nfctinfo = IP_CT_NEW;
35
36 return XT_CONTINUE;
37}
38
39static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
40{
41 if (par->family == AF_INET) {
42 const struct ipt_entry *e = par->entryinfo;
43
44 if (e->ip.invflags & IPT_INV_PROTO)
45 return 0;
46 return e->ip.proto;
47 } else if (par->family == AF_INET6) {
48 const struct ip6t_entry *e = par->entryinfo;
49
50 if (e->ipv6.invflags & IP6T_INV_PROTO)
51 return 0;
52 return e->ipv6.proto;
53 } else
54 return 0;
55}
56
57static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
58{
59 struct xt_ct_target_info *info = par->targinfo;
60 struct nf_conntrack_tuple t;
61 struct nf_conn_help *help;
62 struct nf_conn *ct;
63 u8 proto;
64
65 if (info->flags & ~XT_CT_NOTRACK)
66 return false;
67
68 if (info->flags & XT_CT_NOTRACK) {
69 ct = &nf_conntrack_untracked;
70 atomic_inc(&ct->ct_general.use);
71 goto out;
72 }
73
74#ifndef CONFIG_NF_CONNTRACK_ZONES
75 if (info->zone)
76 goto err1;
77#endif
78
79 if (nf_ct_l3proto_try_module_get(par->family) < 0)
80 goto err1;
81
82 memset(&t, 0, sizeof(t));
83 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
84 if (IS_ERR(ct))
85 goto err2;
86
87 if ((info->ct_events || info->exp_events) &&
88 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
89 GFP_KERNEL))
90 goto err3;
91
92 if (info->helper[0]) {
93 proto = xt_ct_find_proto(par);
94 if (!proto)
95 goto err3;
96
97 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
98 if (help == NULL)
99 goto err3;
100
101 help->helper = nf_conntrack_helper_try_module_get(info->helper,
102 par->family,
103 proto);
104 if (help->helper == NULL)
105 goto err3;
106 }
107
108 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
109 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
110out:
111 info->ct = ct;
112 return true;
113
114err3:
115 nf_conntrack_free(ct);
116err2:
117 nf_ct_l3proto_module_put(par->family);
118err1:
119 return false;
120}
121
122static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
123{
124 struct xt_ct_target_info *info = par->targinfo;
125 struct nf_conn *ct = info->ct;
126 struct nf_conn_help *help;
127
128 if (ct != &nf_conntrack_untracked) {
129 help = nfct_help(ct);
130 if (help)
131 module_put(help->helper->me);
132
133 nf_ct_l3proto_module_put(par->family);
134 }
135 nf_ct_put(info->ct);
136}
137
138static struct xt_target xt_ct_tg __read_mostly = {
139 .name = "CT",
140 .family = NFPROTO_UNSPEC,
141 .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
142 .checkentry = xt_ct_tg_check,
143 .destroy = xt_ct_tg_destroy,
144 .target = xt_ct_target,
145 .table = "raw",
146 .me = THIS_MODULE,
147};
148
149static int __init xt_ct_tg_init(void)
150{
151 return xt_register_target(&xt_ct_tg);
152}
153
154static void __exit xt_ct_tg_exit(void)
155{
156 xt_unregister_target(&xt_ct_tg);
157}
158
159module_init(xt_ct_tg_init);
160module_exit(xt_ct_tg_exit);
161
162MODULE_LICENSE("GPL");
163MODULE_DESCRIPTION("Xtables: connection tracking target");
164MODULE_ALIAS("ipt_CT");
165MODULE_ALIAS("ip6t_CT");
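The new CT target above builds a template conntrack per rule at checkentry time, optionally binding a helper, event masks and a zone to it; each packet that hits the rule then only takes a reference to that template and points skb->nfct at it, so nothing is allocated in the packet path. A plain C refcounting sketch of that lifecycle; the types and helpers are invented, and packets later releasing their references is not modelled here.

#include <stdio.h>

struct template {
        int use;        /* reference count, 1 == owned by the rule */
};

static struct template *rule_check(void)
{
        static struct template tmpl = { .use = 1 };     /* built once per rule */
        return &tmpl;
}

static void packet_hit(struct template *t)
{
        t->use++;       /* the packet now points at the shared template */
}

static void rule_destroy(struct template *t)
{
        t->use--;       /* drop the rule's own reference */
}

int main(void)
{
        struct template *t = rule_check();

        packet_hit(t);
        packet_hit(t);
        rule_destroy(t);
        printf("references still held by packets: %d\n", t->use);
        return 0;
}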
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 8ff7843bb921..3271c8e52153 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netfilter/x_tables.h> 24#include <linux/netfilter/x_tables.h>
25#include <linux/slab.h>
25#include <linux/leds.h> 26#include <linux/leds.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27 28
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index f28f6a5fc02d..12dcd7007c3e 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -28,6 +28,7 @@ MODULE_ALIAS("ip6t_NFQUEUE");
28MODULE_ALIAS("arpt_NFQUEUE"); 28MODULE_ALIAS("arpt_NFQUEUE");
29 29
30static u32 jhash_initval __read_mostly; 30static u32 jhash_initval __read_mostly;
31static bool rnd_inited __read_mostly;
31 32
32static unsigned int 33static unsigned int
33nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) 34nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
@@ -90,6 +91,10 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
90 const struct xt_NFQ_info_v1 *info = par->targinfo; 91 const struct xt_NFQ_info_v1 *info = par->targinfo;
91 u32 maxid; 92 u32 maxid;
92 93
94 if (unlikely(!rnd_inited)) {
95 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
96 rnd_inited = true;
97 }
93 if (info->queues_total == 0) { 98 if (info->queues_total == 0) {
94 pr_err("NFQUEUE: number of total queues is 0\n"); 99 pr_err("NFQUEUE: number of total queues is 0\n");
95 return false; 100 return false;
@@ -135,7 +140,6 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
135 140
136static int __init nfqueue_tg_init(void) 141static int __init nfqueue_tg_init(void)
137{ 142{
138 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
139 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); 143 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
140} 144}
141 145
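xt_NFQUEUE (and, further down, xt_RATEEST and xt_connlimit) now seed their jhash initval the first time a rule needing it is checked, instead of unconditionally at module load. A small sketch of that seed-on-first-use pattern, with srand()/rand() standing in for get_random_bytes(); as in the kernel pattern there is no locking, the worst case being a harmless re-seed before the value is ever used on a packet.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>

static unsigned int jhash_initval;
static bool rnd_inited;

/* Called whenever a rule using the hash is added. */
static void seed_on_first_rule(void)
{
        if (!rnd_inited) {
                srand((unsigned int)time(NULL));
                jhash_initval = (unsigned int)rand();
                rnd_inited = true;
        }
}

int main(void)
{
        seed_on_first_rule();           /* first rule seeds the hash */
        seed_on_first_rule();           /* later rules reuse the same seed */
        printf("seed = %u\n", jhash_initval);
        return 0;
}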
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d80b8192e0d4..d16d55df4f61 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -11,6 +11,7 @@
11#include <linux/jhash.h> 11#include <linux/jhash.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/random.h> 13#include <linux/random.h>
14#include <linux/slab.h>
14#include <net/gen_stats.h> 15#include <net/gen_stats.h>
15#include <net/netlink.h> 16#include <net/netlink.h>
16 17
@@ -23,6 +24,7 @@ static DEFINE_MUTEX(xt_rateest_mutex);
23#define RATEEST_HSIZE 16 24#define RATEEST_HSIZE 16
24static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly; 25static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly;
25static unsigned int jhash_rnd __read_mostly; 26static unsigned int jhash_rnd __read_mostly;
27static bool rnd_inited __read_mostly;
26 28
27static unsigned int xt_rateest_hash(const char *name) 29static unsigned int xt_rateest_hash(const char *name)
28{ 30{
@@ -93,6 +95,11 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
93 struct gnet_estimator est; 95 struct gnet_estimator est;
94 } cfg; 96 } cfg;
95 97
98 if (unlikely(!rnd_inited)) {
99 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
100 rnd_inited = true;
101 }
102
96 est = xt_rateest_lookup(info->name); 103 est = xt_rateest_lookup(info->name);
97 if (est) { 104 if (est) {
98 /* 105 /*
@@ -164,7 +171,6 @@ static int __init xt_rateest_tg_init(void)
164 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++) 171 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++)
165 INIT_HLIST_HEAD(&rateest_hash[i]); 172 INIT_HLIST_HEAD(&rateest_hash[i]);
166 173
167 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
168 return xt_register_target(&xt_rateest_tg_reg); 174 return xt_register_target(&xt_rateest_tg_reg);
169} 175}
170 176
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index eda64c1cb1e5..c5f4b9919e9a 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/ip.h> 13#include <linux/ip.h>
14#include <linux/gfp.h>
14#include <linux/ipv6.h> 15#include <linux/ipv6.h>
15#include <linux/tcp.h> 16#include <linux/tcp.h>
16#include <net/dst.h> 17#include <net/dst.h>
@@ -60,17 +61,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
60 tcplen = skb->len - tcphoff; 61 tcplen = skb->len - tcphoff;
61 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 62 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
62 63
63 /* Since it passed flags test in tcp match, we know it is is 64 /* Header cannot be larger than the packet */
64 not a fragment, and has data >= tcp header length. SYN 65 if (tcplen < tcph->doff*4)
65 packets should not contain data: if they did, then we risk
66 running over MTU, sending Frag Needed and breaking things
67 badly. --RR */
68 if (tcplen != tcph->doff*4) {
69 if (net_ratelimit())
70 printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
71 skb->len);
72 return -1; 66 return -1;
73 }
74 67
75 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 68 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
76 if (dst_mtu(skb_dst(skb)) <= minlen) { 69 if (dst_mtu(skb_dst(skb)) <= minlen) {
@@ -115,6 +108,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
115 } 108 }
116 } 109 }
117 110
111 /* There is data after the header so the option can't be added
112 without moving it, and doing so may make the SYN packet
113 itself too large. Accept the packet unmodified instead. */
114 if (tcplen > tcph->doff*4)
115 return 0;
116
118 /* 117 /*
119 * MSS Option not found ?! add it.. 118 * MSS Option not found ?! add it..
120 */ 119 */
@@ -241,6 +240,7 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
241{ 240{
242 const struct xt_tcpmss_info *info = par->targinfo; 241 const struct xt_tcpmss_info *info = par->targinfo;
243 const struct ipt_entry *e = par->entryinfo; 242 const struct ipt_entry *e = par->entryinfo;
243 const struct xt_entry_match *ematch;
244 244
245 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 245 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
246 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 246 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -250,8 +250,9 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
250 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 250 "FORWARD, OUTPUT and POSTROUTING hooks\n");
251 return false; 251 return false;
252 } 252 }
253 if (IPT_MATCH_ITERATE(e, find_syn_match)) 253 xt_ematch_foreach(ematch, e)
254 return true; 254 if (find_syn_match(ematch))
255 return true;
255 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 256 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
256 return false; 257 return false;
257} 258}
@@ -261,6 +262,7 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
261{ 262{
262 const struct xt_tcpmss_info *info = par->targinfo; 263 const struct xt_tcpmss_info *info = par->targinfo;
263 const struct ip6t_entry *e = par->entryinfo; 264 const struct ip6t_entry *e = par->entryinfo;
265 const struct xt_entry_match *ematch;
264 266
265 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 267 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
266 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 268 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -270,8 +272,9 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
270 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 272 "FORWARD, OUTPUT and POSTROUTING hooks\n");
271 return false; 273 return false;
272 } 274 }
273 if (IP6T_MATCH_ITERATE(e, find_syn_match)) 275 xt_ematch_foreach(ematch, e)
274 return true; 276 if (find_syn_match(ematch))
277 return true;
275 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 278 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
276 return false; 279 return false;
277} 280}
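The xt_TCPMSS change above replaces the old "length must equal the header" test with two separate checks: a segment shorter than its own header is dropped, and a SYN that already carries payload is accepted unmodified because the MSS option cannot be appended without moving data. A tiny sketch of just that decision; the return values loosely mirror the kernel ones.

#include <stdio.h>

/* -1: drop, 0: accept unmodified, 1: header only, safe to add the option */
static int mss_option_action(unsigned int tcplen, unsigned int doff)
{
        if (tcplen < doff * 4)
                return -1;      /* header cannot be larger than the packet */
        if (tcplen > doff * 4)
                return 0;       /* data after the header, leave packet alone */
        return 1;
}

int main(void)
{
        printf("%d %d %d\n",
               mss_option_action(16, 5),        /* truncated header  -> -1 */
               mss_option_action(28, 5),        /* SYN with payload  ->  0 */
               mss_option_action(20, 5));       /* bare SYN header   ->  1 */
        return 0;
}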
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 38f03f75a636..388ca4596098 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -17,6 +17,7 @@
17#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/ipv6.h> 18#include <linux/ipv6.h>
19#include <linux/jhash.h> 19#include <linux/jhash.h>
20#include <linux/slab.h>
20#include <linux/list.h> 21#include <linux/list.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/random.h> 23#include <linux/random.h>
@@ -28,6 +29,7 @@
28#include <net/netfilter/nf_conntrack.h> 29#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 30#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_tuple.h> 31#include <net/netfilter/nf_conntrack_tuple.h>
32#include <net/netfilter/nf_conntrack_zones.h>
31 33
32/* we will save the tuples of all connections we care about */ 34/* we will save the tuples of all connections we care about */
33struct xt_connlimit_conn { 35struct xt_connlimit_conn {
@@ -40,15 +42,11 @@ struct xt_connlimit_data {
40 spinlock_t lock; 42 spinlock_t lock;
41}; 43};
42 44
43static u_int32_t connlimit_rnd; 45static u_int32_t connlimit_rnd __read_mostly;
44static bool connlimit_rnd_inited; 46static bool connlimit_rnd_inited __read_mostly;
45 47
46static inline unsigned int connlimit_iphash(__be32 addr) 48static inline unsigned int connlimit_iphash(__be32 addr)
47{ 49{
48 if (unlikely(!connlimit_rnd_inited)) {
49 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
50 connlimit_rnd_inited = true;
51 }
52 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF; 50 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
53} 51}
54 52
@@ -59,11 +57,6 @@ connlimit_iphash6(const union nf_inet_addr *addr,
59 union nf_inet_addr res; 57 union nf_inet_addr res;
60 unsigned int i; 58 unsigned int i;
61 59
62 if (unlikely(!connlimit_rnd_inited)) {
63 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
64 connlimit_rnd_inited = true;
65 }
66
67 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) 60 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
68 res.ip6[i] = addr->ip6[i] & mask->ip6[i]; 61 res.ip6[i] = addr->ip6[i] & mask->ip6[i];
69 62
@@ -99,7 +92,8 @@ same_source_net(const union nf_inet_addr *addr,
99 } 92 }
100} 93}
101 94
102static int count_them(struct xt_connlimit_data *data, 95static int count_them(struct net *net,
96 struct xt_connlimit_data *data,
103 const struct nf_conntrack_tuple *tuple, 97 const struct nf_conntrack_tuple *tuple,
104 const union nf_inet_addr *addr, 98 const union nf_inet_addr *addr,
105 const union nf_inet_addr *mask, 99 const union nf_inet_addr *mask,
@@ -122,7 +116,8 @@ static int count_them(struct xt_connlimit_data *data,
122 116
123 /* check the saved connections */ 117 /* check the saved connections */
124 list_for_each_entry_safe(conn, tmp, hash, list) { 118 list_for_each_entry_safe(conn, tmp, hash, list) {
125 found = nf_conntrack_find_get(&init_net, &conn->tuple); 119 found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
120 &conn->tuple);
126 found_ct = NULL; 121 found_ct = NULL;
127 122
128 if (found != NULL) 123 if (found != NULL)
@@ -180,6 +175,7 @@ static int count_them(struct xt_connlimit_data *data,
180static bool 175static bool
181connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) 176connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
182{ 177{
178 struct net *net = dev_net(par->in ? par->in : par->out);
183 const struct xt_connlimit_info *info = par->matchinfo; 179 const struct xt_connlimit_info *info = par->matchinfo;
184 union nf_inet_addr addr; 180 union nf_inet_addr addr;
185 struct nf_conntrack_tuple tuple; 181 struct nf_conntrack_tuple tuple;
@@ -204,7 +200,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
204 } 200 }
205 201
206 spin_lock_bh(&info->data->lock); 202 spin_lock_bh(&info->data->lock);
207 connections = count_them(info->data, tuple_ptr, &addr, 203 connections = count_them(net, info->data, tuple_ptr, &addr,
208 &info->mask, par->family); 204 &info->mask, par->family);
209 spin_unlock_bh(&info->data->lock); 205 spin_unlock_bh(&info->data->lock);
210 206
@@ -226,6 +222,10 @@ static bool connlimit_mt_check(const struct xt_mtchk_param *par)
226 struct xt_connlimit_info *info = par->matchinfo; 222 struct xt_connlimit_info *info = par->matchinfo;
227 unsigned int i; 223 unsigned int i;
228 224
225 if (unlikely(!connlimit_rnd_inited)) {
226 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
227 connlimit_rnd_inited = true;
228 }
229 if (nf_ct_l3proto_try_module_get(par->family) < 0) { 229 if (nf_ct_l3proto_try_module_get(par->family) < 0) {
230 printk(KERN_WARNING "cannot load conntrack support for " 230 printk(KERN_WARNING "cannot load conntrack support for "
231 "address family %u\n", par->family); 231 "address family %u\n", par->family);
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index 0989f29ade2e..395af5943ffd 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/slab.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <net/ip.h> 15#include <net/ip.h>
15#include <linux/dccp.h> 16#include <linux/dccp.h>
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dd16e404424f..215a64835de8 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * xt_hashlimit - Netfilter module to limit the number of packets per time 2 * xt_hashlimit - Netfilter module to limit the number of packets per time
3 * seperately for each hashbucket (sourceip/sourceport/dstip/dstport) 3 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
4 * 4 *
5 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> 5 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008 6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
@@ -26,6 +26,7 @@
26#endif 26#endif
27 27
28#include <net/net_namespace.h> 28#include <net/net_namespace.h>
29#include <net/netns/generic.h>
29 30
30#include <linux/netfilter/x_tables.h> 31#include <linux/netfilter/x_tables.h>
31#include <linux/netfilter_ipv4/ip_tables.h> 32#include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,9 +41,19 @@ MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
40MODULE_ALIAS("ipt_hashlimit"); 41MODULE_ALIAS("ipt_hashlimit");
41MODULE_ALIAS("ip6t_hashlimit"); 42MODULE_ALIAS("ip6t_hashlimit");
42 43
44struct hashlimit_net {
45 struct hlist_head htables;
46 struct proc_dir_entry *ipt_hashlimit;
47 struct proc_dir_entry *ip6t_hashlimit;
48};
49
50static int hashlimit_net_id;
51static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
52{
53 return net_generic(net, hashlimit_net_id);
54}
55
43/* need to declare this at the top */ 56/* need to declare this at the top */
44static struct proc_dir_entry *hashlimit_procdir4;
45static struct proc_dir_entry *hashlimit_procdir6;
46static const struct file_operations dl_file_ops; 57static const struct file_operations dl_file_ops;
47 58
48/* hash table crap */ 59/* hash table crap */
@@ -79,27 +90,26 @@ struct dsthash_ent {
79 90
80struct xt_hashlimit_htable { 91struct xt_hashlimit_htable {
81 struct hlist_node node; /* global list of all htables */ 92 struct hlist_node node; /* global list of all htables */
82 atomic_t use; 93 int use;
83 u_int8_t family; 94 u_int8_t family;
95 bool rnd_initialized;
84 96
85 struct hashlimit_cfg1 cfg; /* config */ 97 struct hashlimit_cfg1 cfg; /* config */
86 98
87 /* used internally */ 99 /* used internally */
88 spinlock_t lock; /* lock for list_head */ 100 spinlock_t lock; /* lock for list_head */
89 u_int32_t rnd; /* random seed for hash */ 101 u_int32_t rnd; /* random seed for hash */
90 int rnd_initialized;
91 unsigned int count; /* number entries in table */ 102 unsigned int count; /* number entries in table */
92 struct timer_list timer; /* timer for gc */ 103 struct timer_list timer; /* timer for gc */
93 104
94 /* seq_file stuff */ 105 /* seq_file stuff */
95 struct proc_dir_entry *pde; 106 struct proc_dir_entry *pde;
107 struct net *net;
96 108
97 struct hlist_head hash[0]; /* hashtable itself */ 109 struct hlist_head hash[0]; /* hashtable itself */
98}; 110};
99 111
100static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 112static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
101static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
102static HLIST_HEAD(hashlimit_htables);
103static struct kmem_cache *hashlimit_cachep __read_mostly; 113static struct kmem_cache *hashlimit_cachep __read_mostly;
104 114
105static inline bool dst_cmp(const struct dsthash_ent *ent, 115static inline bool dst_cmp(const struct dsthash_ent *ent,
@@ -150,7 +160,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
150 * the first hashtable entry */ 160 * the first hashtable entry */
151 if (!ht->rnd_initialized) { 161 if (!ht->rnd_initialized) {
152 get_random_bytes(&ht->rnd, sizeof(ht->rnd)); 162 get_random_bytes(&ht->rnd, sizeof(ht->rnd));
153 ht->rnd_initialized = 1; 163 ht->rnd_initialized = true;
154 } 164 }
155 165
156 if (ht->cfg.max && ht->count >= ht->cfg.max) { 166 if (ht->cfg.max && ht->count >= ht->cfg.max) {
@@ -185,8 +195,9 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
185} 195}
186static void htable_gc(unsigned long htlong); 196static void htable_gc(unsigned long htlong);
187 197
188static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) 198static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
189{ 199{
200 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
190 struct xt_hashlimit_htable *hinfo; 201 struct xt_hashlimit_htable *hinfo;
191 unsigned int size; 202 unsigned int size;
192 unsigned int i; 203 unsigned int i;
@@ -232,33 +243,34 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
232 for (i = 0; i < hinfo->cfg.size; i++) 243 for (i = 0; i < hinfo->cfg.size; i++)
233 INIT_HLIST_HEAD(&hinfo->hash[i]); 244 INIT_HLIST_HEAD(&hinfo->hash[i]);
234 245
235 atomic_set(&hinfo->use, 1); 246 hinfo->use = 1;
236 hinfo->count = 0; 247 hinfo->count = 0;
237 hinfo->family = family; 248 hinfo->family = family;
238 hinfo->rnd_initialized = 0; 249 hinfo->rnd_initialized = false;
239 spin_lock_init(&hinfo->lock); 250 spin_lock_init(&hinfo->lock);
240 hinfo->pde = proc_create_data(minfo->name, 0, 251 hinfo->pde = proc_create_data(minfo->name, 0,
241 (family == NFPROTO_IPV4) ? 252 (family == NFPROTO_IPV4) ?
242 hashlimit_procdir4 : hashlimit_procdir6, 253 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
243 &dl_file_ops, hinfo); 254 &dl_file_ops, hinfo);
244 if (!hinfo->pde) { 255 if (!hinfo->pde) {
245 vfree(hinfo); 256 vfree(hinfo);
246 return -1; 257 return -1;
247 } 258 }
259 hinfo->net = net;
248 260
249 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo); 261 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
250 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 262 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
251 add_timer(&hinfo->timer); 263 add_timer(&hinfo->timer);
252 264
253 spin_lock_bh(&hashlimit_lock); 265 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
254 hlist_add_head(&hinfo->node, &hashlimit_htables);
255 spin_unlock_bh(&hashlimit_lock);
256 266
257 return 0; 267 return 0;
258} 268}
259 269
260static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) 270static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
271 u_int8_t family)
261{ 272{
273 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
262 struct xt_hashlimit_htable *hinfo; 274 struct xt_hashlimit_htable *hinfo;
263 unsigned int size; 275 unsigned int size;
264 unsigned int i; 276 unsigned int i;
@@ -293,28 +305,27 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
293 for (i = 0; i < hinfo->cfg.size; i++) 305 for (i = 0; i < hinfo->cfg.size; i++)
294 INIT_HLIST_HEAD(&hinfo->hash[i]); 306 INIT_HLIST_HEAD(&hinfo->hash[i]);
295 307
296 atomic_set(&hinfo->use, 1); 308 hinfo->use = 1;
297 hinfo->count = 0; 309 hinfo->count = 0;
298 hinfo->family = family; 310 hinfo->family = family;
299 hinfo->rnd_initialized = 0; 311 hinfo->rnd_initialized = false;
300 spin_lock_init(&hinfo->lock); 312 spin_lock_init(&hinfo->lock);
301 313
302 hinfo->pde = proc_create_data(minfo->name, 0, 314 hinfo->pde = proc_create_data(minfo->name, 0,
303 (family == NFPROTO_IPV4) ? 315 (family == NFPROTO_IPV4) ?
304 hashlimit_procdir4 : hashlimit_procdir6, 316 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
305 &dl_file_ops, hinfo); 317 &dl_file_ops, hinfo);
306 if (hinfo->pde == NULL) { 318 if (hinfo->pde == NULL) {
307 vfree(hinfo); 319 vfree(hinfo);
308 return -1; 320 return -1;
309 } 321 }
322 hinfo->net = net;
310 323
311 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); 324 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
312 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 325 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
313 add_timer(&hinfo->timer); 326 add_timer(&hinfo->timer);
314 327
315 spin_lock_bh(&hashlimit_lock); 328 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
316 hlist_add_head(&hinfo->node, &hashlimit_htables);
317 spin_unlock_bh(&hashlimit_lock);
318 329
319 return 0; 330 return 0;
320} 331}
@@ -364,43 +375,46 @@ static void htable_gc(unsigned long htlong)
364 375
365static void htable_destroy(struct xt_hashlimit_htable *hinfo) 376static void htable_destroy(struct xt_hashlimit_htable *hinfo)
366{ 377{
378 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
379 struct proc_dir_entry *parent;
380
367 del_timer_sync(&hinfo->timer); 381 del_timer_sync(&hinfo->timer);
368 382
369 /* remove proc entry */ 383 if (hinfo->family == NFPROTO_IPV4)
370 remove_proc_entry(hinfo->pde->name, 384 parent = hashlimit_net->ipt_hashlimit;
371 hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 : 385 else
372 hashlimit_procdir6); 386 parent = hashlimit_net->ip6t_hashlimit;
387 remove_proc_entry(hinfo->pde->name, parent);
373 htable_selective_cleanup(hinfo, select_all); 388 htable_selective_cleanup(hinfo, select_all);
374 vfree(hinfo); 389 vfree(hinfo);
375} 390}
376 391
377static struct xt_hashlimit_htable *htable_find_get(const char *name, 392static struct xt_hashlimit_htable *htable_find_get(struct net *net,
393 const char *name,
378 u_int8_t family) 394 u_int8_t family)
379{ 395{
396 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
380 struct xt_hashlimit_htable *hinfo; 397 struct xt_hashlimit_htable *hinfo;
381 struct hlist_node *pos; 398 struct hlist_node *pos;
382 399
383 spin_lock_bh(&hashlimit_lock); 400 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
384 hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
385 if (!strcmp(name, hinfo->pde->name) && 401 if (!strcmp(name, hinfo->pde->name) &&
386 hinfo->family == family) { 402 hinfo->family == family) {
387 atomic_inc(&hinfo->use); 403 hinfo->use++;
388 spin_unlock_bh(&hashlimit_lock);
389 return hinfo; 404 return hinfo;
390 } 405 }
391 } 406 }
392 spin_unlock_bh(&hashlimit_lock);
393 return NULL; 407 return NULL;
394} 408}
395 409
396static void htable_put(struct xt_hashlimit_htable *hinfo) 410static void htable_put(struct xt_hashlimit_htable *hinfo)
397{ 411{
398 if (atomic_dec_and_test(&hinfo->use)) { 412 mutex_lock(&hashlimit_mutex);
399 spin_lock_bh(&hashlimit_lock); 413 if (--hinfo->use == 0) {
400 hlist_del(&hinfo->node); 414 hlist_del(&hinfo->node);
401 spin_unlock_bh(&hashlimit_lock);
402 htable_destroy(hinfo); 415 htable_destroy(hinfo);
403 } 416 }
417 mutex_unlock(&hashlimit_mutex);
404} 418}
405 419
406/* The algorithm used is the Simple Token Bucket Filter (TBF) 420/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -479,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
479 case 64 ... 95: 493 case 64 ... 95:
480 i[2] = maskl(i[2], p - 64); 494 i[2] = maskl(i[2], p - 64);
481 i[3] = 0; 495 i[3] = 0;
496 break;
482 case 96 ... 127: 497 case 96 ... 127:
483 i[3] = maskl(i[3], p - 96); 498 i[3] = maskl(i[3], p - 96);
484 break; 499 break;
@@ -665,6 +680,7 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
665 680
666static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) 681static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
667{ 682{
683 struct net *net = par->net;
668 struct xt_hashlimit_info *r = par->matchinfo; 684 struct xt_hashlimit_info *r = par->matchinfo;
669 685
670 /* Check for overflow. */ 686 /* Check for overflow. */
@@ -687,25 +703,20 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
687 if (r->name[sizeof(r->name) - 1] != '\0') 703 if (r->name[sizeof(r->name) - 1] != '\0')
688 return false; 704 return false;
689 705
690 /* This is the best we've got: We cannot release and re-grab lock, 706 mutex_lock(&hashlimit_mutex);
691 * since checkentry() is called before x_tables.c grabs xt_mutex. 707 r->hinfo = htable_find_get(net, r->name, par->match->family);
692 * We also cannot grab the hashtable spinlock, since htable_create will 708 if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
693 * call vmalloc, and that can sleep. And we cannot just re-search 709 mutex_unlock(&hashlimit_mutex);
694 * the list of htable's in htable_create(), since then we would
695 * create duplicate proc files. -HW */
696 mutex_lock(&hlimit_mutex);
697 r->hinfo = htable_find_get(r->name, par->match->family);
698 if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
699 mutex_unlock(&hlimit_mutex);
700 return false; 710 return false;
701 } 711 }
702 mutex_unlock(&hlimit_mutex); 712 mutex_unlock(&hashlimit_mutex);
703 713
704 return true; 714 return true;
705} 715}
706 716
707static bool hashlimit_mt_check(const struct xt_mtchk_param *par) 717static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
708{ 718{
719 struct net *net = par->net;
709 struct xt_hashlimit_mtinfo1 *info = par->matchinfo; 720 struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
710 721
711 /* Check for overflow. */ 722 /* Check for overflow. */
@@ -728,19 +739,13 @@ static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
728 return false; 739 return false;
729 } 740 }
730 741
731 /* This is the best we've got: We cannot release and re-grab lock, 742 mutex_lock(&hashlimit_mutex);
732 * since checkentry() is called before x_tables.c grabs xt_mutex. 743 info->hinfo = htable_find_get(net, info->name, par->match->family);
733 * We also cannot grab the hashtable spinlock, since htable_create will 744 if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
734 * call vmalloc, and that can sleep. And we cannot just re-search 745 mutex_unlock(&hashlimit_mutex);
735 * the list of htable's in htable_create(), since then we would
736 * create duplicate proc files. -HW */
737 mutex_lock(&hlimit_mutex);
738 info->hinfo = htable_find_get(info->name, par->match->family);
739 if (!info->hinfo && htable_create(info, par->match->family) != 0) {
740 mutex_unlock(&hlimit_mutex);
741 return false; 746 return false;
742 } 747 }
743 mutex_unlock(&hlimit_mutex); 748 mutex_unlock(&hashlimit_mutex);
744 return true; 749 return true;
745} 750}
746 751
@@ -767,7 +772,7 @@ struct compat_xt_hashlimit_info {
767 compat_uptr_t master; 772 compat_uptr_t master;
768}; 773};
769 774
770static void hashlimit_mt_compat_from_user(void *dst, void *src) 775static void hashlimit_mt_compat_from_user(void *dst, const void *src)
771{ 776{
772 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 777 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
773 778
@@ -775,7 +780,7 @@ static void hashlimit_mt_compat_from_user(void *dst, void *src)
775 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); 780 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
776} 781}
777 782
778static int hashlimit_mt_compat_to_user(void __user *dst, void *src) 783static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
779{ 784{
780 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 785 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
781 786
@@ -841,8 +846,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
841static void *dl_seq_start(struct seq_file *s, loff_t *pos) 846static void *dl_seq_start(struct seq_file *s, loff_t *pos)
842 __acquires(htable->lock) 847 __acquires(htable->lock)
843{ 848{
844 struct proc_dir_entry *pde = s->private; 849 struct xt_hashlimit_htable *htable = s->private;
845 struct xt_hashlimit_htable *htable = pde->data;
846 unsigned int *bucket; 850 unsigned int *bucket;
847 851
848 spin_lock_bh(&htable->lock); 852 spin_lock_bh(&htable->lock);
@@ -859,8 +863,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
859 863
860static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 864static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
861{ 865{
862 struct proc_dir_entry *pde = s->private; 866 struct xt_hashlimit_htable *htable = s->private;
863 struct xt_hashlimit_htable *htable = pde->data;
864 unsigned int *bucket = (unsigned int *)v; 867 unsigned int *bucket = (unsigned int *)v;
865 868
866 *pos = ++(*bucket); 869 *pos = ++(*bucket);
@@ -874,11 +877,11 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
874static void dl_seq_stop(struct seq_file *s, void *v) 877static void dl_seq_stop(struct seq_file *s, void *v)
875 __releases(htable->lock) 878 __releases(htable->lock)
876{ 879{
877 struct proc_dir_entry *pde = s->private; 880 struct xt_hashlimit_htable *htable = s->private;
878 struct xt_hashlimit_htable *htable = pde->data;
879 unsigned int *bucket = (unsigned int *)v; 881 unsigned int *bucket = (unsigned int *)v;
880 882
881 kfree(bucket); 883 if (!IS_ERR(bucket))
884 kfree(bucket);
882 spin_unlock_bh(&htable->lock); 885 spin_unlock_bh(&htable->lock);
883} 886}
884 887
@@ -917,8 +920,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
917 920
918static int dl_seq_show(struct seq_file *s, void *v) 921static int dl_seq_show(struct seq_file *s, void *v)
919{ 922{
920 struct proc_dir_entry *pde = s->private; 923 struct xt_hashlimit_htable *htable = s->private;
921 struct xt_hashlimit_htable *htable = pde->data;
922 unsigned int *bucket = (unsigned int *)v; 924 unsigned int *bucket = (unsigned int *)v;
923 struct dsthash_ent *ent; 925 struct dsthash_ent *ent;
924 struct hlist_node *pos; 926 struct hlist_node *pos;
@@ -944,7 +946,7 @@ static int dl_proc_open(struct inode *inode, struct file *file)
944 946
945 if (!ret) { 947 if (!ret) {
946 struct seq_file *sf = file->private_data; 948 struct seq_file *sf = file->private_data;
947 sf->private = PDE(inode); 949 sf->private = PDE(inode)->data;
948 } 950 }
949 return ret; 951 return ret;
950} 952}
@@ -957,10 +959,61 @@ static const struct file_operations dl_file_ops = {
957 .release = seq_release 959 .release = seq_release
958}; 960};
959 961
962static int __net_init hashlimit_proc_net_init(struct net *net)
963{
964 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
965
966 hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
967 if (!hashlimit_net->ipt_hashlimit)
968 return -ENOMEM;
969#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
970 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
971 if (!hashlimit_net->ip6t_hashlimit) {
972 proc_net_remove(net, "ipt_hashlimit");
973 return -ENOMEM;
974 }
975#endif
976 return 0;
977}
978
979static void __net_exit hashlimit_proc_net_exit(struct net *net)
980{
981 proc_net_remove(net, "ipt_hashlimit");
982#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
983 proc_net_remove(net, "ip6t_hashlimit");
984#endif
985}
986
987static int __net_init hashlimit_net_init(struct net *net)
988{
989 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
990
991 INIT_HLIST_HEAD(&hashlimit_net->htables);
992 return hashlimit_proc_net_init(net);
993}
994
995static void __net_exit hashlimit_net_exit(struct net *net)
996{
997 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
998
999 BUG_ON(!hlist_empty(&hashlimit_net->htables));
1000 hashlimit_proc_net_exit(net);
1001}
1002
1003static struct pernet_operations hashlimit_net_ops = {
1004 .init = hashlimit_net_init,
1005 .exit = hashlimit_net_exit,
1006 .id = &hashlimit_net_id,
1007 .size = sizeof(struct hashlimit_net),
1008};
1009
960static int __init hashlimit_mt_init(void) 1010static int __init hashlimit_mt_init(void)
961{ 1011{
962 int err; 1012 int err;
963 1013
1014 err = register_pernet_subsys(&hashlimit_net_ops);
1015 if (err < 0)
1016 return err;
964 err = xt_register_matches(hashlimit_mt_reg, 1017 err = xt_register_matches(hashlimit_mt_reg,
965 ARRAY_SIZE(hashlimit_mt_reg)); 1018 ARRAY_SIZE(hashlimit_mt_reg));
966 if (err < 0) 1019 if (err < 0)
@@ -974,41 +1027,21 @@ static int __init hashlimit_mt_init(void)
974 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 1027 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
975 goto err2; 1028 goto err2;
976 } 1029 }
977 hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net); 1030 return 0;
978 if (!hashlimit_procdir4) { 1031
979 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
980 "entry\n");
981 goto err3;
982 }
983 err = 0;
984#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
985 hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
986 if (!hashlimit_procdir6) {
987 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
988 "entry\n");
989 err = -ENOMEM;
990 }
991#endif
992 if (!err)
993 return 0;
994 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
995err3:
996 kmem_cache_destroy(hashlimit_cachep);
997err2: 1032err2:
998 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1033 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
999err1: 1034err1:
1035 unregister_pernet_subsys(&hashlimit_net_ops);
1000 return err; 1036 return err;
1001 1037
1002} 1038}
1003 1039
1004static void __exit hashlimit_mt_exit(void) 1040static void __exit hashlimit_mt_exit(void)
1005{ 1041{
1006 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
1007#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
1008 remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
1009#endif
1010 kmem_cache_destroy(hashlimit_cachep); 1042 kmem_cache_destroy(hashlimit_cachep);
1011 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1043 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
1044 unregister_pernet_subsys(&hashlimit_net_ops);
1012} 1045}
1013 1046
1014module_init(hashlimit_mt_init); 1047module_init(hashlimit_mt_init);
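
Taken together, the hashlimit changes above replace the module-global proc directories and htable list with per-network-namespace state registered through register_pernet_subsys(), and the checkentry path now serialises on a single mutex instead of the old spinlock plus atomic refcount. What follows is a condensed sketch of the pernet pattern the patch applies; the example_* names are illustrative, not taken from the file:

#include <linux/list.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_net {
	struct hlist_head htables;		/* per-namespace table list */
	struct proc_dir_entry *proc_dir;	/* per-namespace proc directory */
};

static int example_net_id;

static inline struct example_net *example_pernet(struct net *net)
{
	/* net_generic() indexes the namespace's generic-pointer array with
	 * the id filled in when the pernet_operations are registered. */
	return net_generic(net, example_net_id);
}

static int __net_init example_net_init(struct net *net)
{
	INIT_HLIST_HEAD(&example_pernet(net)->htables);
	return 0;	/* proc_mkdir() for proc_dir would also happen here */
}

static void __net_exit example_net_exit(struct net *net)
{
	BUG_ON(!hlist_empty(&example_pernet(net)->htables));
}

static struct pernet_operations example_net_ops = {
	.init	= example_net_init,
	.exit	= example_net_exit,
	.id	= &example_net_id,
	.size	= sizeof(struct example_net),	/* allocated per namespace */
};

/* Module init/exit bracket the match registration with
 * register_pernet_subsys(&example_net_ops) and
 * unregister_pernet_subsys(&example_net_ops), as the hunks above do. */
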
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2773be6a71dd..e5d7e1ffb1a4 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -6,6 +6,7 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/slab.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
@@ -148,7 +149,7 @@ struct compat_xt_rateinfo {
148 149
149/* To keep the full "prev" timestamp, the upper 32 bits are stored in the 150/* To keep the full "prev" timestamp, the upper 32 bits are stored in the
150 * master pointer, which does not need to be preserved. */ 151 * master pointer, which does not need to be preserved. */
151static void limit_mt_compat_from_user(void *dst, void *src) 152static void limit_mt_compat_from_user(void *dst, const void *src)
152{ 153{
153 const struct compat_xt_rateinfo *cm = src; 154 const struct compat_xt_rateinfo *cm = src;
154 struct xt_rateinfo m = { 155 struct xt_rateinfo m = {
@@ -162,7 +163,7 @@ static void limit_mt_compat_from_user(void *dst, void *src)
162 memcpy(dst, &m, sizeof(m)); 163 memcpy(dst, &m, sizeof(m));
163} 164}
164 165
165static int limit_mt_compat_to_user(void __user *dst, void *src) 166static int limit_mt_compat_to_user(void __user *dst, const void *src)
166{ 167{
167 const struct xt_rateinfo *m = src; 168 const struct xt_rateinfo *m = src;
168 struct compat_xt_rateinfo cm = { 169 struct compat_xt_rateinfo cm = {
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4d1a41bbd5d7..4169e200588d 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -334,7 +334,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
334 if (info->flags & XT_OSF_LOG) 334 if (info->flags & XT_OSF_LOG)
335 nf_log_packet(p->family, p->hooknum, skb, 335 nf_log_packet(p->family, p->hooknum, skb,
336 p->in, p->out, NULL, 336 p->in, p->out, NULL,
337 "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n", 337 "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
338 f->genre, f->version, f->subtype, 338 f->genre, f->version, f->subtype,
339 &ip->saddr, ntohs(tcp->source), 339 &ip->saddr, ntohs(tcp->source),
340 &ip->daddr, ntohs(tcp->dest), 340 &ip->daddr, ntohs(tcp->dest),
@@ -349,7 +349,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
349 349
350 if (!fcount && (info->flags & XT_OSF_LOG)) 350 if (!fcount && (info->flags & XT_OSF_LOG))
351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, 351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
352 "Remote OS is not known: %pi4:%u -> %pi4:%u\n", 352 "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
353 &ip->saddr, ntohs(tcp->source), 353 &ip->saddr, ntohs(tcp->source),
354 &ip->daddr, ntohs(tcp->dest)); 354 &ip->daddr, ntohs(tcp->dest));
355 355
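
The xt_osf change swaps the %pi4 printk extension for %pI4. Both take a pointer to a big-endian IPv4 address, but %pi4 zero-pads each octet while %pI4 prints the conventional dotted-quad form, which is what log consumers expect here. A tiny illustration, with a made-up address value:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void osf_format_demo(void)
{
	__be32 addr = cpu_to_be32(0xc0a80001);	/* 192.168.0.1, example only */

	pr_info("%pi4\n", &addr);	/* prints "192.168.000.001" (zero-padded) */
	pr_info("%pI4\n", &addr);	/* prints "192.168.0.1" */
}
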
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 390b7d09fe51..2d5562498c43 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -4,6 +4,7 @@
4 * Sam Johnston <samj@samj.net> 4 * Sam Johnston <samj@samj.net>
5 */ 5 */
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/slab.h>
7#include <linux/spinlock.h> 8#include <linux/spinlock.h>
8 9
9#include <linux/netfilter/x_tables.h> 10#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc70a49c0afd..834b736857cb 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -27,7 +27,9 @@
27#include <linux/bitops.h> 27#include <linux/bitops.h>
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/inet.h> 29#include <linux/inet.h>
30#include <linux/slab.h>
30#include <net/net_namespace.h> 31#include <net/net_namespace.h>
32#include <net/netns/generic.h>
31 33
32#include <linux/netfilter/x_tables.h> 34#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter/xt_recent.h> 35#include <linux/netfilter/xt_recent.h>
@@ -52,7 +54,7 @@ module_param(ip_list_perms, uint, 0400);
52module_param(ip_list_uid, uint, 0400); 54module_param(ip_list_uid, uint, 0400);
53module_param(ip_list_gid, uint, 0400); 55module_param(ip_list_gid, uint, 0400);
54MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); 56MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
55MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP to remember (max. 255)"); 57MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
56MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); 58MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
57MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); 59MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
58MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files"); 60MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
@@ -78,37 +80,40 @@ struct recent_table {
78 struct list_head iphash[0]; 80 struct list_head iphash[0];
79}; 81};
80 82
81static LIST_HEAD(tables); 83struct recent_net {
84 struct list_head tables;
85#ifdef CONFIG_PROC_FS
86 struct proc_dir_entry *xt_recent;
87#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
88 struct proc_dir_entry *ipt_recent;
89#endif
90#endif
91};
92
93static int recent_net_id;
94static inline struct recent_net *recent_pernet(struct net *net)
95{
96 return net_generic(net, recent_net_id);
97}
98
82static DEFINE_SPINLOCK(recent_lock); 99static DEFINE_SPINLOCK(recent_lock);
83static DEFINE_MUTEX(recent_mutex); 100static DEFINE_MUTEX(recent_mutex);
84 101
85#ifdef CONFIG_PROC_FS 102#ifdef CONFIG_PROC_FS
86#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
87static struct proc_dir_entry *proc_old_dir;
88#endif
89static struct proc_dir_entry *recent_proc_dir;
90static const struct file_operations recent_old_fops, recent_mt_fops; 103static const struct file_operations recent_old_fops, recent_mt_fops;
91#endif 104#endif
92 105
93static u_int32_t hash_rnd; 106static u_int32_t hash_rnd __read_mostly;
94static bool hash_rnd_initted; 107static bool hash_rnd_inited __read_mostly;
95 108
96static unsigned int recent_entry_hash4(const union nf_inet_addr *addr) 109static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
97{ 110{
98 if (!hash_rnd_initted) {
99 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
100 hash_rnd_initted = true;
101 }
102 return jhash_1word((__force u32)addr->ip, hash_rnd) & 111 return jhash_1word((__force u32)addr->ip, hash_rnd) &
103 (ip_list_hash_size - 1); 112 (ip_list_hash_size - 1);
104} 113}
105 114
106static unsigned int recent_entry_hash6(const union nf_inet_addr *addr) 115static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
107{ 116{
108 if (!hash_rnd_initted) {
109 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
110 hash_rnd_initted = true;
111 }
112 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & 117 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
113 (ip_list_hash_size - 1); 118 (ip_list_hash_size - 1);
114} 119}
@@ -173,18 +178,19 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
173 178
174static void recent_entry_update(struct recent_table *t, struct recent_entry *e) 179static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
175{ 180{
181 e->index %= ip_pkt_list_tot;
176 e->stamps[e->index++] = jiffies; 182 e->stamps[e->index++] = jiffies;
177 if (e->index > e->nstamps) 183 if (e->index > e->nstamps)
178 e->nstamps = e->index; 184 e->nstamps = e->index;
179 e->index %= ip_pkt_list_tot;
180 list_move_tail(&e->lru_list, &t->lru_list); 185 list_move_tail(&e->lru_list, &t->lru_list);
181} 186}
182 187
183static struct recent_table *recent_table_lookup(const char *name) 188static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
189 const char *name)
184{ 190{
185 struct recent_table *t; 191 struct recent_table *t;
186 192
187 list_for_each_entry(t, &tables, list) 193 list_for_each_entry(t, &recent_net->tables, list)
188 if (!strcmp(t->name, name)) 194 if (!strcmp(t->name, name))
189 return t; 195 return t;
190 return NULL; 196 return NULL;
@@ -203,6 +209,8 @@ static void recent_table_flush(struct recent_table *t)
203static bool 209static bool
204recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) 210recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
205{ 211{
212 struct net *net = dev_net(par->in ? par->in : par->out);
213 struct recent_net *recent_net = recent_pernet(net);
206 const struct xt_recent_mtinfo *info = par->matchinfo; 214 const struct xt_recent_mtinfo *info = par->matchinfo;
207 struct recent_table *t; 215 struct recent_table *t;
208 struct recent_entry *e; 216 struct recent_entry *e;
@@ -235,7 +243,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
235 ttl++; 243 ttl++;
236 244
237 spin_lock_bh(&recent_lock); 245 spin_lock_bh(&recent_lock);
238 t = recent_table_lookup(info->name); 246 t = recent_table_lookup(recent_net, info->name);
239 e = recent_entry_lookup(t, &addr, par->match->family, 247 e = recent_entry_lookup(t, &addr, par->match->family,
240 (info->check_set & XT_RECENT_TTL) ? ttl : 0); 248 (info->check_set & XT_RECENT_TTL) ? ttl : 0);
241 if (e == NULL) { 249 if (e == NULL) {
@@ -260,7 +268,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
260 for (i = 0; i < e->nstamps; i++) { 268 for (i = 0; i < e->nstamps; i++) {
261 if (info->seconds && time_after(time, e->stamps[i])) 269 if (info->seconds && time_after(time, e->stamps[i]))
262 continue; 270 continue;
263 if (++hits >= info->hit_count) { 271 if (!info->hit_count || ++hits >= info->hit_count) {
264 ret = !ret; 272 ret = !ret;
265 break; 273 break;
266 } 274 }
@@ -279,6 +287,7 @@ out:
279 287
280static bool recent_mt_check(const struct xt_mtchk_param *par) 288static bool recent_mt_check(const struct xt_mtchk_param *par)
281{ 289{
290 struct recent_net *recent_net = recent_pernet(par->net);
282 const struct xt_recent_mtinfo *info = par->matchinfo; 291 const struct xt_recent_mtinfo *info = par->matchinfo;
283 struct recent_table *t; 292 struct recent_table *t;
284#ifdef CONFIG_PROC_FS 293#ifdef CONFIG_PROC_FS
@@ -287,6 +296,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
287 unsigned i; 296 unsigned i;
288 bool ret = false; 297 bool ret = false;
289 298
299 if (unlikely(!hash_rnd_inited)) {
300 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
301 hash_rnd_inited = true;
302 }
290 if (hweight8(info->check_set & 303 if (hweight8(info->check_set &
291 (XT_RECENT_SET | XT_RECENT_REMOVE | 304 (XT_RECENT_SET | XT_RECENT_REMOVE |
292 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) 305 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
@@ -294,14 +307,18 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
294 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && 307 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
295 (info->seconds || info->hit_count)) 308 (info->seconds || info->hit_count))
296 return false; 309 return false;
297 if (info->hit_count > ip_pkt_list_tot) 310 if (info->hit_count > ip_pkt_list_tot) {
311 pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
312 "packets to be remembered (%u)\n",
313 info->hit_count, ip_pkt_list_tot);
298 return false; 314 return false;
315 }
299 if (info->name[0] == '\0' || 316 if (info->name[0] == '\0' ||
300 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 317 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
301 return false; 318 return false;
302 319
303 mutex_lock(&recent_mutex); 320 mutex_lock(&recent_mutex);
304 t = recent_table_lookup(info->name); 321 t = recent_table_lookup(recent_net, info->name);
305 if (t != NULL) { 322 if (t != NULL) {
306 t->refcnt++; 323 t->refcnt++;
307 ret = true; 324 ret = true;
@@ -318,7 +335,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
318 for (i = 0; i < ip_list_hash_size; i++) 335 for (i = 0; i < ip_list_hash_size; i++)
319 INIT_LIST_HEAD(&t->iphash[i]); 336 INIT_LIST_HEAD(&t->iphash[i]);
320#ifdef CONFIG_PROC_FS 337#ifdef CONFIG_PROC_FS
321 pde = proc_create_data(t->name, ip_list_perms, recent_proc_dir, 338 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
322 &recent_mt_fops, t); 339 &recent_mt_fops, t);
323 if (pde == NULL) { 340 if (pde == NULL) {
324 kfree(t); 341 kfree(t);
@@ -327,10 +344,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
327 pde->uid = ip_list_uid; 344 pde->uid = ip_list_uid;
328 pde->gid = ip_list_gid; 345 pde->gid = ip_list_gid;
329#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 346#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
330 pde = proc_create_data(t->name, ip_list_perms, proc_old_dir, 347 pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
331 &recent_old_fops, t); 348 &recent_old_fops, t);
332 if (pde == NULL) { 349 if (pde == NULL) {
333 remove_proc_entry(t->name, proc_old_dir); 350 remove_proc_entry(t->name, recent_net->xt_recent);
334 kfree(t); 351 kfree(t);
335 goto out; 352 goto out;
336 } 353 }
@@ -339,7 +356,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
339#endif 356#endif
340#endif 357#endif
341 spin_lock_bh(&recent_lock); 358 spin_lock_bh(&recent_lock);
342 list_add_tail(&t->list, &tables); 359 list_add_tail(&t->list, &recent_net->tables);
343 spin_unlock_bh(&recent_lock); 360 spin_unlock_bh(&recent_lock);
344 ret = true; 361 ret = true;
345out: 362out:
@@ -349,20 +366,21 @@ out:
349 366
350static void recent_mt_destroy(const struct xt_mtdtor_param *par) 367static void recent_mt_destroy(const struct xt_mtdtor_param *par)
351{ 368{
369 struct recent_net *recent_net = recent_pernet(par->net);
352 const struct xt_recent_mtinfo *info = par->matchinfo; 370 const struct xt_recent_mtinfo *info = par->matchinfo;
353 struct recent_table *t; 371 struct recent_table *t;
354 372
355 mutex_lock(&recent_mutex); 373 mutex_lock(&recent_mutex);
356 t = recent_table_lookup(info->name); 374 t = recent_table_lookup(recent_net, info->name);
357 if (--t->refcnt == 0) { 375 if (--t->refcnt == 0) {
358 spin_lock_bh(&recent_lock); 376 spin_lock_bh(&recent_lock);
359 list_del(&t->list); 377 list_del(&t->list);
360 spin_unlock_bh(&recent_lock); 378 spin_unlock_bh(&recent_lock);
361#ifdef CONFIG_PROC_FS 379#ifdef CONFIG_PROC_FS
362#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 380#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
363 remove_proc_entry(t->name, proc_old_dir); 381 remove_proc_entry(t->name, recent_net->ipt_recent);
364#endif 382#endif
365 remove_proc_entry(t->name, recent_proc_dir); 383 remove_proc_entry(t->name, recent_net->xt_recent);
366#endif 384#endif
367 recent_table_flush(t); 385 recent_table_flush(t);
368 kfree(t); 386 kfree(t);
@@ -611,8 +629,65 @@ static const struct file_operations recent_mt_fops = {
611 .release = seq_release_private, 629 .release = seq_release_private,
612 .owner = THIS_MODULE, 630 .owner = THIS_MODULE,
613}; 631};
632
633static int __net_init recent_proc_net_init(struct net *net)
634{
635 struct recent_net *recent_net = recent_pernet(net);
636
637 recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
638 if (!recent_net->xt_recent)
639 return -ENOMEM;
640#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
641 recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
642 if (!recent_net->ipt_recent) {
643 proc_net_remove(net, "xt_recent");
644 return -ENOMEM;
645 }
646#endif
647 return 0;
648}
649
650static void __net_exit recent_proc_net_exit(struct net *net)
651{
652#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
653 proc_net_remove(net, "ipt_recent");
654#endif
655 proc_net_remove(net, "xt_recent");
656}
657#else
658static inline int recent_proc_net_init(struct net *net)
659{
660 return 0;
661}
662
663static inline void recent_proc_net_exit(struct net *net)
664{
665}
614#endif /* CONFIG_PROC_FS */ 666#endif /* CONFIG_PROC_FS */
615 667
668static int __net_init recent_net_init(struct net *net)
669{
670 struct recent_net *recent_net = recent_pernet(net);
671
672 INIT_LIST_HEAD(&recent_net->tables);
673 return recent_proc_net_init(net);
674}
675
676static void __net_exit recent_net_exit(struct net *net)
677{
678 struct recent_net *recent_net = recent_pernet(net);
679
680 BUG_ON(!list_empty(&recent_net->tables));
681 recent_proc_net_exit(net);
682}
683
684static struct pernet_operations recent_net_ops = {
685 .init = recent_net_init,
686 .exit = recent_net_exit,
687 .id = &recent_net_id,
688 .size = sizeof(struct recent_net),
689};
690
616static struct xt_match recent_mt_reg[] __read_mostly = { 691static struct xt_match recent_mt_reg[] __read_mostly = {
617 { 692 {
618 .name = "recent", 693 .name = "recent",
@@ -644,39 +719,19 @@ static int __init recent_mt_init(void)
644 return -EINVAL; 719 return -EINVAL;
645 ip_list_hash_size = 1 << fls(ip_list_tot); 720 ip_list_hash_size = 1 << fls(ip_list_tot);
646 721
647 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 722 err = register_pernet_subsys(&recent_net_ops);
648#ifdef CONFIG_PROC_FS
649 if (err) 723 if (err)
650 return err; 724 return err;
651 recent_proc_dir = proc_mkdir("xt_recent", init_net.proc_net); 725 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
652 if (recent_proc_dir == NULL) { 726 if (err)
653 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 727 unregister_pernet_subsys(&recent_net_ops);
654 err = -ENOMEM;
655 }
656#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
657 if (err < 0)
658 return err;
659 proc_old_dir = proc_mkdir("ipt_recent", init_net.proc_net);
660 if (proc_old_dir == NULL) {
661 remove_proc_entry("xt_recent", init_net.proc_net);
662 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
663 err = -ENOMEM;
664 }
665#endif
666#endif
667 return err; 728 return err;
668} 729}
669 730
670static void __exit recent_mt_exit(void) 731static void __exit recent_mt_exit(void)
671{ 732{
672 BUG_ON(!list_empty(&tables));
673 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 733 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
674#ifdef CONFIG_PROC_FS 734 unregister_pernet_subsys(&recent_net_ops);
675#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
676 remove_proc_entry("ipt_recent", init_net.proc_net);
677#endif
678 remove_proc_entry("xt_recent", init_net.proc_net);
679#endif
680} 735}
681 736
682module_init(recent_mt_init); 737module_init(recent_mt_init);
diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h
new file mode 100644
index 000000000000..6efe4e5a81c6
--- /dev/null
+++ b/net/netfilter/xt_repldata.h
@@ -0,0 +1,35 @@
1/*
2 * Today's hack: quantum tunneling in structs
3 *
4 * 'entries' and 'term' are never anywhere referenced by word in code. In fact,
5 * they serve as the hanging-off data accessed through repl.data[].
6 */
7
8#define xt_alloc_initial_table(type, typ2) ({ \
9 unsigned int hook_mask = info->valid_hooks; \
10 unsigned int nhooks = hweight32(hook_mask); \
11 unsigned int bytes = 0, hooknum = 0, i = 0; \
12 struct { \
13 struct type##_replace repl; \
14 struct type##_standard entries[nhooks]; \
15 struct type##_error term; \
16 } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
17 if (tbl == NULL) \
18 return NULL; \
19 strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
20 tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
21 tbl->repl.valid_hooks = hook_mask; \
22 tbl->repl.num_entries = nhooks + 1; \
23 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
24 sizeof(struct type##_error); \
25 for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \
26 if (!(hook_mask & 1)) \
27 continue; \
28 tbl->repl.hook_entry[hooknum] = bytes; \
29 tbl->repl.underflow[hooknum] = bytes; \
30 tbl->entries[i++] = (struct type##_standard) \
31 typ2##_STANDARD_INIT(NF_ACCEPT); \
32 bytes += sizeof(struct type##_standard); \
33 } \
34 tbl; \
35})
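
xt_repldata.h builds the initial table blob for a hook family in one kzalloc'd struct: the replace header, one standard (accept) rule per hooked chain, and the terminating error entry, laid out back to back so they can be read through repl.data[]. Roughly how a table family wraps it, sketched for IPv4 (the wrapper mirrors the one added to ip_tables.c in the same series; treat names and the include path as indicative rather than exact):

#include <linux/slab.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include "xt_repldata.h"

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	/* Expands with ipt_replace/ipt_standard/ipt_error and the
	 * IPT_STANDARD_INIT()/IPT_ERROR_INIT initializers. */
	return xt_alloc_initial_table(ipt, IPT);
}

A table such as iptable_filter can then bootstrap each namespace by fetching the blob, registering the table with it, and freeing it again: the blob only seeds the initial ruleset, so nothing keeps a reference to it once registration has copied the entries.
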
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index d8c0f8f1a78e..937ce0633e99 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/net.h> 14#include <linux/net.h>
15#include <linux/slab.h>
15 16
16#include <linux/netfilter/xt_statistic.h> 17#include <linux/netfilter/xt_statistic.h>
17#include <linux/netfilter/x_tables.h> 18#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index b4d774111311..96801ffd8af8 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/gfp.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index e639298bc9c8..5f14c8462e30 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -33,6 +33,7 @@
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/audit.h> 35#include <linux/audit.h>
36#include <linux/slab.h>
36#include <net/sock.h> 37#include <net/sock.h>
37#include <net/netlink.h> 38#include <net/netlink.h>
38#include <net/genetlink.h> 39#include <net/genetlink.h>
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index c5d9f97ef217..d37b7f80fa37 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -35,6 +35,7 @@
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/audit.h> 37#include <linux/audit.h>
38#include <linux/slab.h>
38#include <net/netlabel.h> 39#include <net/netlabel.h>
39#include <net/cipso_ipv4.h> 40#include <net/cipso_ipv4.h>
40#include <asm/bug.h> 41#include <asm/bug.h>
@@ -50,9 +51,12 @@ struct netlbl_domhsh_tbl {
50}; 51};
51 52
52/* Domain hash table */ 53/* Domain hash table */
53/* XXX - updates should be so rare that having one spinlock for the entire 54/* updates should be so rare that having one spinlock for the entire hash table
54 * hash table should be okay */ 55 * should be okay */
55static DEFINE_SPINLOCK(netlbl_domhsh_lock); 56static DEFINE_SPINLOCK(netlbl_domhsh_lock);
57#define netlbl_domhsh_rcu_deref(p) \
58 rcu_dereference_check(p, rcu_read_lock_held() || \
59 lockdep_is_held(&netlbl_domhsh_lock))
56static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; 60static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
57static struct netlbl_dom_map *netlbl_domhsh_def = NULL; 61static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
58 62
@@ -106,7 +110,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
106 * Description: 110 * Description:
107 * This is the hashing function for the domain hash table, it returns the 111 * This is the hashing function for the domain hash table, it returns the
108 * correct bucket number for the domain. The caller is responsibile for 112 * correct bucket number for the domain. The caller is responsibile for
109 * calling the rcu_read_[un]lock() functions. 113 * ensuring that the hash table is protected with either a RCU read lock or the
114 * hash table lock.
110 * 115 *
111 */ 116 */
112static u32 netlbl_domhsh_hash(const char *key) 117static u32 netlbl_domhsh_hash(const char *key)
@@ -120,7 +125,7 @@ static u32 netlbl_domhsh_hash(const char *key)
120 125
121 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) 126 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
122 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; 127 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
123 return val & (rcu_dereference(netlbl_domhsh)->size - 1); 128 return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1);
124} 129}
125 130
126/** 131/**
@@ -130,7 +135,8 @@ static u32 netlbl_domhsh_hash(const char *key)
130 * Description: 135 * Description:
131 * Searches the domain hash table and returns a pointer to the hash table 136 * Searches the domain hash table and returns a pointer to the hash table
132 * entry if found, otherwise NULL is returned. The caller is responsibile for 137 * entry if found, otherwise NULL is returned. The caller is responsibile for
133 * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). 138 * ensuring that the hash table is protected with either a RCU read lock or the
139 * hash table lock.
134 * 140 *
135 */ 141 */
136static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) 142static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
@@ -141,7 +147,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
141 147
142 if (domain != NULL) { 148 if (domain != NULL) {
143 bkt = netlbl_domhsh_hash(domain); 149 bkt = netlbl_domhsh_hash(domain);
144 bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; 150 bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
145 list_for_each_entry_rcu(iter, bkt_list, list) 151 list_for_each_entry_rcu(iter, bkt_list, list)
146 if (iter->valid && strcmp(iter->domain, domain) == 0) 152 if (iter->valid && strcmp(iter->domain, domain) == 0)
147 return iter; 153 return iter;
@@ -159,8 +165,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
159 * Searches the domain hash table and returns a pointer to the hash table 165 * Searches the domain hash table and returns a pointer to the hash table
160 * entry if an exact match is found, if an exact match is not present in the 166 * entry if an exact match is found, if an exact match is not present in the
161 * hash table then the default entry is returned if valid otherwise NULL is 167 * hash table then the default entry is returned if valid otherwise NULL is
162 * returned. The caller is responsibile for the rcu hash table locks 168 * returned. The caller is responsibile ensuring that the hash table is
163 * (i.e. the caller much call rcu_read_[un]lock()). 169 * protected with either a RCU read lock or the hash table lock.
164 * 170 *
165 */ 171 */
166static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) 172static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
@@ -169,7 +175,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
169 175
170 entry = netlbl_domhsh_search(domain); 176 entry = netlbl_domhsh_search(domain);
171 if (entry == NULL) { 177 if (entry == NULL) {
172 entry = rcu_dereference(netlbl_domhsh_def); 178 entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def);
173 if (entry != NULL && !entry->valid) 179 if (entry != NULL && !entry->valid)
174 entry = NULL; 180 entry = NULL;
175 } 181 }
@@ -306,8 +312,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
306 struct netlbl_af6list *tmp6; 312 struct netlbl_af6list *tmp6;
307#endif /* IPv6 */ 313#endif /* IPv6 */
308 314
315 /* XXX - we can remove this RCU read lock as the spinlock protects the
316 * entire function, but before we do we need to fixup the
317 * netlbl_af[4,6]list RCU functions to do "the right thing" with
318 * respect to rcu_dereference() when only a spinlock is held. */
309 rcu_read_lock(); 319 rcu_read_lock();
310
311 spin_lock(&netlbl_domhsh_lock); 320 spin_lock(&netlbl_domhsh_lock);
312 if (entry->domain != NULL) 321 if (entry->domain != NULL)
313 entry_old = netlbl_domhsh_search(entry->domain); 322 entry_old = netlbl_domhsh_search(entry->domain);
@@ -315,7 +324,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
315 entry_old = netlbl_domhsh_search_def(entry->domain); 324 entry_old = netlbl_domhsh_search_def(entry->domain);
316 if (entry_old == NULL) { 325 if (entry_old == NULL) {
317 entry->valid = 1; 326 entry->valid = 1;
318 INIT_RCU_HEAD(&entry->rcu);
319 327
320 if (entry->domain != NULL) { 328 if (entry->domain != NULL) {
321 u32 bkt = netlbl_domhsh_hash(entry->domain); 329 u32 bkt = netlbl_domhsh_hash(entry->domain);
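
The netlabel_domainhash.c hunks above (and the matching ones in netlabel_unlabeled.c below) replace bare rcu_dereference() with rcu_dereference_check() plus a lockdep expression, so the hash-table pointers may be dereferenced either inside an RCU read-side critical section or while holding the spinlock the updaters take. A minimal sketch of that pattern, with illustrative example_* names:

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_tbl {
	u32 size;
};

static DEFINE_SPINLOCK(example_lock);		/* held by updaters */
static struct example_tbl *example_tbl;		/* RCU-protected pointer */

/* With CONFIG_PROVE_RCU, warn unless the caller holds either the RCU
 * read lock or the updaters' spinlock. */
#define example_deref(p) \
	rcu_dereference_check(p, rcu_read_lock_held() || \
				 lockdep_is_held(&example_lock))

static u32 example_read_size(void)
{
	struct example_tbl *t;
	u32 size = 0;

	rcu_read_lock();
	t = example_deref(example_tbl);		/* reader side: RCU held */
	if (t != NULL)
		size = t->size;
	rcu_read_unlock();
	return size;
}

static void example_install(struct example_tbl *new)
{
	spin_lock(&example_lock);
	if (example_deref(example_tbl) == NULL)	/* writer side: lock held */
		rcu_assign_pointer(example_tbl, new);
	spin_unlock(&example_lock);
}
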
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 6ce00205f342..1b83e0009d8d 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h>
33#include <linux/audit.h> 34#include <linux/audit.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <linux/in6.h> 36#include <linux/in6.h>
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 8203623e65ad..998e85e895d0 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -34,6 +34,7 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/in6.h> 36#include <linux/in6.h>
37#include <linux/slab.h>
37#include <net/sock.h> 38#include <net/sock.h>
38#include <net/netlink.h> 39#include <net/netlink.h>
39#include <net/genetlink.h> 40#include <net/genetlink.h>
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 98ed22ee2ff4..a3d64aabe2f7 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -43,6 +43,7 @@
43#include <linux/notifier.h> 43#include <linux/notifier.h>
44#include <linux/netdevice.h> 44#include <linux/netdevice.h>
45#include <linux/security.h> 45#include <linux/security.h>
46#include <linux/slab.h>
46#include <net/sock.h> 47#include <net/sock.h>
47#include <net/netlink.h> 48#include <net/netlink.h>
48#include <net/genetlink.h> 49#include <net/genetlink.h>
@@ -114,6 +115,9 @@ struct netlbl_unlhsh_walk_arg {
114/* updates should be so rare that having one spinlock for the entire 115/* updates should be so rare that having one spinlock for the entire
115 * hash table should be okay */ 116 * hash table should be okay */
116static DEFINE_SPINLOCK(netlbl_unlhsh_lock); 117static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
118#define netlbl_unlhsh_rcu_deref(p) \
119 rcu_dereference_check(p, rcu_read_lock_held() || \
120 lockdep_is_held(&netlbl_unlhsh_lock))
117static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; 121static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
118static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; 122static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
119 123
@@ -235,15 +239,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
235 * Description: 239 * Description:
236 * This is the hashing function for the unlabeled hash table, it returns the 240 * This is the hashing function for the unlabeled hash table, it returns the
237 * bucket number for the given device/interface. The caller is responsible for 241 * bucket number for the given device/interface. The caller is responsible for
238 * calling the rcu_read_[un]lock() functions. 242 * ensuring that the hash table is protected with either a RCU read lock or
243 * the hash table lock.
239 * 244 *
240 */ 245 */
241static u32 netlbl_unlhsh_hash(int ifindex) 246static u32 netlbl_unlhsh_hash(int ifindex)
242{ 247{
243 /* this is taken _almost_ directly from 248 return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1);
244 * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much
245 * the same thing */
246 return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1);
247} 249}
248 250
249/** 251/**
@@ -253,7 +255,8 @@ static u32 netlbl_unlhsh_hash(int ifindex)
253 * Description: 255 * Description:
254 * Searches the unlabeled connection hash table and returns a pointer to the 256 * Searches the unlabeled connection hash table and returns a pointer to the
255 * interface entry which matches @ifindex, otherwise NULL is returned. The 257 * interface entry which matches @ifindex, otherwise NULL is returned. The
256 * caller is responsible for calling the rcu_read_[un]lock() functions. 258 * caller is responsible for ensuring that the hash table is protected with
259 * either a RCU read lock or the hash table lock.
257 * 260 *
258 */ 261 */
259static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) 262static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
@@ -263,7 +266,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
263 struct netlbl_unlhsh_iface *iter; 266 struct netlbl_unlhsh_iface *iter;
264 267
265 bkt = netlbl_unlhsh_hash(ifindex); 268 bkt = netlbl_unlhsh_hash(ifindex);
266 bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; 269 bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];
267 list_for_each_entry_rcu(iter, bkt_list, list) 270 list_for_each_entry_rcu(iter, bkt_list, list)
268 if (iter->valid && iter->ifindex == ifindex) 271 if (iter->valid && iter->ifindex == ifindex)
269 return iter; 272 return iter;
@@ -272,33 +275,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
272} 275}
273 276
274/** 277/**
275 * netlbl_unlhsh_search_iface_def - Search for a matching interface entry
276 * @ifindex: the network interface
277 *
278 * Description:
279 * Searches the unlabeled connection hash table and returns a pointer to the
280 * interface entry which matches @ifindex. If an exact match can not be found
281 * and there is a valid default entry, the default entry is returned, otherwise
282 * NULL is returned. The caller is responsible for calling the
283 * rcu_read_[un]lock() functions.
284 *
285 */
286static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
287{
288 struct netlbl_unlhsh_iface *entry;
289
290 entry = netlbl_unlhsh_search_iface(ifindex);
291 if (entry != NULL)
292 return entry;
293
294 entry = rcu_dereference(netlbl_unlhsh_def);
295 if (entry != NULL && entry->valid)
296 return entry;
297
298 return NULL;
299}
300
301/**
302 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table 278 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
303 * @iface: the associated interface entry 279 * @iface: the associated interface entry
304 * @addr: IPv4 address in network byte order 280 * @addr: IPv4 address in network byte order
@@ -308,8 +284,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
308 * Description: 284 * Description:
309 * Add a new address entry into the unlabeled connection hash table using the 285 * Add a new address entry into the unlabeled connection hash table using the
310 * interface entry specified by @iface. On success zero is returned, otherwise 286 * interface entry specified by @iface. On success zero is returned, otherwise
311 * a negative value is returned. The caller is responsible for calling the 287 * a negative value is returned.
312 * rcu_read_[un]lock() functions.
313 * 288 *
314 */ 289 */
315static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, 290static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
@@ -327,7 +302,6 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
327 entry->list.addr = addr->s_addr & mask->s_addr; 302 entry->list.addr = addr->s_addr & mask->s_addr;
328 entry->list.mask = mask->s_addr; 303 entry->list.mask = mask->s_addr;
329 entry->list.valid = 1; 304 entry->list.valid = 1;
330 INIT_RCU_HEAD(&entry->rcu);
331 entry->secid = secid; 305 entry->secid = secid;
332 306
333 spin_lock(&netlbl_unlhsh_lock); 307 spin_lock(&netlbl_unlhsh_lock);
@@ -350,8 +324,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
350 * Description: 324 * Description:
351 * Add a new address entry into the unlabeled connection hash table using the 325 * Add a new address entry into the unlabeled connection hash table using the
352 * interface entry specified by @iface. On success zero is returned, otherwise 326 * interface entry specified by @iface. On success zero is returned, otherwise
353 * a negative value is returned. The caller is responsible for calling the 327 * a negative value is returned.
354 * rcu_read_[un]lock() functions.
355 * 328 *
356 */ 329 */
357static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, 330static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
@@ -373,7 +346,6 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
373 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; 346 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
374 ipv6_addr_copy(&entry->list.mask, mask); 347 ipv6_addr_copy(&entry->list.mask, mask);
375 entry->list.valid = 1; 348 entry->list.valid = 1;
376 INIT_RCU_HEAD(&entry->rcu);
377 entry->secid = secid; 349 entry->secid = secid;
378 350
379 spin_lock(&netlbl_unlhsh_lock); 351 spin_lock(&netlbl_unlhsh_lock);
@@ -393,8 +365,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
393 * Description: 365 * Description:
394 * Add a new, empty, interface entry into the unlabeled connection hash table. 366 * Add a new, empty, interface entry into the unlabeled connection hash table.
395 * On success a pointer to the new interface entry is returned, on failure NULL 367 * On success a pointer to the new interface entry is returned, on failure NULL
396 * is returned. The caller is responsible for calling the rcu_read_[un]lock() 368 * is returned.
397 * functions.
398 * 369 *
399 */ 370 */
400static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) 371static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
@@ -410,7 +381,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
410 INIT_LIST_HEAD(&iface->addr4_list); 381 INIT_LIST_HEAD(&iface->addr4_list);
411 INIT_LIST_HEAD(&iface->addr6_list); 382 INIT_LIST_HEAD(&iface->addr6_list);
412 iface->valid = 1; 383 iface->valid = 1;
413 INIT_RCU_HEAD(&iface->rcu);
414 384
415 spin_lock(&netlbl_unlhsh_lock); 385 spin_lock(&netlbl_unlhsh_lock);
416 if (ifindex > 0) { 386 if (ifindex > 0) {
@@ -418,10 +388,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
418 if (netlbl_unlhsh_search_iface(ifindex) != NULL) 388 if (netlbl_unlhsh_search_iface(ifindex) != NULL)
419 goto add_iface_failure; 389 goto add_iface_failure;
420 list_add_tail_rcu(&iface->list, 390 list_add_tail_rcu(&iface->list,
421 &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); 391 &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);
422 } else { 392 } else {
423 INIT_LIST_HEAD(&iface->list); 393 INIT_LIST_HEAD(&iface->list);
424 if (rcu_dereference(netlbl_unlhsh_def) != NULL) 394 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
425 goto add_iface_failure; 395 goto add_iface_failure;
426 rcu_assign_pointer(netlbl_unlhsh_def, iface); 396 rcu_assign_pointer(netlbl_unlhsh_def, iface);
427 } 397 }
@@ -551,8 +521,7 @@ unlhsh_add_return:
551 * 521 *
552 * Description: 522 * Description:
553 * Remove an IP address entry from the unlabeled connection hash table. 523 * Remove an IP address entry from the unlabeled connection hash table.
554 * Returns zero on success, negative values on failure. The caller is 524 * Returns zero on success, negative values on failure.
555 * responsible for calling the rcu_read_[un]lock() functions.
556 * 525 *
557 */ 526 */
558static int netlbl_unlhsh_remove_addr4(struct net *net, 527static int netlbl_unlhsh_remove_addr4(struct net *net,
@@ -614,8 +583,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
614 * 583 *
615 * Description: 584 * Description:
616 * Remove an IP address entry from the unlabeled connection hash table. 585 * Remove an IP address entry from the unlabeled connection hash table.
617 * Returns zero on success, negative values on failure. The caller is 586 * Returns zero on success, negative values on failure.
618 * responsible for calling the rcu_read_[un]lock() functions.
619 * 587 *
620 */ 588 */
621static int netlbl_unlhsh_remove_addr6(struct net *net, 589static int netlbl_unlhsh_remove_addr6(struct net *net,
@@ -1550,8 +1518,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
1550 struct netlbl_unlhsh_iface *iface; 1518 struct netlbl_unlhsh_iface *iface;
1551 1519
1552 rcu_read_lock(); 1520 rcu_read_lock();
1553 iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); 1521 iface = netlbl_unlhsh_search_iface(skb->skb_iif);
1554 if (iface == NULL) 1522 if (iface == NULL)
1523 iface = rcu_dereference(netlbl_unlhsh_def);
1524 if (iface == NULL || !iface->valid)
1555 goto unlabel_getattr_nolabel; 1525 goto unlabel_getattr_nolabel;
1556 switch (family) { 1526 switch (family) {
1557 case PF_INET: { 1527 case PF_INET: {
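The netlabel_unlabeled.c hunks above swap the plain rcu_dereference() calls for a netlbl_unlhsh_rcu_deref() helper whose definition is outside the hunks shown. A minimal sketch of the lockdep-aware dereference pattern such a helper usually takes in 2.6.34, assuming the update-side lock is the netlbl_unlhsh_lock spinlock visible in the surrounding code (the stand-in declaration below exists only to make the sketch self-contained):

/*
 * Sketch only: lockdep-checked RCU dereference in the style of
 * netlbl_unlhsh_rcu_deref().  Legal either inside an RCU read-side
 * critical section or while holding the update-side spinlock.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(netlbl_unlhsh_lock);	/* stand-in for the real lock */

#define netlbl_unlhsh_rcu_deref(p)					\
	rcu_dereference_check((p),					\
			      rcu_read_lock_held() ||			\
			      lockdep_is_held(&netlbl_unlhsh_lock))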
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 68706b4e3bf8..a3fd75ac3fa5 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -35,6 +35,7 @@
35#include <linux/audit.h> 35#include <linux/audit.h>
36#include <linux/tty.h> 36#include <linux/tty.h>
37#include <linux/security.h> 37#include <linux/security.h>
38#include <linux/gfp.h>
38#include <net/sock.h> 39#include <net/sock.h>
39#include <net/netlink.h> 40#include <net/netlink.h>
40#include <net/genetlink.h> 41#include <net/genetlink.h>
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4c5972ba8c78..795424396aff 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
683 struct netlink_sock *nlk = nlk_sk(sk); 683 struct netlink_sock *nlk = nlk_sk(sk);
684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
685 685
686 if (alen < sizeof(addr->sa_family))
687 return -EINVAL;
688
686 if (addr->sa_family == AF_UNSPEC) { 689 if (addr->sa_family == AF_UNSPEC) {
687 sk->sk_state = NETLINK_UNCONNECTED; 690 sk->sk_state = NETLINK_UNCONNECTED;
688 nlk->dst_pid = 0; 691 nlk->dst_pid = 0;
@@ -1093,6 +1096,7 @@ static inline int do_one_set_err(struct sock *sk,
1093 struct netlink_set_err_data *p) 1096 struct netlink_set_err_data *p)
1094{ 1097{
1095 struct netlink_sock *nlk = nlk_sk(sk); 1098 struct netlink_sock *nlk = nlk_sk(sk);
1099 int ret = 0;
1096 1100
1097 if (sk == p->exclude_sk) 1101 if (sk == p->exclude_sk)
1098 goto out; 1102 goto out;
@@ -1104,10 +1108,15 @@ static inline int do_one_set_err(struct sock *sk,
1104 !test_bit(p->group - 1, nlk->groups)) 1108 !test_bit(p->group - 1, nlk->groups))
1105 goto out; 1109 goto out;
1106 1110
1111 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1112 ret = 1;
1113 goto out;
1114 }
1115
1107 sk->sk_err = p->code; 1116 sk->sk_err = p->code;
1108 sk->sk_error_report(sk); 1117 sk->sk_error_report(sk);
1109out: 1118out:
1110 return 0; 1119 return ret;
1111} 1120}
1112 1121
1113/** 1122/**
@@ -1116,12 +1125,16 @@ out:
1116 * @pid: the PID of a process that we want to skip (if any) 1125 * @pid: the PID of a process that we want to skip (if any)
1117 * @groups: the broadcast group that will notice the error 1126 * @groups: the broadcast group that will notice the error
1118 * @code: error code, must be negative (as usual in kernelspace) 1127 * @code: error code, must be negative (as usual in kernelspace)
1128 *
1129 * This function returns the number of broadcast listeners that have set the
1130 * NETLINK_RECV_NO_ENOBUFS socket option.
1119 */ 1131 */
1120void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1132int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1121{ 1133{
1122 struct netlink_set_err_data info; 1134 struct netlink_set_err_data info;
1123 struct hlist_node *node; 1135 struct hlist_node *node;
1124 struct sock *sk; 1136 struct sock *sk;
1137 int ret = 0;
1125 1138
1126 info.exclude_sk = ssk; 1139 info.exclude_sk = ssk;
1127 info.pid = pid; 1140 info.pid = pid;
@@ -1132,9 +1145,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1132 read_lock(&nl_table_lock); 1145 read_lock(&nl_table_lock);
1133 1146
1134 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1147 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1135 do_one_set_err(sk, &info); 1148 ret += do_one_set_err(sk, &info);
1136 1149
1137 read_unlock(&nl_table_lock); 1150 read_unlock(&nl_table_lock);
1151 return ret;
1138} 1152}
1139EXPORT_SYMBOL(netlink_set_err); 1153EXPORT_SYMBOL(netlink_set_err);
1140 1154
@@ -1978,12 +1992,12 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1978 if (v == SEQ_START_TOKEN) 1992 if (v == SEQ_START_TOKEN)
1979 seq_puts(seq, 1993 seq_puts(seq,
1980 "sk Eth Pid Groups " 1994 "sk Eth Pid Groups "
1981 "Rmem Wmem Dump Locks Drops\n"); 1995 "Rmem Wmem Dump Locks Drops Inode\n");
1982 else { 1996 else {
1983 struct sock *s = v; 1997 struct sock *s = v;
1984 struct netlink_sock *nlk = nlk_sk(s); 1998 struct netlink_sock *nlk = nlk_sk(s);
1985 1999
1986 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n", 2000 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
1987 s, 2001 s,
1988 s->sk_protocol, 2002 s->sk_protocol,
1989 nlk->pid, 2003 nlk->pid,
@@ -1992,7 +2006,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1992 sk_wmem_alloc_get(s), 2006 sk_wmem_alloc_get(s),
1993 nlk->cb, 2007 nlk->cb,
1994 atomic_read(&s->sk_refcnt), 2008 atomic_read(&s->sk_refcnt),
1995 atomic_read(&s->sk_drops) 2009 atomic_read(&s->sk_drops),
2010 sock_i_ino(s)
1996 ); 2011 );
1997 2012
1998 } 2013 }
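The do_one_set_err()/netlink_set_err() change above makes the kernel count listeners that opted out of ENOBUFS delivery, so a caller can tell whether any "reliable" subscriber is still listening. The opt-out itself is the NETLINK_NO_ENOBUFS socket option set from userspace; a minimal sketch, where NETLINK_ROUTE and RTNLGRP_LINK are placeholder choices and error handling is omitted:

/*
 * Userspace view of the NETLINK_RECV_NO_ENOBUFS flag tested in
 * do_one_set_err(): a listener opts out of ENOBUFS error delivery with
 * the NETLINK_NO_ENOBUFS socket option and joins a multicast group so
 * netlink_set_err() would consider it.
 */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int open_no_enobufs_listener(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	int on = 1;
	unsigned int group = RTNLGRP_LINK;

	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &group, sizeof(group));
	return fd;
}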
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d07ecda0a92d..06438fa2b1e5 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/slab.h>
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/socket.h> 14#include <linux/socket.h>
@@ -681,9 +682,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
681 int chains_to_skip = cb->args[0]; 682 int chains_to_skip = cb->args[0];
682 int fams_to_skip = cb->args[1]; 683 int fams_to_skip = cb->args[1];
683 684
684 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 685 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
685 if (i < chains_to_skip)
686 continue;
687 n = 0; 686 n = 0;
688 list_for_each_entry(rt, genl_family_chain(i), family_list) { 687 list_for_each_entry(rt, genl_family_chain(i), family_list) {
689 if (!rt->netnsok && !net_eq(net, &init_net)) 688 if (!rt->netnsok && !net_eq(net, &init_net))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71604c6613b5..fa07f044b599 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/socket.h> 16#include <linux/socket.h>
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/slab.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/sched.h> 20#include <linux/sched.h>
20#include <linux/timer.h> 21#include <linux/timer.h>
@@ -1267,28 +1268,13 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1267 1268
1268static void *nr_info_start(struct seq_file *seq, loff_t *pos) 1269static void *nr_info_start(struct seq_file *seq, loff_t *pos)
1269{ 1270{
1270 struct sock *s;
1271 struct hlist_node *node;
1272 int i = 1;
1273
1274 spin_lock_bh(&nr_list_lock); 1271 spin_lock_bh(&nr_list_lock);
1275 if (*pos == 0) 1272 return seq_hlist_start_head(&nr_list, *pos);
1276 return SEQ_START_TOKEN;
1277
1278 sk_for_each(s, node, &nr_list) {
1279 if (i == *pos)
1280 return s;
1281 ++i;
1282 }
1283 return NULL;
1284} 1273}
1285 1274
1286static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) 1275static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
1287{ 1276{
1288 ++*pos; 1277 return seq_hlist_next(v, &nr_list, pos);
1289
1290 return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
1291 : sk_next((struct sock *)v);
1292} 1278}
1293 1279
1294static void nr_info_stop(struct seq_file *seq, void *v) 1280static void nr_info_stop(struct seq_file *seq, void *v)
@@ -1298,7 +1284,7 @@ static void nr_info_stop(struct seq_file *seq, void *v)
1298 1284
1299static int nr_info_show(struct seq_file *seq, void *v) 1285static int nr_info_show(struct seq_file *seq, void *v)
1300{ 1286{
1301 struct sock *s = v; 1287 struct sock *s = sk_entry(v);
1302 struct net_device *dev; 1288 struct net_device *dev;
1303 struct nr_sock *nr; 1289 struct nr_sock *nr;
1304 const char *devname; 1290 const char *devname;
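The nr_info_* conversion above replaces the hand-rolled /proc iterator with the generic seq_hlist helpers. The same three-callback shape, sketched with placeholder names (example_list, example_lock and the output format are not from the patch):

/*
 * Sketch of the seq_hlist pattern used by the conversion above.
 */
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/sock.h>

static HLIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&example_lock);
	/* *pos == 0 yields SEQ_START_TOKEN, otherwise the (*pos - 1)-th node */
	return seq_hlist_start_head(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &example_list, pos);
}

static void example_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&example_lock);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "address\n");
		return 0;
	}
	/* v is a struct hlist_node *; sk_entry() maps it back to the sock */
	seq_printf(seq, "%p\n", sk_entry(v));
	return 0;
}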
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 7aa11b01b2e2..64e6dde9749d 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -19,6 +19,7 @@
19#include <linux/fcntl.h> 19#include <linux/fcntl.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/if_ether.h> /* For the statistics structure. */ 21#include <linux/if_ether.h> /* For the statistics structure. */
22#include <linux/slab.h>
22 23
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 68176483617f..6d4ef6d65b3d 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index f324d5df4186..94d4e922af53 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -7,6 +7,7 @@
7 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) 7 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
8 */ 8 */
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/slab.h>
10#include <linux/socket.h> 11#include <linux/socket.h>
11#include <linux/timer.h> 12#include <linux/timer.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index e3e6c44e1890..607fddb4fdbb 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e2e2d33cafdf..44059d0c8dd1 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/sockios.h> 18#include <linux/sockios.h>
19#include <linux/net.h> 19#include <linux/net.h>
20#include <linux/slab.h>
20#include <net/ax25.h> 21#include <net/ax25.h>
21#include <linux/inet.h> 22#include <linux/inet.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
@@ -863,33 +864,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
863 864
864static void *nr_node_start(struct seq_file *seq, loff_t *pos) 865static void *nr_node_start(struct seq_file *seq, loff_t *pos)
865{ 866{
866 struct nr_node *nr_node;
867 struct hlist_node *node;
868 int i = 1;
869
870 spin_lock_bh(&nr_node_list_lock); 867 spin_lock_bh(&nr_node_list_lock);
871 if (*pos == 0) 868 return seq_hlist_start_head(&nr_node_list, *pos);
872 return SEQ_START_TOKEN;
873
874 nr_node_for_each(nr_node, node, &nr_node_list) {
875 if (i == *pos)
876 return nr_node;
877 ++i;
878 }
879
880 return NULL;
881} 869}
882 870
883static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos) 871static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
884{ 872{
885 struct hlist_node *node; 873 return seq_hlist_next(v, &nr_node_list, pos);
886 ++*pos;
887
888 node = (v == SEQ_START_TOKEN)
889 ? nr_node_list.first
890 : ((struct nr_node *)v)->node_node.next;
891
892 return hlist_entry(node, struct nr_node, node_node);
893} 874}
894 875
895static void nr_node_stop(struct seq_file *seq, void *v) 876static void nr_node_stop(struct seq_file *seq, void *v)
@@ -906,7 +887,9 @@ static int nr_node_show(struct seq_file *seq, void *v)
906 seq_puts(seq, 887 seq_puts(seq,
907 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n"); 888 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
908 else { 889 else {
909 struct nr_node *nr_node = v; 890 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
891 node_node);
892
910 nr_node_lock(nr_node); 893 nr_node_lock(nr_node);
911 seq_printf(seq, "%-9s %-7s %d %d", 894 seq_printf(seq, "%-9s %-7s %d %d",
912 ax2asc(buf, &nr_node->callsign), 895 ax2asc(buf, &nr_node->callsign),
@@ -949,31 +932,13 @@ const struct file_operations nr_nodes_fops = {
949 932
950static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) 933static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
951{ 934{
952 struct nr_neigh *nr_neigh;
953 struct hlist_node *node;
954 int i = 1;
955
956 spin_lock_bh(&nr_neigh_list_lock); 935 spin_lock_bh(&nr_neigh_list_lock);
957 if (*pos == 0) 936 return seq_hlist_start_head(&nr_neigh_list, *pos);
958 return SEQ_START_TOKEN;
959
960 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
961 if (i == *pos)
962 return nr_neigh;
963 }
964 return NULL;
965} 937}
966 938
967static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 939static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
968{ 940{
969 struct hlist_node *node; 941 return seq_hlist_next(v, &nr_neigh_list, pos);
970 ++*pos;
971
972 node = (v == SEQ_START_TOKEN)
973 ? nr_neigh_list.first
974 : ((struct nr_neigh *)v)->neigh_node.next;
975
976 return hlist_entry(node, struct nr_neigh, neigh_node);
977} 942}
978 943
979static void nr_neigh_stop(struct seq_file *seq, void *v) 944static void nr_neigh_stop(struct seq_file *seq, void *v)
@@ -989,8 +954,9 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
989 if (v == SEQ_START_TOKEN) 954 if (v == SEQ_START_TOKEN)
990 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); 955 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
991 else { 956 else {
992 struct nr_neigh *nr_neigh = v; 957 struct nr_neigh *nr_neigh;
993 958
959 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
994 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 960 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
995 nr_neigh->number, 961 nr_neigh->number,
996 ax2asc(buf, &nr_neigh->callsign), 962 ax2asc(buf, &nr_neigh->callsign),
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 04e7d0d2fd8f..6a947ae50dbd 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/slab.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 34ff93ff894d..0060e3b396b7 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,13 +14,3 @@ config PACKET
14 be called af_packet. 14 be called af_packet.
15 15
16 If unsure, say Y. 16 If unsure, say Y.
17
18config PACKET_MMAP
19 bool "Packet socket: mmapped IO"
20 depends on PACKET
21 help
22 If you say Y here, the Packet protocol driver will use an IO
23 mechanism that results in faster communication.
24
25 If unsure, say N.
26
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f126d18dbdc4..243946d4809d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -60,6 +60,7 @@
60#include <linux/wireless.h> 60#include <linux/wireless.h>
61#include <linux/kernel.h> 61#include <linux/kernel.h>
62#include <linux/kmod.h> 62#include <linux/kmod.h>
63#include <linux/slab.h>
63#include <net/net_namespace.h> 64#include <net/net_namespace.h>
64#include <net/ip.h> 65#include <net/ip.h>
65#include <net/protocol.h> 66#include <net/protocol.h>
@@ -80,6 +81,7 @@
80#include <linux/init.h> 81#include <linux/init.h>
81#include <linux/mutex.h> 82#include <linux/mutex.h>
82#include <linux/if_vlan.h> 83#include <linux/if_vlan.h>
84#include <linux/virtio_net.h>
83 85
84#ifdef CONFIG_INET 86#ifdef CONFIG_INET
85#include <net/inet_common.h> 87#include <net/inet_common.h>
@@ -156,7 +158,6 @@ struct packet_mreq_max {
156 unsigned char mr_address[MAX_ADDR_LEN]; 158 unsigned char mr_address[MAX_ADDR_LEN];
157}; 159};
158 160
159#ifdef CONFIG_PACKET_MMAP
160static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 161static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
161 int closing, int tx_ring); 162 int closing, int tx_ring);
162 163
@@ -176,7 +177,6 @@ struct packet_ring_buffer {
176 177
177struct packet_sock; 178struct packet_sock;
178static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 179static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
179#endif
180 180
181static void packet_flush_mclist(struct sock *sk); 181static void packet_flush_mclist(struct sock *sk);
182 182
@@ -184,26 +184,23 @@ struct packet_sock {
184 /* struct sock has to be the first member of packet_sock */ 184 /* struct sock has to be the first member of packet_sock */
185 struct sock sk; 185 struct sock sk;
186 struct tpacket_stats stats; 186 struct tpacket_stats stats;
187#ifdef CONFIG_PACKET_MMAP
188 struct packet_ring_buffer rx_ring; 187 struct packet_ring_buffer rx_ring;
189 struct packet_ring_buffer tx_ring; 188 struct packet_ring_buffer tx_ring;
190 int copy_thresh; 189 int copy_thresh;
191#endif
192 spinlock_t bind_lock; 190 spinlock_t bind_lock;
193 struct mutex pg_vec_lock; 191 struct mutex pg_vec_lock;
194 unsigned int running:1, /* prot_hook is attached*/ 192 unsigned int running:1, /* prot_hook is attached*/
195 auxdata:1, 193 auxdata:1,
196 origdev:1; 194 origdev:1,
195 has_vnet_hdr:1;
197 int ifindex; /* bound device */ 196 int ifindex; /* bound device */
198 __be16 num; 197 __be16 num;
199 struct packet_mclist *mclist; 198 struct packet_mclist *mclist;
200#ifdef CONFIG_PACKET_MMAP
201 atomic_t mapped; 199 atomic_t mapped;
202 enum tpacket_versions tp_version; 200 enum tpacket_versions tp_version;
203 unsigned int tp_hdrlen; 201 unsigned int tp_hdrlen;
204 unsigned int tp_reserve; 202 unsigned int tp_reserve;
205 unsigned int tp_loss:1; 203 unsigned int tp_loss:1;
206#endif
207 struct packet_type prot_hook ____cacheline_aligned_in_smp; 204 struct packet_type prot_hook ____cacheline_aligned_in_smp;
208}; 205};
209 206
@@ -217,8 +214,6 @@ struct packet_skb_cb {
217 214
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 215#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 216
220#ifdef CONFIG_PACKET_MMAP
221
222static void __packet_set_status(struct packet_sock *po, void *frame, int status) 217static void __packet_set_status(struct packet_sock *po, void *frame, int status)
223{ 218{
224 union { 219 union {
@@ -313,8 +308,6 @@ static inline void packet_increment_head(struct packet_ring_buffer *buff)
313 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 308 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
314} 309}
315 310
316#endif
317
318static inline struct packet_sock *pkt_sk(struct sock *sk) 311static inline struct packet_sock *pkt_sk(struct sock *sk)
319{ 312{
320 return (struct packet_sock *)sk; 313 return (struct packet_sock *)sk;
@@ -508,7 +501,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
508 struct sk_filter *filter; 501 struct sk_filter *filter;
509 502
510 rcu_read_lock_bh(); 503 rcu_read_lock_bh();
511 filter = rcu_dereference(sk->sk_filter); 504 filter = rcu_dereference_bh(sk->sk_filter);
512 if (filter != NULL) 505 if (filter != NULL)
513 res = sk_run_filter(skb, filter->insns, filter->len); 506 res = sk_run_filter(skb, filter->insns, filter->len);
514 rcu_read_unlock_bh(); 507 rcu_read_unlock_bh();
@@ -638,7 +631,6 @@ drop:
638 return 0; 631 return 0;
639} 632}
640 633
641#ifdef CONFIG_PACKET_MMAP
642static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 634static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
643 struct packet_type *pt, struct net_device *orig_dev) 635 struct packet_type *pt, struct net_device *orig_dev)
644{ 636{
@@ -1054,7 +1046,30 @@ out:
1054 mutex_unlock(&po->pg_vec_lock); 1046 mutex_unlock(&po->pg_vec_lock);
1055 return err; 1047 return err;
1056} 1048}
1057#endif 1049
1050static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1051 size_t reserve, size_t len,
1052 size_t linear, int noblock,
1053 int *err)
1054{
1055 struct sk_buff *skb;
1056
1057 /* Under a page? Don't bother with paged skb. */
1058 if (prepad + len < PAGE_SIZE || !linear)
1059 linear = len;
1060
1061 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1062 err);
1063 if (!skb)
1064 return NULL;
1065
1066 skb_reserve(skb, reserve);
1067 skb_put(skb, linear);
1068 skb->data_len = len - linear;
1069 skb->len += len - linear;
1070
1071 return skb;
1072}
1058 1073
1059static int packet_snd(struct socket *sock, 1074static int packet_snd(struct socket *sock,
1060 struct msghdr *msg, size_t len) 1075 struct msghdr *msg, size_t len)
@@ -1066,14 +1081,17 @@ static int packet_snd(struct socket *sock,
1066 __be16 proto; 1081 __be16 proto;
1067 unsigned char *addr; 1082 unsigned char *addr;
1068 int ifindex, err, reserve = 0; 1083 int ifindex, err, reserve = 0;
1084 struct virtio_net_hdr vnet_hdr = { 0 };
1085 int offset = 0;
1086 int vnet_hdr_len;
1087 struct packet_sock *po = pkt_sk(sk);
1088 unsigned short gso_type = 0;
1069 1089
1070 /* 1090 /*
1071 * Get and verify the address. 1091 * Get and verify the address.
1072 */ 1092 */
1073 1093
1074 if (saddr == NULL) { 1094 if (saddr == NULL) {
1075 struct packet_sock *po = pkt_sk(sk);
1076
1077 ifindex = po->ifindex; 1095 ifindex = po->ifindex;
1078 proto = po->num; 1096 proto = po->num;
1079 addr = NULL; 1097 addr = NULL;
@@ -1100,25 +1118,74 @@ static int packet_snd(struct socket *sock,
1100 if (!(dev->flags & IFF_UP)) 1118 if (!(dev->flags & IFF_UP))
1101 goto out_unlock; 1119 goto out_unlock;
1102 1120
1121 if (po->has_vnet_hdr) {
1122 vnet_hdr_len = sizeof(vnet_hdr);
1123
1124 err = -EINVAL;
1125 if (len < vnet_hdr_len)
1126 goto out_unlock;
1127
1128 len -= vnet_hdr_len;
1129
1130 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1131 vnet_hdr_len);
1132 if (err < 0)
1133 goto out_unlock;
1134
1135 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1136 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1137 vnet_hdr.hdr_len))
1138 vnet_hdr.hdr_len = vnet_hdr.csum_start +
1139 vnet_hdr.csum_offset + 2;
1140
1141 err = -EINVAL;
1142 if (vnet_hdr.hdr_len > len)
1143 goto out_unlock;
1144
1145 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1146 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1147 case VIRTIO_NET_HDR_GSO_TCPV4:
1148 gso_type = SKB_GSO_TCPV4;
1149 break;
1150 case VIRTIO_NET_HDR_GSO_TCPV6:
1151 gso_type = SKB_GSO_TCPV6;
1152 break;
1153 case VIRTIO_NET_HDR_GSO_UDP:
1154 gso_type = SKB_GSO_UDP;
1155 break;
1156 default:
1157 goto out_unlock;
1158 }
1159
1160 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1161 gso_type |= SKB_GSO_TCP_ECN;
1162
1163 if (vnet_hdr.gso_size == 0)
1164 goto out_unlock;
1165
1166 }
1167 }
1168
1103 err = -EMSGSIZE; 1169 err = -EMSGSIZE;
1104 if (len > dev->mtu+reserve) 1170 if (!gso_type && (len > dev->mtu+reserve))
1105 goto out_unlock; 1171 goto out_unlock;
1106 1172
1107 skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), 1173 err = -ENOBUFS;
1108 msg->msg_flags & MSG_DONTWAIT, &err); 1174 skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1175 LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1176 msg->msg_flags & MSG_DONTWAIT, &err);
1109 if (skb == NULL) 1177 if (skb == NULL)
1110 goto out_unlock; 1178 goto out_unlock;
1111 1179
1112 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 1180 skb_set_network_header(skb, reserve);
1113 skb_reset_network_header(skb);
1114 1181
1115 err = -EINVAL; 1182 err = -EINVAL;
1116 if (sock->type == SOCK_DGRAM && 1183 if (sock->type == SOCK_DGRAM &&
1117 dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0) 1184 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1118 goto out_free; 1185 goto out_free;
1119 1186
1120 /* Returns -EFAULT on error */ 1187 /* Returns -EFAULT on error */
1121 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1188 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1122 if (err) 1189 if (err)
1123 goto out_free; 1190 goto out_free;
1124 1191
@@ -1127,6 +1194,25 @@ static int packet_snd(struct socket *sock,
1127 skb->priority = sk->sk_priority; 1194 skb->priority = sk->sk_priority;
1128 skb->mark = sk->sk_mark; 1195 skb->mark = sk->sk_mark;
1129 1196
1197 if (po->has_vnet_hdr) {
1198 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1199 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1200 vnet_hdr.csum_offset)) {
1201 err = -EINVAL;
1202 goto out_free;
1203 }
1204 }
1205
1206 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1207 skb_shinfo(skb)->gso_type = gso_type;
1208
1209 /* Header must be checked, and gso_segs computed. */
1210 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1211 skb_shinfo(skb)->gso_segs = 0;
1212
1213 len += vnet_hdr_len;
1214 }
1215
1130 /* 1216 /*
1131 * Now send it 1217 * Now send it
1132 */ 1218 */
@@ -1151,13 +1237,11 @@ out:
1151static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 1237static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 struct msghdr *msg, size_t len) 1238 struct msghdr *msg, size_t len)
1153{ 1239{
1154#ifdef CONFIG_PACKET_MMAP
1155 struct sock *sk = sock->sk; 1240 struct sock *sk = sock->sk;
1156 struct packet_sock *po = pkt_sk(sk); 1241 struct packet_sock *po = pkt_sk(sk);
1157 if (po->tx_ring.pg_vec) 1242 if (po->tx_ring.pg_vec)
1158 return tpacket_snd(po, msg); 1243 return tpacket_snd(po, msg);
1159 else 1244 else
1160#endif
1161 return packet_snd(sock, msg, len); 1245 return packet_snd(sock, msg, len);
1162} 1246}
1163 1247
@@ -1171,9 +1255,7 @@ static int packet_release(struct socket *sock)
1171 struct sock *sk = sock->sk; 1255 struct sock *sk = sock->sk;
1172 struct packet_sock *po; 1256 struct packet_sock *po;
1173 struct net *net; 1257 struct net *net;
1174#ifdef CONFIG_PACKET_MMAP
1175 struct tpacket_req req; 1258 struct tpacket_req req;
1176#endif
1177 1259
1178 if (!sk) 1260 if (!sk)
1179 return 0; 1261 return 0;
@@ -1181,28 +1263,25 @@ static int packet_release(struct socket *sock)
1181 net = sock_net(sk); 1263 net = sock_net(sk);
1182 po = pkt_sk(sk); 1264 po = pkt_sk(sk);
1183 1265
1184 write_lock_bh(&net->packet.sklist_lock); 1266 spin_lock_bh(&net->packet.sklist_lock);
1185 sk_del_node_init(sk); 1267 sk_del_node_init_rcu(sk);
1186 sock_prot_inuse_add(net, sk->sk_prot, -1); 1268 sock_prot_inuse_add(net, sk->sk_prot, -1);
1187 write_unlock_bh(&net->packet.sklist_lock); 1269 spin_unlock_bh(&net->packet.sklist_lock);
1188
1189 /*
1190 * Unhook packet receive handler.
1191 */
1192 1270
1271 spin_lock(&po->bind_lock);
1193 if (po->running) { 1272 if (po->running) {
1194 /* 1273 /*
1195 * Remove the protocol hook 1274 * Remove from protocol table
1196 */ 1275 */
1197 dev_remove_pack(&po->prot_hook);
1198 po->running = 0; 1276 po->running = 0;
1199 po->num = 0; 1277 po->num = 0;
1278 __dev_remove_pack(&po->prot_hook);
1200 __sock_put(sk); 1279 __sock_put(sk);
1201 } 1280 }
1281 spin_unlock(&po->bind_lock);
1202 1282
1203 packet_flush_mclist(sk); 1283 packet_flush_mclist(sk);
1204 1284
1205#ifdef CONFIG_PACKET_MMAP
1206 memset(&req, 0, sizeof(req)); 1285 memset(&req, 0, sizeof(req));
1207 1286
1208 if (po->rx_ring.pg_vec) 1287 if (po->rx_ring.pg_vec)
@@ -1210,12 +1289,11 @@ static int packet_release(struct socket *sock)
1210 1289
1211 if (po->tx_ring.pg_vec) 1290 if (po->tx_ring.pg_vec)
1212 packet_set_ring(sk, &req, 1, 1); 1291 packet_set_ring(sk, &req, 1, 1);
1213#endif
1214 1292
1293 synchronize_net();
1215 /* 1294 /*
1216 * Now the socket is dead. No more input will appear. 1295 * Now the socket is dead. No more input will appear.
1217 */ 1296 */
1218
1219 sock_orphan(sk); 1297 sock_orphan(sk);
1220 sock->sk = NULL; 1298 sock->sk = NULL;
1221 1299
@@ -1399,10 +1477,11 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
1399 po->running = 1; 1477 po->running = 1;
1400 } 1478 }
1401 1479
1402 write_lock_bh(&net->packet.sklist_lock); 1480 spin_lock_bh(&net->packet.sklist_lock);
1403 sk_add_node(sk, &net->packet.sklist); 1481 sk_add_node_rcu(sk, &net->packet.sklist);
1404 sock_prot_inuse_add(net, &packet_proto, 1); 1482 sock_prot_inuse_add(net, &packet_proto, 1);
1405 write_unlock_bh(&net->packet.sklist_lock); 1483 spin_unlock_bh(&net->packet.sklist_lock);
1484
1406 return 0; 1485 return 0;
1407out: 1486out:
1408 return err; 1487 return err;
@@ -1420,6 +1499,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1420 struct sk_buff *skb; 1499 struct sk_buff *skb;
1421 int copied, err; 1500 int copied, err;
1422 struct sockaddr_ll *sll; 1501 struct sockaddr_ll *sll;
1502 int vnet_hdr_len = 0;
1423 1503
1424 err = -EINVAL; 1504 err = -EINVAL;
1425 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) 1505 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1451,6 +1531,48 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1451 if (skb == NULL) 1531 if (skb == NULL)
1452 goto out; 1532 goto out;
1453 1533
1534 if (pkt_sk(sk)->has_vnet_hdr) {
1535 struct virtio_net_hdr vnet_hdr = { 0 };
1536
1537 err = -EINVAL;
1538 vnet_hdr_len = sizeof(vnet_hdr);
1539 if ((len -= vnet_hdr_len) < 0)
1540 goto out_free;
1541
1542 if (skb_is_gso(skb)) {
1543 struct skb_shared_info *sinfo = skb_shinfo(skb);
1544
1545 /* This is a hint as to how much should be linear. */
1546 vnet_hdr.hdr_len = skb_headlen(skb);
1547 vnet_hdr.gso_size = sinfo->gso_size;
1548 if (sinfo->gso_type & SKB_GSO_TCPV4)
1549 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1550 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1551 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1552 else if (sinfo->gso_type & SKB_GSO_UDP)
1553 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1554 else if (sinfo->gso_type & SKB_GSO_FCOE)
1555 goto out_free;
1556 else
1557 BUG();
1558 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1559 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1560 } else
1561 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1562
1563 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1564 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1565 vnet_hdr.csum_start = skb->csum_start -
1566 skb_headroom(skb);
1567 vnet_hdr.csum_offset = skb->csum_offset;
1568 } /* else everything is zero */
1569
1570 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1571 vnet_hdr_len);
1572 if (err < 0)
1573 goto out_free;
1574 }
1575
1454 /* 1576 /*
1455 * If the address length field is there to be filled in, we fill 1577 * If the address length field is there to be filled in, we fill
1456 * it in now. 1578 * it in now.
@@ -1502,7 +1624,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1502 * Free or return the buffer as appropriate. Again this 1624 * Free or return the buffer as appropriate. Again this
1503 * hides all the races and re-entrancy issues from us. 1625 * hides all the races and re-entrancy issues from us.
1504 */ 1626 */
1505 err = (flags&MSG_TRUNC) ? skb->len : copied; 1627 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1506 1628
1507out_free: 1629out_free:
1508 skb_free_datagram(sk, skb); 1630 skb_free_datagram(sk, skb);
@@ -1567,6 +1689,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1567{ 1689{
1568 switch (i->type) { 1690 switch (i->type) {
1569 case PACKET_MR_MULTICAST: 1691 case PACKET_MR_MULTICAST:
1692 if (i->alen != dev->addr_len)
1693 return -EINVAL;
1570 if (what > 0) 1694 if (what > 0)
1571 return dev_mc_add(dev, i->addr, i->alen, 0); 1695 return dev_mc_add(dev, i->addr, i->alen, 0);
1572 else 1696 else
@@ -1579,6 +1703,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1579 return dev_set_allmulti(dev, what); 1703 return dev_set_allmulti(dev, what);
1580 break; 1704 break;
1581 case PACKET_MR_UNICAST: 1705 case PACKET_MR_UNICAST:
1706 if (i->alen != dev->addr_len)
1707 return -EINVAL;
1582 if (what > 0) 1708 if (what > 0)
1583 return dev_unicast_add(dev, i->addr); 1709 return dev_unicast_add(dev, i->addr);
1584 else 1710 else
@@ -1732,7 +1858,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1732 return ret; 1858 return ret;
1733 } 1859 }
1734 1860
1735#ifdef CONFIG_PACKET_MMAP
1736 case PACKET_RX_RING: 1861 case PACKET_RX_RING:
1737 case PACKET_TX_RING: 1862 case PACKET_TX_RING:
1738 { 1863 {
@@ -1740,6 +1865,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1740 1865
1741 if (optlen < sizeof(req)) 1866 if (optlen < sizeof(req))
1742 return -EINVAL; 1867 return -EINVAL;
1868 if (pkt_sk(sk)->has_vnet_hdr)
1869 return -EINVAL;
1743 if (copy_from_user(&req, optval, sizeof(req))) 1870 if (copy_from_user(&req, optval, sizeof(req)))
1744 return -EFAULT; 1871 return -EFAULT;
1745 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 1872 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
@@ -1801,7 +1928,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1801 po->tp_loss = !!val; 1928 po->tp_loss = !!val;
1802 return 0; 1929 return 0;
1803 } 1930 }
1804#endif
1805 case PACKET_AUXDATA: 1931 case PACKET_AUXDATA:
1806 { 1932 {
1807 int val; 1933 int val;
@@ -1826,6 +1952,22 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1826 po->origdev = !!val; 1952 po->origdev = !!val;
1827 return 0; 1953 return 0;
1828 } 1954 }
1955 case PACKET_VNET_HDR:
1956 {
1957 int val;
1958
1959 if (sock->type != SOCK_RAW)
1960 return -EINVAL;
1961 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1962 return -EBUSY;
1963 if (optlen < sizeof(val))
1964 return -EINVAL;
1965 if (copy_from_user(&val, optval, sizeof(val)))
1966 return -EFAULT;
1967
1968 po->has_vnet_hdr = !!val;
1969 return 0;
1970 }
1829 default: 1971 default:
1830 return -ENOPROTOOPT; 1972 return -ENOPROTOOPT;
1831 } 1973 }
@@ -1876,7 +2018,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1876 2018
1877 data = &val; 2019 data = &val;
1878 break; 2020 break;
1879#ifdef CONFIG_PACKET_MMAP 2021 case PACKET_VNET_HDR:
2022 if (len > sizeof(int))
2023 len = sizeof(int);
2024 val = po->has_vnet_hdr;
2025
2026 data = &val;
2027 break;
1880 case PACKET_VERSION: 2028 case PACKET_VERSION:
1881 if (len > sizeof(int)) 2029 if (len > sizeof(int))
1882 len = sizeof(int); 2030 len = sizeof(int);
@@ -1912,7 +2060,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1912 val = po->tp_loss; 2060 val = po->tp_loss;
1913 data = &val; 2061 data = &val;
1914 break; 2062 break;
1915#endif
1916 default: 2063 default:
1917 return -ENOPROTOOPT; 2064 return -ENOPROTOOPT;
1918 } 2065 }
@@ -1932,8 +2079,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1932 struct net_device *dev = data; 2079 struct net_device *dev = data;
1933 struct net *net = dev_net(dev); 2080 struct net *net = dev_net(dev);
1934 2081
1935 read_lock(&net->packet.sklist_lock); 2082 rcu_read_lock();
1936 sk_for_each(sk, node, &net->packet.sklist) { 2083 sk_for_each_rcu(sk, node, &net->packet.sklist) {
1937 struct packet_sock *po = pkt_sk(sk); 2084 struct packet_sock *po = pkt_sk(sk);
1938 2085
1939 switch (msg) { 2086 switch (msg) {
@@ -1961,18 +2108,19 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1961 } 2108 }
1962 break; 2109 break;
1963 case NETDEV_UP: 2110 case NETDEV_UP:
1964 spin_lock(&po->bind_lock); 2111 if (dev->ifindex == po->ifindex) {
1965 if (dev->ifindex == po->ifindex && po->num && 2112 spin_lock(&po->bind_lock);
1966 !po->running) { 2113 if (po->num && !po->running) {
1967 dev_add_pack(&po->prot_hook); 2114 dev_add_pack(&po->prot_hook);
1968 sock_hold(sk); 2115 sock_hold(sk);
1969 po->running = 1; 2116 po->running = 1;
2117 }
2118 spin_unlock(&po->bind_lock);
1970 } 2119 }
1971 spin_unlock(&po->bind_lock);
1972 break; 2120 break;
1973 } 2121 }
1974 } 2122 }
1975 read_unlock(&net->packet.sklist_lock); 2123 rcu_read_unlock();
1976 return NOTIFY_DONE; 2124 return NOTIFY_DONE;
1977} 2125}
1978 2126
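The notifier walk above (and the /proc iterator later in this file) now traverses net->packet.sklist under rcu_read_lock() instead of the old reader-writer lock, while writers take the new spinlock, use the _rcu list helpers, and call synchronize_net() before the socket can be torn down. A condensed sketch of that reader/writer split, with placeholder names:

/*
 * Sketch of the RCU-protected socket list pattern behind the af_packet
 * sklist conversion; "mysk_list"/"mysk_lock" are placeholders.
 */
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/sock.h>

static HLIST_HEAD(mysk_list);
static DEFINE_SPINLOCK(mysk_lock);

static void mysk_walk(void)
{
	struct sock *sk;
	struct hlist_node *node;

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &mysk_list)
		;	/* read-only inspection of sk */
	rcu_read_unlock();
}

static void mysk_unlink(struct sock *sk)
{
	spin_lock_bh(&mysk_lock);
	sk_del_node_init_rcu(sk);
	spin_unlock_bh(&mysk_lock);

	synchronize_net();	/* wait for rcu_read_lock() walkers */
	/* now safe to continue tearing the socket down */
}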
@@ -2021,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2021 case SIOCGIFDSTADDR: 2169 case SIOCGIFDSTADDR:
2022 case SIOCSIFDSTADDR: 2170 case SIOCSIFDSTADDR:
2023 case SIOCSIFFLAGS: 2171 case SIOCSIFFLAGS:
2024 if (!net_eq(sock_net(sk), &init_net))
2025 return -ENOIOCTLCMD;
2026 return inet_dgram_ops.ioctl(sock, cmd, arg); 2172 return inet_dgram_ops.ioctl(sock, cmd, arg);
2027#endif 2173#endif
2028 2174
@@ -2032,11 +2178,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2032 return 0; 2178 return 0;
2033} 2179}
2034 2180
2035#ifndef CONFIG_PACKET_MMAP
2036#define packet_mmap sock_no_mmap
2037#define packet_poll datagram_poll
2038#else
2039
2040static unsigned int packet_poll(struct file *file, struct socket *sock, 2181static unsigned int packet_poll(struct file *file, struct socket *sock,
2041 poll_table *wait) 2182 poll_table *wait)
2042{ 2183{
@@ -2318,8 +2459,6 @@ out:
2318 mutex_unlock(&po->pg_vec_lock); 2459 mutex_unlock(&po->pg_vec_lock);
2319 return err; 2460 return err;
2320} 2461}
2321#endif
2322
2323 2462
2324static const struct proto_ops packet_ops_spkt = { 2463static const struct proto_ops packet_ops_spkt = {
2325 .family = PF_PACKET, 2464 .family = PF_PACKET,
@@ -2374,40 +2513,26 @@ static struct notifier_block packet_netdev_notifier = {
2374}; 2513};
2375 2514
2376#ifdef CONFIG_PROC_FS 2515#ifdef CONFIG_PROC_FS
2377static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
2378{
2379 struct sock *s;
2380 struct hlist_node *node;
2381
2382 sk_for_each(s, node, &net->packet.sklist) {
2383 if (!off--)
2384 return s;
2385 }
2386 return NULL;
2387}
2388 2516
2389static void *packet_seq_start(struct seq_file *seq, loff_t *pos) 2517static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2390 __acquires(seq_file_net(seq)->packet.sklist_lock) 2518 __acquires(RCU)
2391{ 2519{
2392 struct net *net = seq_file_net(seq); 2520 struct net *net = seq_file_net(seq);
2393 read_lock(&net->packet.sklist_lock); 2521
2394 return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN; 2522 rcu_read_lock();
2523 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
2395} 2524}
2396 2525
2397static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2526static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2398{ 2527{
2399 struct net *net = seq_file_net(seq); 2528 struct net *net = seq_file_net(seq);
2400 ++*pos; 2529 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
2401 return (v == SEQ_START_TOKEN)
2402 ? sk_head(&net->packet.sklist)
2403 : sk_next((struct sock *)v) ;
2404} 2530}
2405 2531
2406static void packet_seq_stop(struct seq_file *seq, void *v) 2532static void packet_seq_stop(struct seq_file *seq, void *v)
2407 __releases(seq_file_net(seq)->packet.sklist_lock) 2533 __releases(RCU)
2408{ 2534{
2409 struct net *net = seq_file_net(seq); 2535 rcu_read_unlock();
2410 read_unlock(&net->packet.sklist_lock);
2411} 2536}
2412 2537
2413static int packet_seq_show(struct seq_file *seq, void *v) 2538static int packet_seq_show(struct seq_file *seq, void *v)
@@ -2415,7 +2540,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2415 if (v == SEQ_START_TOKEN) 2540 if (v == SEQ_START_TOKEN)
2416 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 2541 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2417 else { 2542 else {
2418 struct sock *s = v; 2543 struct sock *s = sk_entry(v);
2419 const struct packet_sock *po = pkt_sk(s); 2544 const struct packet_sock *po = pkt_sk(s);
2420 2545
2421 seq_printf(seq, 2546 seq_printf(seq,
@@ -2457,9 +2582,9 @@ static const struct file_operations packet_seq_fops = {
2457 2582
2458#endif 2583#endif
2459 2584
2460static int packet_net_init(struct net *net) 2585static int __net_init packet_net_init(struct net *net)
2461{ 2586{
2462 rwlock_init(&net->packet.sklist_lock); 2587 spin_lock_init(&net->packet.sklist_lock);
2463 INIT_HLIST_HEAD(&net->packet.sklist); 2588 INIT_HLIST_HEAD(&net->packet.sklist);
2464 2589
2465 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 2590 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
@@ -2468,7 +2593,7 @@ static int packet_net_init(struct net *net)
2468 return 0; 2593 return 0;
2469} 2594}
2470 2595
2471static void packet_net_exit(struct net *net) 2596static void __net_exit packet_net_exit(struct net *net)
2472{ 2597{
2473 proc_net_remove(net, "packet"); 2598 proc_net_remove(net, "packet");
2474} 2599}
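The new PACKET_VNET_HDR option added above makes a SOCK_RAW packet socket exchange a struct virtio_net_hdr ahead of every frame, so checksum and GSO state can pass through userspace. A minimal userspace sketch of the transmit side (the interface index, frame contents, and protocol choice are placeholders; error handling is omitted):

/*
 * Sketch: send one frame prefixed with a virtio_net_hdr on a packet
 * socket with PACKET_VNET_HDR enabled.  packet_snd() consumes the
 * header before building the skb.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <linux/virtio_net.h>

static void send_with_vnet_hdr(int ifindex, const void *frame, size_t frame_len)
{
	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int on = 1;
	struct virtio_net_hdr vhdr;
	struct sockaddr_ll sll;
	struct iovec iov[2];
	struct msghdr msg;

	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));

	memset(&vhdr, 0, sizeof(vhdr));
	vhdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;	/* no offload in this sketch */

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);		/* placeholder ethertype */
	sll.sll_ifindex = ifindex;
	sll.sll_halen = ETH_ALEN;

	iov[0].iov_base = &vhdr;
	iov[0].iov_len = sizeof(vhdr);
	iov[1].iov_base = (void *)frame;		/* full ethernet frame */
	iov[1].iov_len = frame_len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &sll;
	msg.msg_namelen = sizeof(sll);
	msg.msg_iov = iov;
	msg.msg_iovlen = 2;

	sendmsg(fd, &msg, 0);
}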
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 526d0273991a..73aee7f2fcdc 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/slab.h>
28#include <asm/unaligned.h> 29#include <asm/unaligned.h>
29#include <net/sock.h> 30#include <net/sock.h>
30 31
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d00..1bd38db4fe1e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/slab.h>
27#include <linux/socket.h> 28#include <linux/socket.h>
28#include <asm/ioctls.h> 29#include <asm/ioctls.h>
29#include <net/sock.h> 30#include <net/sock.h>
@@ -75,7 +76,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
75 struct sk_buff *skb; 76 struct sk_buff *skb;
76 int err; 77 int err;
77 78
78 if (msg->msg_flags & MSG_OOB) 79 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
80 MSG_CMSG_COMPAT))
79 return -EOPNOTSUPP; 81 return -EOPNOTSUPP;
80 82
81 if (msg->msg_name == NULL) 83 if (msg->msg_name == NULL)
@@ -119,7 +121,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
119 int rval = -EOPNOTSUPP; 121 int rval = -EOPNOTSUPP;
120 int copylen; 122 int copylen;
121 123
122 if (flags & MSG_OOB) 124 if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
125 MSG_CMSG_COMPAT))
123 goto out_nofree; 126 goto out_nofree;
124 127
125 if (addr_len) 128 if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa6..d01208968c83 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
96 goto drop; 96 goto drop;
97 } 97 }
98 98
99 if (likely(skb_headroom(skb) & 3)) { 99 if (skb_headroom(skb) & 3) {
100 struct sk_buff *rskb, *fs; 100 struct sk_buff *rskb, *fs;
101 int flen = 0; 101 int flen = 0;
102 102
103 /* Phonet Pipe data header is misaligned (3 bytes), 103 /* Phonet Pipe data header may be misaligned (3 bytes),
104 * so wrap the IP packet as a single fragment of an head-less 104 * so wrap the IP packet as a single fragment of an head-less
105 * socket buffer. The network stack will pull what it needs, 105 * socket buffer. The network stack will pull what it needs,
106 * but at least, the whole IP payload is not memcpy'd. */ 106 * but at least, the whole IP payload is not memcpy'd. */
106 * but at least, the whole IP payload is not memcpy'd. */ 106 * but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f6..e2a95762abd3 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/socket.h> 27#include <linux/socket.h>
27#include <net/sock.h> 28#include <net/sock.h>
28#include <net/tcp_states.h> 29#include <net/tcp_states.h>
@@ -354,6 +355,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
354 queue = &pn->ctrlreq_queue; 355 queue = &pn->ctrlreq_queue;
355 goto queue; 356 goto queue;
356 357
358 case PNS_PIPE_ALIGNED_DATA:
359 __skb_pull(skb, 1);
360 /* fall through */
357 case PNS_PIPE_DATA: 361 case PNS_PIPE_DATA:
358 __skb_pull(skb, 3); /* Pipe data header */ 362 __skb_pull(skb, 3); /* Pipe data header */
359 if (!pn_flow_safe(pn->rx_fc)) { 363 if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +445,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
441 struct sockaddr_pn dst; 445 struct sockaddr_pn dst;
442 u16 peer_type; 446 u16 peer_type;
443 u8 pipe_handle, enabled, n_sb; 447 u8 pipe_handle, enabled, n_sb;
448 u8 aligned = 0;
444 449
445 if (!pskb_pull(skb, sizeof(*hdr) + 4)) 450 if (!pskb_pull(skb, sizeof(*hdr) + 4))
446 return -EINVAL; 451 return -EINVAL;
@@ -479,6 +484,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
479 return -EINVAL; 484 return -EINVAL;
480 peer_type = (peer_type & 0xff00) | data[0]; 485 peer_type = (peer_type & 0xff00) | data[0];
481 break; 486 break;
487 case PN_PIPE_SB_ALIGNED_DATA:
488 aligned = data[0] != 0;
489 break;
482 } 490 }
483 n_sb--; 491 n_sb--;
484 } 492 }
@@ -510,6 +518,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
510 newpn->rx_credits = 0; 518 newpn->rx_credits = 0;
511 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 519 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
512 newpn->init_enable = enabled; 520 newpn->init_enable = enabled;
521 newpn->aligned = aligned;
513 522
514 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 523 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
515 skb_queue_head(&newsk->sk_receive_queue, skb); 524 skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +838,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
829 return -ENOBUFS; 838 return -ENOBUFS;
830 } 839 }
831 840
832 skb_push(skb, 3); 841 skb_push(skb, 3 + pn->aligned);
833 skb_reset_transport_header(skb); 842 skb_reset_transport_header(skb);
834 ph = pnp_hdr(skb); 843 ph = pnp_hdr(skb);
835 ph->utid = 0; 844 ph->utid = 0;
836 ph->message_id = PNS_PIPE_DATA; 845 if (pn->aligned) {
846 ph->message_id = PNS_PIPE_ALIGNED_DATA;
847 ph->data[0] = 0; /* padding */
848 } else
849 ph->message_id = PNS_PIPE_DATA;
837 ph->pipe_handle = pn->pipe_handle; 850 ph->pipe_handle = pn->pipe_handle;
838 851
839 return pn_skb_send(sk, skb, &pipe_srv); 852 return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +861,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
848 int flags = msg->msg_flags; 861 int flags = msg->msg_flags;
849 int err, done; 862 int err, done;
850 863
851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) 864 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
865 MSG_CMSG_COMPAT)) ||
866 !(msg->msg_flags & MSG_EOR))
852 return -EOPNOTSUPP; 867 return -EOPNOTSUPP;
853 868
854 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 869 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +942,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
927 struct sk_buff *rskb, *fs; 942 struct sk_buff *rskb, *fs;
928 int flen = 0; 943 int flen = 0;
929 944
945 if (pep_sk(sk)->aligned)
946 return pipe_skb_send(sk, skb);
947
930 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); 948 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
931 if (!rskb) { 949 if (!rskb) {
932 kfree_skb(skb); 950 kfree_skb(skb);
@@ -966,6 +984,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
966 struct sk_buff *skb; 984 struct sk_buff *skb;
967 int err; 985 int err;
968 986
987 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
988 MSG_NOSIGNAL|MSG_CMSG_COMPAT))
989 return -EOPNOTSUPP;
990
969 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) 991 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
970 return -ENOTCONN; 992 return -ENOTCONN;
971 993
@@ -973,6 +995,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
973 /* Dequeue and acknowledge control request */ 995 /* Dequeue and acknowledge control request */
974 struct pep_sock *pn = pep_sk(sk); 996 struct pep_sock *pn = pep_sk(sk);
975 997
998 if (flags & MSG_PEEK)
999 return -EOPNOTSUPP;
976 skb = skb_dequeue(&pn->ctrlreq_queue); 1000 skb = skb_dequeue(&pn->ctrlreq_queue);
977 if (skb) { 1001 if (skb) {
978 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, 1002 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bc4a33bf2d3d..9b4ced6e0968 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/slab.h>
28#include <linux/netdevice.h> 29#include <linux/netdevice.h>
29#include <linux/phonet.h> 30#include <linux/phonet.h>
30#include <linux/proc_fs.h> 31#include <linux/proc_fs.h>
@@ -107,8 +108,7 @@ static void phonet_device_destroy(struct net_device *dev)
107 if (pnd) { 108 if (pnd) {
108 u8 addr; 109 u8 addr;
109 110
110 for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 111 for_each_set_bit(addr, pnd->addrs, 64)
111 addr = find_next_bit(pnd->addrs, 64, 1+addr))
112 phonet_address_notify(RTM_DELADDR, dev, addr); 112 phonet_address_notify(RTM_DELADDR, dev, addr);
113 kfree(pnd); 113 kfree(pnd);
114 } 114 }
@@ -311,7 +311,7 @@ static struct notifier_block phonet_device_notifier = {
311}; 311};
312 312
313/* Per-namespace Phonet devices handling */ 313/* Per-namespace Phonet devices handling */
314static int phonet_init_net(struct net *net) 314static int __net_init phonet_init_net(struct net *net)
315{ 315{
316 struct phonet_net *pnn = net_generic(net, phonet_net_id); 316 struct phonet_net *pnn = net_generic(net, phonet_net_id);
317 317
@@ -324,7 +324,7 @@ static int phonet_init_net(struct net *net)
324 return 0; 324 return 0;
325} 325}
326 326
327static void phonet_exit_net(struct net *net) 327static void __net_exit phonet_exit_net(struct net *net)
328{ 328{
329 struct phonet_net *pnn = net_generic(net, phonet_net_id); 329 struct phonet_net *pnn = net_generic(net, phonet_net_id);
330 struct net_device *dev; 330 struct net_device *dev;
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 2e6c7eb8e76a..58b3b1f991ed 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/netlink.h> 27#include <linux/netlink.h>
28#include <linux/phonet.h> 28#include <linux/phonet.h>
29#include <linux/slab.h>
29#include <net/sock.h> 30#include <net/sock.h>
30#include <net/phonet/pn_dev.h> 31#include <net/phonet/pn_dev.h>
31 32
@@ -141,8 +142,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
141 continue; 142 continue;
142 143
143 addr_idx = 0; 144 addr_idx = 0;
144 for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 145 for_each_set_bit(addr, pnd->addrs, 64) {
145 addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
146 if (addr_idx++ < addr_start_idx) 146 if (addr_idx++ < addr_start_idx)
147 continue; 147 continue;
148 148
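Both Phonet hunks above replace the open-coded find_first_bit()/find_next_bit() walk over the 64-bit address map with for_each_set_bit(). A minimal sketch of the idiom (the function name and printout are placeholders):

/*
 * Sketch of the for_each_set_bit() idiom adopted above.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_addrs(const unsigned long *addrs)
{
	unsigned int addr;

	for_each_set_bit(addr, addrs, 64)
		pr_info("phonet address %u is configured\n", addr);
}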
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 69c8b826a0ce..c785bfd0744f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -23,6 +23,7 @@
23 * 02110-1301 USA 23 * 02110-1301 USA
24 */ 24 */
25 25
26#include <linux/gfp.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/net.h> 28#include <linux/net.h>
28#include <linux/poll.h> 29#include <linux/poll.h>
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 853c52be781f..f81862baf4d0 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/gfp.h>
36#include <linux/in.h> 37#include <linux/in.h>
37#include <linux/poll.h> 38#include <linux/poll.h>
38#include <net/sock.h> 39#include <net/sock.h>
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 6d06cac2649c..f1da27ceb064 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -30,6 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 * 31 *
32 */ 32 */
33#include <linux/slab.h>
33#include <linux/types.h> 34#include <linux/types.h>
34#include <linux/rbtree.h> 35#include <linux/rbtree.h>
35 36
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 278f607ab603..7619b671ca28 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/slab.h>
35#include <net/inet_hashtables.h> 36#include <net/inet_hashtables.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 3b8992361042..8f2d6dd7700a 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -37,6 +37,7 @@
37#include <linux/inetdevice.h> 37#include <linux/inetdevice.h>
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h>
40 41
41#include "rds.h" 42#include "rds.h"
42#include "ib.h" 43#include "ib.h"
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 647cb8ffc39b..88d0856cb797 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 4b0da865a72c..059989fdb7d7 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 04dc0d3f3c95..c7dd11b835f0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <rdma/rdma_cm.h> 37#include <rdma/rdma_cm.h>
diff --git a/net/rds/info.c b/net/rds/info.c
index 814a91a6f4a7..c45c4173a44d 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/percpu.h> 33#include <linux/percpu.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/slab.h>
35#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/iw.c b/net/rds/iw.c
index b28fa8525b24..c8f3d3525cb9 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -37,6 +37,7 @@
37#include <linux/inetdevice.h> 37#include <linux/inetdevice.h>
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40#include <linux/slab.h>
40 41
41#include "rds.h" 42#include "rds.h"
42#include "iw.h" 43#include "iw.h"
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 394cf6b4d0aa..3e9460f935d8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h>
35#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
36 37
37#include "rds.h" 38#include "rds.h"
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 9eda11cca956..13dc1862d862 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 54af7d6b92da..da43ee840ca3 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <rdma/rdma_cm.h> 37#include <rdma/rdma_cm.h>
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 4a61997f554d..0d7a159158b8 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/in.h> 35#include <linux/in.h>
35 36
36#include "rds.h" 37#include "rds.h"
diff --git a/net/rds/message.c b/net/rds/message.c
index 73e600ffd87f..9a1d67e001ba 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34 35
35#include "rds.h" 36#include "rds.h"
36#include "rdma.h" 37#include "rdma.h"
diff --git a/net/rds/page.c b/net/rds/page.c
index 36790122dfd4..595a952d4b17 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/highmem.h> 33#include <linux/highmem.h>
34#include <linux/gfp.h>
34 35
35#include "rds.h" 36#include "rds.h"
36 37
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4c64daa1f5d5..5ce9437cad67 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/pagemap.h> 33#include <linux/pagemap.h>
34#include <linux/slab.h>
34#include <linux/rbtree.h> 35#include <linux/rbtree.h>
35#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ 36#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
36 37
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9ece910ea394..7b155081b4dc 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -134,7 +134,7 @@ static int __init rds_rdma_listen_init(void)
134 ret = PTR_ERR(cm_id); 134 ret = PTR_ERR(cm_id);
135 printk(KERN_ERR "RDS/RDMA: failed to setup listener, " 135 printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
136 "rdma_create_id() returned %d\n", ret); 136 "rdma_create_id() returned %d\n", ret);
137 goto out; 137 return ret;
138 } 138 }
139 139
140 sin.sin_family = AF_INET, 140 sin.sin_family = AF_INET,
diff --git a/net/rds/recv.c b/net/rds/recv.c
index b426d67f760c..e2a2b9344f7b 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <linux/in.h> 36#include <linux/in.h>
36 37
diff --git a/net/rds/send.c b/net/rds/send.c
index b2fccfc20769..f04b929ded92 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/gfp.h>
34#include <net/sock.h> 35#include <net/sock.h>
35#include <linux/in.h> 36#include <linux/in.h>
36#include <linux/list.h> 37#include <linux/list.h>
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index b5198aee45d3..babf4577ff7d 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <net/tcp.h> 36#include <net/tcp.h>
36 37
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 211522f9a9a2..056256285987 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -90,8 +90,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
90 90
91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src)); 91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
92 if (ret) { 92 if (ret) {
93 rdsdebug("bind failed with %d at address %u.%u.%u.%u\n", 93 rdsdebug("bind failed with %d at address %pI4\n",
94 ret, NIPQUAD(conn->c_laddr)); 94 ret, &conn->c_laddr);
95 goto out; 95 goto out;
96 } 96 }
97 97
@@ -108,8 +108,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
108 O_NONBLOCK); 108 O_NONBLOCK);
109 sock = NULL; 109 sock = NULL;
110 110
111 rdsdebug("connect to address %u.%u.%u.%u returned %d\n", 111 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
112 NIPQUAD(conn->c_faddr), ret);
113 if (ret == -EINPROGRESS) 112 if (ret == -EINPROGRESS)
114 ret = 0; 113 ret = 0;
115 114
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 45474a436862..975183fe6950 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/gfp.h>
34#include <linux/in.h> 35#include <linux/in.h>
35#include <net/tcp.h> 36#include <net/tcp.h>
36 37
@@ -66,9 +67,9 @@ static int rds_tcp_accept_one(struct socket *sock)
66 67
67 inet = inet_sk(new_sock->sk); 68 inet = inet_sk(new_sock->sk);
68 69
69 rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", 70 rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
70 NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport), 71 &inet->inet_saddr, ntohs(inet->inet_sport),
71 NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport)); 72 &inet->inet_daddr, ntohs(inet->inet_dport));
72 73
73 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, 74 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
74 &rds_tcp_transport, GFP_KERNEL); 75 &rds_tcp_transport, GFP_KERNEL);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index c00dafffbb5a..e08ec912d8b0 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -31,6 +31,7 @@
31 * 31 *
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h>
34#include <net/tcp.h> 35#include <net/tcp.h>
35 36
36#include "rds.h" 37#include "rds.h"
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index ab545e0cd5d6..34fdcc059e54 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -193,9 +193,9 @@ out:
193 rds_tcp_stats_inc(s_tcp_sndbuf_full); 193 rds_tcp_stats_inc(s_tcp_sndbuf_full);
194 ret = 0; 194 ret = 0;
195 } else { 195 } else {
196 printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u " 196 printk(KERN_WARNING "RDS/tcp: send to %pI4 "
197 "returned %d, disconnecting and reconnecting\n", 197 "returned %d, disconnecting and reconnecting\n",
198 NIPQUAD(conn->c_faddr), ret); 198 &conn->c_faddr, ret);
199 rds_conn_drop(conn); 199 rds_conn_drop(conn);
200 } 200 }
201 } 201 }
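
The RDS/TCP hunks above (tcp_connect.c, tcp_listen.c, tcp_send.c) convert the old NIPQUAD() format strings to the %pI4 printk extension, which takes a pointer to the network-byte-order address rather than four expanded octets. A minimal sketch of the new form, with illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

/* Print an IPv4 address with the %pI4 extension: pass a pointer to the
 * big-endian __be32, not the value itself.  Function and parameter
 * names are illustrative. */
static void report_connect(__be32 faddr, int err)
{
        printk(KERN_DEBUG "connect to %pI4 returned %d\n", &faddr, err);
}
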
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index c218e07e5caf..a9fa86f65983 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -33,6 +33,7 @@
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/slab.h>
36 37
37#include "rfkill.h" 38#include "rfkill.h"
38 39
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index a7295ad5f9cb..3713d7ecab96 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
212 case KEY_WIMAX: 212 case KEY_WIMAX:
213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); 213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
214 break; 214 break;
215 case KEY_RFKILL:
216 rfkill_schedule_toggle(RFKILL_TYPE_ALL);
217 break;
215 } 218 }
216 } else if (type == EV_SW && code == SW_RFKILL_ALL) 219 } else if (type == EV_SW && code == SW_RFKILL_ALL)
217 rfkill_schedule_evsw_rfkillall(data); 220 rfkill_schedule_evsw_rfkillall(data);
@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = {
295 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, 298 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
296 }, 299 },
297 { 300 {
301 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
302 .evbit = { BIT_MASK(EV_KEY) },
303 .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
304 },
305 {
298 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, 306 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
299 .evbit = { BIT(EV_SW) }, 307 .evbit = { BIT(EV_SW) },
300 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, 308 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8feb9e5d6623..4fb711a035f4 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -18,6 +18,7 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/socket.h> 19#include <linux/socket.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/slab.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
@@ -1404,29 +1405,13 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1404static void *rose_info_start(struct seq_file *seq, loff_t *pos) 1405static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1405 __acquires(rose_list_lock) 1406 __acquires(rose_list_lock)
1406{ 1407{
1407 int i;
1408 struct sock *s;
1409 struct hlist_node *node;
1410
1411 spin_lock_bh(&rose_list_lock); 1408 spin_lock_bh(&rose_list_lock);
1412 if (*pos == 0) 1409 return seq_hlist_start_head(&rose_list, *pos);
1413 return SEQ_START_TOKEN;
1414
1415 i = 1;
1416 sk_for_each(s, node, &rose_list) {
1417 if (i == *pos)
1418 return s;
1419 ++i;
1420 }
1421 return NULL;
1422} 1410}
1423 1411
1424static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) 1412static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1425{ 1413{
1426 ++*pos; 1414 return seq_hlist_next(v, &rose_list, pos);
1427
1428 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1429 : sk_next((struct sock *)v);
1430} 1415}
1431 1416
1432static void rose_info_stop(struct seq_file *seq, void *v) 1417static void rose_info_stop(struct seq_file *seq, void *v)
@@ -1444,7 +1429,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1444 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); 1429 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1445 1430
1446 else { 1431 else {
1447 struct sock *s = v; 1432 struct sock *s = sk_entry(v);
1448 struct rose_sock *rose = rose_sk(s); 1433 struct rose_sock *rose = rose_sk(s);
1449 const char *devname, *callsign; 1434 const char *devname, *callsign;
1450 const struct net_device *dev = rose->device; 1435 const struct net_device *dev = rose->device;
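
The af_rose.c hunks above drop the hand-rolled SEQ_START_TOKEN/position walk in favour of the generic hlist helpers in <linux/seq_file.h>, and use sk_entry() in ->show() to turn the hlist_node cursor back into a struct sock. A self-contained sketch of that pattern, assuming an illustrative list and lock in place of rose_list/rose_list_lock:

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <net/sock.h>

static HLIST_HEAD(example_sk_list);             /* stand-in for rose_list */
static DEFINE_SPINLOCK(example_sk_lock);        /* stand-in for rose_list_lock */

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(example_sk_lock)
{
        spin_lock_bh(&example_sk_lock);
        /* returns SEQ_START_TOKEN for *pos == 0, else the pos'th node */
        return seq_hlist_start_head(&example_sk_list, *pos);
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_hlist_next(v, &example_sk_list, pos);
}

static void example_seq_stop(struct seq_file *seq, void *v)
        __releases(example_sk_lock)
{
        spin_unlock_bh(&example_sk_lock);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "header line\n");
        else
                seq_printf(seq, "socket %p\n", sk_entry(v));
        return 0;
}
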
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 424b893d1450..178ff4f73c85 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -19,6 +19,7 @@
19#include <linux/fcntl.h> 19#include <linux/fcntl.h>
20#include <linux/in.h> 20#include <linux/in.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h>
22 23
23#include <asm/system.h> 24#include <asm/system.h>
24#include <asm/io.h> 25#include <asm/io.h>
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index 5ef5f6988a2e..a750a28e0221 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 968e8bac1b5d..ae4a9d99aec7 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -7,6 +7,7 @@
7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) 7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 */ 8 */
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/slab.h>
10#include <linux/socket.h> 11#include <linux/socket.h>
11#include <linux/timer.h> 12#include <linux/timer.h>
12#include <net/ax25.h> 13#include <net/ax25.h>
diff --git a/net/rose/rose_out.c b/net/rose/rose_out.c
index 69820f93414b..4ebf33afbe47 100644
--- a/net/rose/rose_out.c
+++ b/net/rose/rose_out.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/gfp.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 70a0b3b4b4d2..cbc244a128bd 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -16,6 +16,7 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/sockios.h> 17#include <linux/sockios.h>
18#include <linux/net.h> 18#include <linux/net.h>
19#include <linux/slab.h>
19#include <net/ax25.h> 20#include <net/ax25.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index b05108f382da..1734abba26a2 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sockios.h> 16#include <linux/sockios.h>
17#include <linux/net.h> 17#include <linux/net.h>
18#include <linux/slab.h>
18#include <net/ax25.h> 19#include <net/ax25.h>
19#include <linux/inet.h> 20#include <linux/inet.h>
20#include <linux/netdevice.h> 21#include <linux/netdevice.h>
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 287b1415cee9..c060095b27ce 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/slab.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/poll.h> 16#include <linux/poll.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 77228f28fa36..6d79310fcaae 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -17,6 +17,7 @@
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/in6.h> 18#include <linux/in6.h>
19#include <linux/icmp.h> 19#include <linux/icmp.h>
20#include <linux/gfp.h>
20#include <net/sock.h> 21#include <net/sock.h>
21#include <net/af_rxrpc.h> 22#include <net/af_rxrpc.h>
22#include <net/ip.h> 23#include <net/ip.h>
@@ -88,6 +89,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
88 89
89 /* get a notification message to send to the server app */ 90 /* get a notification message to send to the server app */
90 notification = alloc_skb(0, GFP_NOFS); 91 notification = alloc_skb(0, GFP_NOFS);
92 if (!notification) {
93 _debug("no memory");
94 ret = -ENOMEM;
95 goto error_nofree;
96 }
91 rxrpc_new_skb(notification); 97 rxrpc_new_skb(notification);
92 notification->mark = RXRPC_SKB_MARK_NEW_CALL; 98 notification->mark = RXRPC_SKB_MARK_NEW_CALL;
93 99
@@ -189,6 +195,7 @@ invalid_service:
189 ret = -ECONNREFUSED; 195 ret = -ECONNREFUSED;
190error: 196error:
191 rxrpc_free_skb(notification); 197 rxrpc_free_skb(notification);
198error_nofree:
192 _leave(" = %d", ret); 199 _leave(" = %d", ret);
193 return ret; 200 return ret;
194} 201}
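
The ar-accept.c hunk above adds the missing NULL check after alloc_skb() and introduces an error_nofree label so the failure path skips the rxrpc_free_skb() that the ordinary error path performs on a successfully allocated skb. A minimal sketch of the checked allocation, with illustrative names around the real alloc_skb()/GFP_NOFS calls:

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Allocate a zero-length notification skb and fail cleanly when memory
 * is tight: nothing has been allocated yet at this point, so there is
 * nothing to free on the error path.  Function name and out-parameter
 * are illustrative. */
static int make_notification(struct sk_buff **out)
{
        struct sk_buff *notification;

        notification = alloc_skb(0, GFP_NOFS);
        if (!notification)
                return -ENOMEM;

        *out = notification;
        return 0;
}
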
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index b4a220977031..2714da167fb8 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -13,6 +13,7 @@
13#include <linux/circ_buf.h> 13#include <linux/circ_buf.h>
14#include <linux/net.h> 14#include <linux/net.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/slab.h>
16#include <linux/udp.h> 17#include <linux/udp.h>
17#include <net/sock.h> 18#include <net/sock.h>
18#include <net/af_rxrpc.h> 19#include <net/af_rxrpc.h>
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bc0019f704fe..909d092de9f4 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/circ_buf.h> 14#include <linux/circ_buf.h>
14#include <net/sock.h> 15#include <net/sock.h>
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 9f1ce841a0bb..4106ca95ec86 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/net.h> 14#include <linux/net.h>
14#include <linux/skbuff.h> 15#include <linux/skbuff.h>
15#include <linux/crypto.h> 16#include <linux/crypto.h>
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index f98c8027e5c1..89315009bab1 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -17,6 +17,7 @@
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/in6.h> 18#include <linux/in6.h>
19#include <linux/icmp.h> 19#include <linux/icmp.h>
20#include <linux/gfp.h>
20#include <net/sock.h> 21#include <net/sock.h>
21#include <net/af_rxrpc.h> 22#include <net/af_rxrpc.h>
22#include <net/ip.h> 23#include <net/ip.h>
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 74697b200496..5ee16f0353fe 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -18,6 +18,7 @@
18#include <linux/key-type.h> 18#include <linux/key-type.h>
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/ctype.h> 20#include <linux/ctype.h>
21#include <linux/slab.h>
21#include <net/sock.h> 22#include <net/sock.h>
22#include <net/af_rxrpc.h> 23#include <net/af_rxrpc.h>
23#include <keys/rxrpc-type.h> 24#include <keys/rxrpc-type.h>
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index 807535ff29b5..87f7135d238b 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/af_rxrpc.h> 17#include <net/af_rxrpc.h>
17#include "ar-internal.h" 18#include "ar-internal.h"
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index cc9102c5b588..5f22e263eda7 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/net.h> 12#include <linux/net.h>
13#include <linux/gfp.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
15#include <net/sock.h> 16#include <net/sock.h>
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index edc026c1eb76..f0f85b0123f7 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -16,6 +16,7 @@
16#include <linux/in.h> 16#include <linux/in.h>
17#include <linux/in6.h> 17#include <linux/in6.h>
18#include <linux/icmp.h> 18#include <linux/icmp.h>
19#include <linux/slab.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
21#include <net/ip.h> 22#include <net/ip.h>
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 0936e1acc30e..5e0226fe587e 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <linux/slab.h>
15#include <net/sock.h> 16#include <net/sock.h>
16#include <net/af_rxrpc.h> 17#include <net/af_rxrpc.h>
17#include "ar-internal.h" 18#include "ar-internal.h"
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 713ac593e2e9..7635107726ce 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -16,6 +16,7 @@
16#include <linux/crypto.h> 16#include <linux/crypto.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/ctype.h> 18#include <linux/ctype.h>
19#include <linux/slab.h>
19#include <net/sock.h> 20#include <net/sock.h>
20#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
21#include <keys/rxrpc-type.h> 22#include <keys/rxrpc-type.h>
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 21f9c7678aa3..2f691fb180d1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -328,13 +328,16 @@ config NET_CLS_FLOW
328 module will be called cls_flow. 328 module will be called cls_flow.
329 329
330config NET_CLS_CGROUP 330config NET_CLS_CGROUP
331 bool "Control Group Classifier" 331 tristate "Control Group Classifier"
332 select NET_CLS 332 select NET_CLS
333 depends on CGROUPS 333 depends on CGROUPS
334 ---help--- 334 ---help---
335 Say Y here if you want to classify packets based on the control 335 Say Y here if you want to classify packets based on the control
336 cgroup of their process. 336 cgroup of their process.
337 337
338 To compile this code as a module, choose M here: the
339 module will be called cls_cgroup.
340
338config NET_EMATCH 341config NET_EMATCH
339 bool "Extended Matches" 342 bool "Extended Matches"
340 select NET_CLS 343 select NET_CLS
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 64f5e328cee9..d8e0171d9a4b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/slab.h>
18#include <linux/skbuff.h> 19#include <linux/skbuff.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/kmod.h> 21#include <linux/kmod.h>
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 082c520b0def..da27a170b6b7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -19,6 +19,7 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23#include <net/pkt_sched.h> 24#include <net/pkt_sched.h>
24#include <linux/tc_act/tc_ipt.h> 25#include <linux/tc_act/tc_ipt.h>
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d329170243cb..c046682054eb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -20,6 +20,7 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/gfp.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/netlink.h> 25#include <net/netlink.h>
25#include <net/pkt_sched.h> 26#include <net/pkt_sched.h>
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 6b0359a500e6..b7dcfedc802e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -17,6 +17,7 @@
17#include <linux/rtnetlink.h> 17#include <linux/rtnetlink.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/slab.h>
20#include <net/netlink.h> 21#include <net/netlink.h>
21#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
22#include <linux/tc_act/tc_pedit.h> 23#include <linux/tc_act/tc_pedit.h>
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 723964c3ee4f..654f73dff7c1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/slab.h>
21#include <net/act_api.h> 22#include <net/act_api.h>
22#include <net/netlink.h> 23#include <net/netlink.h>
23 24
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 8daa1ebc7413..622ca809c15c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3725d8fa29db..f082b27ff46d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -24,6 +24,7 @@
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/netlink.h> 25#include <linux/netlink.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/slab.h>
27#include <net/net_namespace.h> 28#include <net/net_namespace.h>
28#include <net/sock.h> 29#include <net/sock.h>
29#include <net/netlink.h> 30#include <net/netlink.h>
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 4e2bda854119..efd4f95fd050 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/string.h> 16#include <linux/string.h>
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e4877ca6727c..221180384fd7 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
@@ -24,6 +25,25 @@ struct cgroup_cls_state
24 u32 classid; 25 u32 classid;
25}; 26};
26 27
28static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
29 struct cgroup *cgrp);
30static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
31static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
32
33struct cgroup_subsys net_cls_subsys = {
34 .name = "net_cls",
35 .create = cgrp_create,
36 .destroy = cgrp_destroy,
37 .populate = cgrp_populate,
38#ifdef CONFIG_NET_CLS_CGROUP
39 .subsys_id = net_cls_subsys_id,
40#else
41#define net_cls_subsys_id net_cls_subsys.subsys_id
42#endif
43 .module = THIS_MODULE,
44};
45
46
27static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 47static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
28{ 48{
29 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 49 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -79,14 +99,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
79 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 99 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
80} 100}
81 101
82struct cgroup_subsys net_cls_subsys = {
83 .name = "net_cls",
84 .create = cgrp_create,
85 .destroy = cgrp_destroy,
86 .populate = cgrp_populate,
87 .subsys_id = net_cls_subsys_id,
88};
89
90struct cls_cgroup_head 102struct cls_cgroup_head
91{ 103{
92 u32 handle; 104 u32 handle;
@@ -277,12 +289,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
277 289
278static int __init init_cgroup_cls(void) 290static int __init init_cgroup_cls(void)
279{ 291{
280 return register_tcf_proto_ops(&cls_cgroup_ops); 292 int ret = register_tcf_proto_ops(&cls_cgroup_ops);
293 if (ret)
294 return ret;
295 ret = cgroup_load_subsys(&net_cls_subsys);
296 if (ret)
297 unregister_tcf_proto_ops(&cls_cgroup_ops);
298 return ret;
281} 299}
282 300
283static void __exit exit_cgroup_cls(void) 301static void __exit exit_cgroup_cls(void)
284{ 302{
285 unregister_tcf_proto_ops(&cls_cgroup_ops); 303 unregister_tcf_proto_ops(&cls_cgroup_ops);
304 cgroup_unload_subsys(&net_cls_subsys);
286} 305}
287 306
288module_init(init_cgroup_cls); 307module_init(init_cgroup_cls);
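
With NET_CLS_CGROUP now tristate, the cls_cgroup.c hunks above move the cgroup_subsys definition ahead of its users, let cgroup_load_subsys() assign the subsys id at load time when built modular, and make module init register the classifier first and roll it back if attaching the cgroup subsystem fails. The init/exit ordering, restated outside the diff for readability (it relies on the cls_cgroup_ops and net_cls_subsys definitions already present in cls_cgroup.c):

/* Register the tcf ops first; if attaching the net_cls cgroup
 * subsystem then fails, unregister them again so a partial failure
 * leaves nothing behind.  Teardown undoes both in reverse. */
static int __init init_cgroup_cls(void)
{
        int ret;

        ret = register_tcf_proto_ops(&cls_cgroup_ops);
        if (ret)
                return ret;

        ret = cgroup_load_subsys(&net_cls_subsys);
        if (ret)
                unregister_tcf_proto_ops(&cls_cgroup_ops);

        return ret;
}

static void __exit exit_cgroup_cls(void)
{
        unregister_tcf_proto_ops(&cls_cgroup_ops);
        cgroup_unload_subsys(&net_cls_subsys);
}
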
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e054c62857e1..6ed61b10e002 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -20,6 +20,7 @@
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <linux/ipv6.h>
22#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
23#include <linux/slab.h>
23 24
24#include <net/pkt_cls.h> 25#include <net/pkt_cls.h>
25#include <net/ip.h> 26#include <net/ip.h>
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 6d6e87585fb1..93b0a7b6f9b4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/string.h> 25#include <linux/string.h>
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index dd872d5383ef..694dcd85dec8 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/string.h> 16#include <linux/string.h>
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index e806f2314b5e..20ef330bb918 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h>
12#include <net/act_api.h> 13#include <net/act_api.h>
13#include <net/netlink.h> 14#include <net/netlink.h>
14#include <net/pkt_cls.h> 15#include <net/pkt_cls.h>
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 07372f60bee3..17c5dfc67320 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/slab.h>
34#include <linux/types.h> 35#include <linux/types.h>
35#include <linux/kernel.h> 36#include <linux/kernel.h>
36#include <linux/string.h> 37#include <linux/string.h>
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 24dce8b648a4..3bcac8aa333c 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -58,6 +58,7 @@
58 * only available if that subsystem is enabled in the kernel. 58 * only available if that subsystem is enabled in the kernel.
59 */ 59 */
60 60
61#include <linux/slab.h>
61#include <linux/module.h> 62#include <linux/module.h>
62#include <linux/types.h> 63#include <linux/types.h>
63#include <linux/kernel.h> 64#include <linux/kernel.h>
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 370a1b2ea317..1a4176aee6e5 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -9,6 +9,7 @@
9 * Authors: Thomas Graf <tgraf@suug.ch> 9 * Authors: Thomas Graf <tgraf@suug.ch>
10 */ 10 */
11 11
12#include <linux/gfp.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 853c5ead87fd..763253257411 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -9,6 +9,7 @@
9 * Authors: Thomas Graf <tgraf@suug.ch> 9 * Authors: Thomas Graf <tgraf@suug.ch>
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index aab59409728b..e782bdeedc58 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -82,6 +82,7 @@
82 */ 82 */
83 83
84#include <linux/module.h> 84#include <linux/module.h>
85#include <linux/slab.h>
85#include <linux/types.h> 86#include <linux/types.h>
86#include <linux/kernel.h> 87#include <linux/kernel.h>
87#include <linux/errno.h> 88#include <linux/errno.h>
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 75fd1c672c61..145268ca57cf 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/hrtimer.h> 29#include <linux/hrtimer.h>
30#include <linux/lockdep.h> 30#include <linux/lockdep.h>
31#include <linux/slab.h>
31 32
32#include <net/net_namespace.h> 33#include <net/net_namespace.h>
33#include <net/sock.h> 34#include <net/sock.h>
@@ -1707,6 +1708,7 @@ static int __init pktsched_init(void)
1707{ 1708{
1708 register_qdisc(&pfifo_qdisc_ops); 1709 register_qdisc(&pfifo_qdisc_ops);
1709 register_qdisc(&bfifo_qdisc_ops); 1710 register_qdisc(&bfifo_qdisc_ops);
1711 register_qdisc(&pfifo_head_drop_qdisc_ops);
1710 register_qdisc(&mq_qdisc_ops); 1712 register_qdisc(&mq_qdisc_ops);
1711 proc_net_fops_create(&init_net, "psched", 0, &psched_fops); 1713 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1712 1714
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ab82f145f689..fcbb86a486a2 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -3,6 +3,7 @@
3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/slab.h>
6#include <linux/init.h> 7#include <linux/init.h>
7#include <linux/string.h> 8#include <linux/string.h>
8#include <linux/errno.h> 9#include <linux/errno.h>
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 3846d65bc03e..28c01ef5abc8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/string.h> 17#include <linux/string.h>
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index a65604f8f2b8..b74046a95397 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index d303daa45d49..63d41f86679c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/slab.h>
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/string.h> 10#include <linux/string.h>
10#include <linux/errno.h> 11#include <linux/errno.h>
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 69188e8358b4..5948bafa8ce2 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/slab.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
@@ -43,6 +44,26 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
43 return qdisc_reshape_fail(skb, sch); 44 return qdisc_reshape_fail(skb, sch);
44} 45}
45 46
47static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
48{
49 struct sk_buff *skb_head;
50 struct fifo_sched_data *q = qdisc_priv(sch);
51
52 if (likely(skb_queue_len(&sch->q) < q->limit))
53 return qdisc_enqueue_tail(skb, sch);
54
55 /* queue full, remove one skb to fulfill the limit */
56 skb_head = qdisc_dequeue_head(sch);
57 sch->bstats.bytes -= qdisc_pkt_len(skb_head);
58 sch->bstats.packets--;
59 sch->qstats.drops++;
60 kfree_skb(skb_head);
61
62 qdisc_enqueue_tail(skb, sch);
63
64 return NET_XMIT_CN;
65}
66
46static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 67static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
47{ 68{
48 struct fifo_sched_data *q = qdisc_priv(sch); 69 struct fifo_sched_data *q = qdisc_priv(sch);
@@ -108,6 +129,20 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
108}; 129};
109EXPORT_SYMBOL(bfifo_qdisc_ops); 130EXPORT_SYMBOL(bfifo_qdisc_ops);
110 131
132struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
133 .id = "pfifo_head_drop",
134 .priv_size = sizeof(struct fifo_sched_data),
135 .enqueue = pfifo_tail_enqueue,
136 .dequeue = qdisc_dequeue_head,
137 .peek = qdisc_peek_head,
138 .drop = qdisc_queue_drop_head,
139 .init = fifo_init,
140 .reset = qdisc_reset_queue,
141 .change = fifo_init,
142 .dump = fifo_dump,
143 .owner = THIS_MODULE,
144};
145
111/* Pass size change message down to embedded FIFO */ 146/* Pass size change message down to embedded FIFO */
112int fifo_set_limit(struct Qdisc *q, unsigned int limit) 147int fifo_set_limit(struct Qdisc *q, unsigned int limit)
113{ 148{
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5173c1e1b19c..ff4dd53eeff0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -24,6 +24,7 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h>
27#include <net/pkt_sched.h> 28#include <net/pkt_sched.h>
28 29
29/* Main transmission queue. */ 30/* Main transmission queue. */
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 40408d595c08..51dcc2aa5c92 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -18,6 +18,7 @@
18 * For all the glorious comments look at include/net/red.h 18 * For all the glorious comments look at include/net/red.h
19 */ 19 */
20 20
21#include <linux/slab.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 508cf5f3a6d5..0b52b8de562c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -36,6 +36,7 @@
36#include <linux/compiler.h> 36#include <linux/compiler.h>
37#include <linux/rbtree.h> 37#include <linux/rbtree.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/slab.h>
39#include <net/netlink.h> 40#include <net/netlink.h>
40#include <net/pkt_sched.h> 41#include <net/pkt_sched.h>
41 42
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index d1dea3d5dc92..b2aba3f5e6fa 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/slab.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/string.h> 14#include <linux/string.h>
14#include <linux/errno.h> 15#include <linux/errno.h>
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7db2c88ce585..c50876cd8704 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/slab.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/string.h> 24#include <linux/string.h>
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d8b10e054627..4714ff162bbd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/slab.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/errno.h> 20#include <linux/errno.h>
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 93285cecb246..81672e0c1b25 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/string.h> 18#include <linux/string.h>
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index cb21380c0605..c5a9ac566007 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -20,6 +20,7 @@
20#include <linux/ipv6.h> 20#include <linux/ipv6.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/jhash.h> 22#include <linux/jhash.h>
23#include <linux/slab.h>
23#include <net/ip.h> 24#include <net/ip.h>
24#include <net/netlink.h> 25#include <net/netlink.h>
25#include <net/pkt_sched.h> 26#include <net/pkt_sched.h>
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index db69637069c4..3415b6ce1c0a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <linux/string.h> 15#include <linux/string.h>
15#include <linux/errno.h> 16#include <linux/errno.h>
16#include <linux/if_arp.h> 17#include <linux/if_arp.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index df5abbff63e2..99c93ee98ad9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
1194 /* Remove any peer addresses not present in the new association. */ 1194 /* Remove any peer addresses not present in the new association. */
1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1196 trans = list_entry(pos, struct sctp_transport, transports); 1196 trans = list_entry(pos, struct sctp_transport, transports);
1197 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) 1197 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1198 sctp_assoc_del_peer(asoc, &trans->ipaddr); 1198 sctp_assoc_rm_peer(asoc, trans);
1199 continue;
1200 }
1199 1201
1200 if (asoc->state >= SCTP_STATE_ESTABLISHED) 1202 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1201 sctp_transport_reset(trans); 1203 sctp_transport_reset(trans);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 56935bbc1496..86366390038a 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -34,6 +34,7 @@
34 * be incorporated into the next SCTP release. 34 * be incorporated into the next SCTP release.
35 */ 35 */
36 36
37#include <linux/slab.h>
37#include <linux/types.h> 38#include <linux/types.h>
38#include <linux/crypto.h> 39#include <linux/crypto.h>
39#include <linux/scatterlist.h> 40#include <linux/scatterlist.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 13a6fba41077..faf71d179e46 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <linux/types.h> 45#include <linux/types.h>
46#include <linux/slab.h>
46#include <linux/in.h> 47#include <linux/in.h>
47#include <net/sock.h> 48#include <net/sock.h>
48#include <net/ipv6.h> 49#include <net/ipv6.h>
@@ -186,7 +187,6 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
186 addr->valid = 1; 187 addr->valid = 1;
187 188
188 INIT_LIST_HEAD(&addr->list); 189 INIT_LIST_HEAD(&addr->list);
189 INIT_RCU_HEAD(&addr->rcu);
190 190
191 /* We always hold a socket lock when calling this function, 191 /* We always hold a socket lock when calling this function,
192 * and that acts as a writer synchronizing lock. 192 * and that acts as a writer synchronizing lock.
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 8e4320040f05..3eab6db59a37 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -42,6 +42,7 @@
42#include <linux/net.h> 42#include <linux/net.h>
43#include <linux/inet.h> 43#include <linux/inet.h>
44#include <linux/skbuff.h> 44#include <linux/skbuff.h>
45#include <linux/slab.h>
45#include <net/sock.h> 46#include <net/sock.h>
46#include <net/sctp/sctp.h> 47#include <net/sctp/sctp.h>
47#include <net/sctp/sm.h> 48#include <net/sctp/sm.h>
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 905fda582b92..7ec09ba03a1c 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
144 /* Use SCTP specific send buffer space queues. */ 144 /* Use SCTP specific send buffer space queues. */
145 ep->sndbuf_policy = sctp_sndbuf_policy; 145 ep->sndbuf_policy = sctp_sndbuf_policy;
146 146
147 sk->sk_data_ready = sctp_data_ready;
147 sk->sk_write_space = sctp_write_space; 148 sk->sk_write_space = sctp_write_space;
148 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 149 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
149 150
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67add..2a570184e5a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -53,6 +53,7 @@
53#include <linux/socket.h> 53#include <linux/socket.h>
54#include <linux/ip.h> 54#include <linux/ip.h>
55#include <linux/time.h> /* For struct timeval */ 55#include <linux/time.h> /* For struct timeval */
56#include <linux/slab.h>
56#include <net/ip.h> 57#include <net/ip.h>
57#include <net/icmp.h> 58#include <net/icmp.h>
58#include <net/snmp.h> 59#include <net/snmp.h>
@@ -75,7 +76,7 @@ static struct sctp_association *__sctp_lookup_association(
75 const union sctp_addr *peer, 76 const union sctp_addr *peer,
76 struct sctp_transport **pt); 77 struct sctp_transport **pt);
77 78
78static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); 79static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
79 80
80 81
81/* Calculate the SCTP checksum of an SCTP packet. */ 82/* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +266,13 @@ int sctp_rcv(struct sk_buff *skb)
265 } 266 }
266 267
267 if (sock_owned_by_user(sk)) { 268 if (sock_owned_by_user(sk)) {
269 if (sctp_add_backlog(sk, skb)) {
270 sctp_bh_unlock_sock(sk);
271 sctp_chunk_free(chunk);
272 skb = NULL; /* sctp_chunk_free already freed the skb */
273 goto discard_release;
274 }
268 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 275 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
269 sctp_add_backlog(sk, skb);
270 } else { 276 } else {
271 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); 277 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
272 sctp_inq_push(&chunk->rcvr->inqueue, chunk); 278 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +342,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
336 sctp_bh_lock_sock(sk); 342 sctp_bh_lock_sock(sk);
337 343
338 if (sock_owned_by_user(sk)) { 344 if (sock_owned_by_user(sk)) {
339 sk_add_backlog(sk, skb); 345 if (sk_add_backlog(sk, skb))
340 backloged = 1; 346 sctp_chunk_free(chunk);
347 else
348 backloged = 1;
341 } else 349 } else
342 sctp_inq_push(inqueue, chunk); 350 sctp_inq_push(inqueue, chunk);
343 351
@@ -362,22 +370,27 @@ done:
362 return 0; 370 return 0;
363} 371}
364 372
365static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 373static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
366{ 374{
367 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 375 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
368 struct sctp_ep_common *rcvr = chunk->rcvr; 376 struct sctp_ep_common *rcvr = chunk->rcvr;
377 int ret;
369 378
370 /* Hold the assoc/ep while hanging on the backlog queue. 379 ret = sk_add_backlog(sk, skb);
371 * This way, we know structures we need will not disappear from us 380 if (!ret) {
372 */ 381 /* Hold the assoc/ep while hanging on the backlog queue.
373 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 382 * This way, we know structures we need will not disappear
374 sctp_association_hold(sctp_assoc(rcvr)); 383 * from us
375 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 384 */
376 sctp_endpoint_hold(sctp_ep(rcvr)); 385 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
377 else 386 sctp_association_hold(sctp_assoc(rcvr));
378 BUG(); 387 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
388 sctp_endpoint_hold(sctp_ep(rcvr));
389 else
390 BUG();
391 }
392 return ret;
379 393
380 sk_add_backlog(sk, skb);
381} 394}
382 395
383/* Handle icmp frag needed error. */ 396/* Handle icmp frag needed error. */
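
The input.c hunks above adapt SCTP to sk_add_backlog() now being able to fail (the per-socket backlog is bounded): sctp_rcv() and sctp_backlog_rcv() must free the chunk instead of assuming it was queued, and sctp_add_backlog() only takes its association/endpoint reference after a successful queueing. A minimal sketch of the check-then-free shape, with an illustrative wrapper name (the real SCTP code frees the chunk rather than the bare skb):

#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Queue a packet on a socket currently owned by user context, or drop
 * it when the bounded backlog refuses it.  Wrapper name is
 * illustrative; sk_add_backlog() is the real, now-failable helper. */
static int backlog_or_drop(struct sock *sk, struct sk_buff *skb)
{
        int ret = sk_add_backlog(sk, skb);

        if (ret)
                kfree_skb(skb);         /* backlog full: packet is dropped */
        return ret;
}
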
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index bbf5dd2a97c4..ccb6dc48d15b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -46,6 +46,7 @@
46#include <net/sctp/sctp.h> 46#include <net/sctp/sctp.h>
47#include <net/sctp/sm.h> 47#include <net/sctp/sm.h>
48#include <linux/interrupt.h> 48#include <linux/interrupt.h>
49#include <linux/slab.h>
49 50
50/* Initialize an SCTP inqueue. */ 51/* Initialize an SCTP inqueue. */
51void sctp_inq_init(struct sctp_inq *queue) 52void sctp_inq_init(struct sctp_inq *queue)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index cc50fbe99291..9fb5d37c37ad 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -58,6 +58,7 @@
58#include <linux/netdevice.h> 58#include <linux/netdevice.h>
59#include <linux/init.h> 59#include <linux/init.h>
60#include <linux/ipsec.h> 60#include <linux/ipsec.h>
61#include <linux/slab.h>
61 62
62#include <linux/ipv6.h> 63#include <linux/ipv6.h>
63#include <linux/icmpv6.h> 64#include <linux/icmpv6.h>
@@ -381,7 +382,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
381 addr->a.v6.sin6_scope_id = dev->ifindex; 382 addr->a.v6.sin6_scope_id = dev->ifindex;
382 addr->valid = 1; 383 addr->valid = 1;
383 INIT_LIST_HEAD(&addr->list); 384 INIT_LIST_HEAD(&addr->list);
384 INIT_RCU_HEAD(&addr->rcu);
385 list_add_tail(&addr->list, addrlist); 385 list_add_tail(&addr->list, addrlist);
386 } 386 }
387 } 387 }
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7c5589363433..fad261d41ec2 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -48,6 +48,7 @@
48#include <linux/ip.h> 48#include <linux/ip.h>
49#include <linux/ipv6.h> 49#include <linux/ipv6.h>
50#include <linux/init.h> 50#include <linux/init.h>
51#include <linux/slab.h>
51#include <net/inet_ecn.h> 52#include <net/inet_ecn.h>
52#include <net/ip.h> 53#include <net/ip.h>
53#include <net/icmp.h> 54#include <net/icmp.h>
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 229690f02a1d..abfc0b8dee74 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -50,6 +50,7 @@
50#include <linux/list.h> /* For struct list_head */ 50#include <linux/list.h> /* For struct list_head */
51#include <linux/socket.h> 51#include <linux/socket.h>
52#include <linux/ip.h> 52#include <linux/ip.h>
53#include <linux/slab.h>
53#include <net/sock.h> /* For skb_set_owner_w */ 54#include <net/sock.h> /* For skb_set_owner_w */
54 55
55#include <net/sctp/sctp.h> 56#include <net/sctp/sctp.h>
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 8cb4f060bce6..534c7eae9d15 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -50,6 +50,7 @@
50#include <linux/socket.h> 50#include <linux/socket.h>
51#include <linux/ip.h> 51#include <linux/ip.h>
52#include <linux/time.h> /* For struct timeval */ 52#include <linux/time.h> /* For struct timeval */
53#include <linux/gfp.h>
53#include <net/sock.h> 54#include <net/sock.h>
54#include <net/sctp/sctp.h> 55#include <net/sctp/sctp.h>
55#include <net/sctp/sm.h> 56#include <net/sctp/sm.h>
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac4..784bcc9a979d 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
40#include <net/sctp/sctp.h> 40#include <net/sctp/sctp.h>
41#include <net/ip.h> /* for snmp_fold_field */ 41#include <net/ip.h> /* for snmp_fold_field */
42 42
43static struct snmp_mib sctp_snmp_list[] = { 43static const struct snmp_mib sctp_snmp_list[] = {
44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), 44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), 45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), 46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
@@ -83,7 +83,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
83 83
84 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 84 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
86 snmp_fold_field((void **)sctp_statistics, 86 snmp_fold_field((void __percpu **)sctp_statistics,
87 sctp_snmp_list[i].entry)); 87 sctp_snmp_list[i].entry));
88 88
89 return 0; 89 return 0;
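
The __percpu annotation in the hunk above only documents what snmp_fold_field() already does: the SCTP MIB is kept as one counter block per CPU, and the /proc view sums the same slot across all of them. A minimal userspace sketch of that folding step, with the per-CPU layout simulated by a plain array (fold_field() and the layout are illustrative, not the kernel API):

#include <stdio.h>

#define NR_CPUS    4
#define MIB_FIELDS 3                    /* e.g. CurrEstab, ActiveEstabs, ... */

/* One counter block per CPU, standing in for the kernel's per-CPU MIB. */
static unsigned long mib[NR_CPUS][MIB_FIELDS];

/* Fold one field across all CPUs -- the job snmp_fold_field() performs. */
static unsigned long fold_field(int field)
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += mib[cpu][field];
	return sum;
}

int main(void)
{
	mib[0][1] = 5;          /* increments land on whichever CPU ran them */
	mib[2][1] = 7;
	printf("folded value for field 1: %lu\n", fold_field(1));  /* 12 */
	return 0;
}
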
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a3c8988758b1..a56f98e82f92 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -54,6 +54,7 @@
54#include <linux/bootmem.h> 54#include <linux/bootmem.h>
55#include <linux/highmem.h> 55#include <linux/highmem.h>
56#include <linux/swap.h> 56#include <linux/swap.h>
57#include <linux/slab.h>
57#include <net/net_namespace.h> 58#include <net/net_namespace.h>
58#include <net/protocol.h> 59#include <net/protocol.h>
59#include <net/ip.h> 60#include <net/ip.h>
@@ -188,7 +189,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
188 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 189 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
189 addr->valid = 1; 190 addr->valid = 1;
190 INIT_LIST_HEAD(&addr->list); 191 INIT_LIST_HEAD(&addr->list);
191 INIT_RCU_HEAD(&addr->rcu);
192 list_add_tail(&addr->list, addrlist); 192 list_add_tail(&addr->list, addrlist);
193 } 193 }
194 } 194 }
@@ -996,12 +996,13 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
996 996
997static inline int init_sctp_mibs(void) 997static inline int init_sctp_mibs(void)
998{ 998{
999 return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib)); 999 return snmp_mib_init((void __percpu **)sctp_statistics,
1000 sizeof(struct sctp_mib));
1000} 1001}
1001 1002
1002static inline void cleanup_sctp_mibs(void) 1003static inline void cleanup_sctp_mibs(void)
1003{ 1004{
1004 snmp_mib_free((void**)sctp_statistics); 1005 snmp_mib_free((void __percpu **)sctp_statistics);
1005} 1006}
1006 1007
1007static void sctp_v4_pf_init(void) 1008static void sctp_v4_pf_init(void)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9e732916b671..30c1767186b8 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -58,6 +58,7 @@
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/scatterlist.h> 59#include <linux/scatterlist.h>
60#include <linux/crypto.h> 60#include <linux/crypto.h>
61#include <linux/slab.h>
61#include <net/sock.h> 62#include <net/sock.h>
62 63
63#include <linux/skbuff.h> 64#include <linux/skbuff.h>
@@ -107,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
107 cpu_to_be16(sizeof(struct sctp_paramhdr)), 108 cpu_to_be16(sizeof(struct sctp_paramhdr)),
108}; 109};
109 110
110/* A helper to initialize to initialize an op error inside a 111/* A helper to initialize an op error inside a
111 * provided chunk, as most cause codes will be embedded inside an 112 * provided chunk, as most cause codes will be embedded inside an
112 * abort chunk. 113 * abort chunk.
113 */ 114 */
@@ -124,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
124 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); 125 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
125} 126}
126 127
128/* A helper to initialize an op error inside a
129 * provided chunk, as most cause codes will be embedded inside an
130 * abort chunk. Differs from sctp_init_cause in that it won't oops
131 * if there isn't enough space in the op error chunk
132 */
133int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
134 size_t paylen)
135{
136 sctp_errhdr_t err;
137 __u16 len;
138
139 /* Cause code constants are now defined in network order. */
140 err.cause = cause_code;
141 len = sizeof(sctp_errhdr_t) + paylen;
142 err.length = htons(len);
143
144 if (skb_tailroom(chunk->skb) > len)
145 return -ENOSPC;
146 chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
147 sizeof(sctp_errhdr_t),
148 &err);
149 return 0;
150}
127/* 3.3.2 Initiation (INIT) (1) 151/* 3.3.2 Initiation (INIT) (1)
128 * 152 *
129 * This chunk is used to initiate a SCTP association between two 153 * This chunk is used to initiate a SCTP association between two
@@ -207,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
207 sp = sctp_sk(asoc->base.sk); 231 sp = sctp_sk(asoc->base.sk);
208 num_types = sp->pf->supported_addrs(sp, types); 232 num_types = sp->pf->supported_addrs(sp, types);
209 233
210 chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); 234 chunksize = sizeof(init) + addrs_len;
235 chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
211 chunksize += sizeof(ecap_param); 236 chunksize += sizeof(ecap_param);
212 237
213 if (sctp_prsctp_enable) 238 if (sctp_prsctp_enable)
@@ -237,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
237 /* Add HMACS parameter length if any were defined */ 262 /* Add HMACS parameter length if any were defined */
238 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 263 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
239 if (auth_hmacs->length) 264 if (auth_hmacs->length)
240 chunksize += ntohs(auth_hmacs->length); 265 chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
241 else 266 else
242 auth_hmacs = NULL; 267 auth_hmacs = NULL;
243 268
244 /* Add CHUNKS parameter length */ 269 /* Add CHUNKS parameter length */
245 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 270 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
246 if (auth_chunks->length) 271 if (auth_chunks->length)
247 chunksize += ntohs(auth_chunks->length); 272 chunksize += WORD_ROUND(ntohs(auth_chunks->length));
248 else 273 else
249 auth_chunks = NULL; 274 auth_chunks = NULL;
250 275
@@ -254,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
254 279
255 /* If we have any extensions to report, account for that */ 280 /* If we have any extensions to report, account for that */
256 if (num_ext) 281 if (num_ext)
257 chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 282 chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
283 num_ext);
258 284
259 /* RFC 2960 3.3.2 Initiation (INIT) (1) 285 /* RFC 2960 3.3.2 Initiation (INIT) (1)
260 * 286 *
@@ -396,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
396 422
397 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 423 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
398 if (auth_hmacs->length) 424 if (auth_hmacs->length)
399 chunksize += ntohs(auth_hmacs->length); 425 chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
400 else 426 else
401 auth_hmacs = NULL; 427 auth_hmacs = NULL;
402 428
403 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 429 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
404 if (auth_chunks->length) 430 if (auth_chunks->length)
405 chunksize += ntohs(auth_chunks->length); 431 chunksize += WORD_ROUND(ntohs(auth_chunks->length));
406 else 432 else
407 auth_chunks = NULL; 433 auth_chunks = NULL;
408 434
@@ -411,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
411 } 437 }
412 438
413 if (num_ext) 439 if (num_ext)
414 chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 440 chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
441 num_ext);
415 442
416 /* Now allocate and fill out the chunk. */ 443 /* Now allocate and fill out the chunk. */
417 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); 444 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -1128,6 +1155,24 @@ nodata:
1128 return retval; 1155 return retval;
1129} 1156}
1130 1157
1158/* Create an Operation Error chunk of a fixed size,
1159 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
 1160 * This is a helper function to allocate an error chunk
 1161 * for those invalid parameter codes in which we may not want
 1162 * to report all the errors, if the incoming chunk is large
1163 */
1164static inline struct sctp_chunk *sctp_make_op_error_fixed(
1165 const struct sctp_association *asoc,
1166 const struct sctp_chunk *chunk)
1167{
1168 size_t size = asoc ? asoc->pathmtu : 0;
1169
1170 if (!size)
1171 size = SCTP_DEFAULT_MAXSEGMENT;
1172
1173 return sctp_make_op_error_space(asoc, chunk, size);
1174}
1175
1131/* Create an Operation Error chunk. */ 1176/* Create an Operation Error chunk. */
1132struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, 1177struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
1133 const struct sctp_chunk *chunk, 1178 const struct sctp_chunk *chunk,
@@ -1370,6 +1415,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
1370 return target; 1415 return target;
1371} 1416}
1372 1417
1418/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
1419 * space in the chunk
1420 */
1421void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
1422 int len, const void *data)
1423{
1424 if (skb_tailroom(chunk->skb) > len)
1425 return sctp_addto_chunk(chunk, len, data);
1426 else
1427 return NULL;
1428}
1429
1373/* Append bytes from user space to the end of a chunk. Will panic if 1430/* Append bytes from user space to the end of a chunk. Will panic if
1374 * chunk is not big enough. 1431 * chunk is not big enough.
1375 * Returns a kernel err value. 1432 * Returns a kernel err value.
@@ -1973,13 +2030,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
1973 * returning multiple unknown parameters. 2030 * returning multiple unknown parameters.
1974 */ 2031 */
1975 if (NULL == *errp) 2032 if (NULL == *errp)
1976 *errp = sctp_make_op_error_space(asoc, chunk, 2033 *errp = sctp_make_op_error_fixed(asoc, chunk);
1977 ntohs(chunk->chunk_hdr->length));
1978 2034
1979 if (*errp) { 2035 if (*errp) {
1980 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, 2036 sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
1981 WORD_ROUND(ntohs(param.p->length))); 2037 WORD_ROUND(ntohs(param.p->length)));
1982 sctp_addto_chunk(*errp, 2038 sctp_addto_chunk_fixed(*errp,
1983 WORD_ROUND(ntohs(param.p->length)), 2039 WORD_ROUND(ntohs(param.p->length)),
1984 param.v); 2040 param.v);
1985 } else { 2041 } else {
@@ -3314,21 +3370,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3314 sctp_chunk_free(asconf); 3370 sctp_chunk_free(asconf);
3315 asoc->addip_last_asconf = NULL; 3371 asoc->addip_last_asconf = NULL;
3316 3372
3317 /* Send the next asconf chunk from the addip chunk queue. */
3318 if (!list_empty(&asoc->addip_chunk_list)) {
3319 struct list_head *entry = asoc->addip_chunk_list.next;
3320 asconf = list_entry(entry, struct sctp_chunk, list);
3321
3322 list_del_init(entry);
3323
3324 /* Hold the chunk until an ASCONF_ACK is received. */
3325 sctp_chunk_hold(asconf);
3326 if (sctp_primitive_ASCONF(asoc, asconf))
3327 sctp_chunk_free(asconf);
3328 else
3329 asoc->addip_last_asconf = asconf;
3330 }
3331
3332 return retval; 3373 return retval;
3333} 3374}
3334 3375
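
The chunksize arithmetic above now rounds each variable-length parameter up to a 4-byte boundary before reserving room, and the new *_fixed helpers refuse to append once that reserved room is exhausted instead of running past the skb. A minimal userspace sketch of both ideas, assuming a plain byte buffer in place of the skb; WORD_ROUND mirrors the kernel macro, while buf_add_fixed() is illustrative rather than a kernel helper:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WORD_ROUND(s) ((((s) + 3) >> 2) << 2)   /* round up to 4 bytes */

struct fixed_buf {
	uint8_t data[64];
	size_t  used;
};

/* Append only if the rounded length still fits; otherwise report no space. */
static int buf_add_fixed(struct fixed_buf *b, const void *p, size_t len)
{
	size_t padded = WORD_ROUND(len);

	if (b->used + padded > sizeof(b->data))
		return -1;                      /* -ENOSPC in the kernel */
	memcpy(b->data + b->used, p, len);
	memset(b->data + b->used + len, 0, padded - len);   /* pad bytes */
	b->used += padded;
	return 0;
}

int main(void)
{
	struct fixed_buf b = { .used = 0 };
	const char param[] = "abcde";           /* 6 bytes incl. NUL */

	printf("raw len %zu, padded len %zu\n",
	       sizeof(param), WORD_ROUND(sizeof(param)));
	while (buf_add_fixed(&b, param, sizeof(param)) == 0)
		;                               /* stop cleanly when full */
	printf("stopped at %zu of %zu bytes used\n", b.used, sizeof(b.data));
	return 0;
}
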
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 4e4ca65cd320..d5ae450b6f02 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -51,6 +51,7 @@
51#include <linux/types.h> 51#include <linux/types.h>
52#include <linux/socket.h> 52#include <linux/socket.h>
53#include <linux/ip.h> 53#include <linux/ip.h>
54#include <linux/gfp.h>
54#include <net/sock.h> 55#include <net/sock.h>
55#include <net/sctp/sctp.h> 56#include <net/sctp/sctp.h>
56#include <net/sctp/sm.h> 57#include <net/sctp/sm.h>
@@ -475,7 +476,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
475 * used to provide an upper bound to this doubling operation. 476 * used to provide an upper bound to this doubling operation.
476 * 477 *
477 * Special Case: the first HB doesn't trigger exponential backoff. 478 * Special Case: the first HB doesn't trigger exponential backoff.
478 * The first unacknowleged HB triggers it. We do this with a flag 479 * The first unacknowledged HB triggers it. We do this with a flag
479 * that indicates that we have an outstanding HB. 480 * that indicates that we have an outstanding HB.
480 */ 481 */
481 if (!is_hb || transport->hb_sent) { 482 if (!is_hb || transport->hb_sent) {
@@ -961,6 +962,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
961} 962}
962 963
963 964
 965/* Send the next ASCONF packet currently stored in the association.
 966 * This happens after the ASCONF_ACK was successfully processed.
967 */
968static void sctp_cmd_send_asconf(struct sctp_association *asoc)
969{
970 /* Send the next asconf chunk from the addip chunk
971 * queue.
972 */
973 if (!list_empty(&asoc->addip_chunk_list)) {
974 struct list_head *entry = asoc->addip_chunk_list.next;
975 struct sctp_chunk *asconf = list_entry(entry,
976 struct sctp_chunk, list);
977 list_del_init(entry);
978
979 /* Hold the chunk until an ASCONF_ACK is received. */
980 sctp_chunk_hold(asconf);
981 if (sctp_primitive_ASCONF(asoc, asconf))
982 sctp_chunk_free(asconf);
983 else
984 asoc->addip_last_asconf = asconf;
985 }
986}
987
964 988
965/* These three macros allow us to pull the debugging code out of the 989/* These three macros allow us to pull the debugging code out of the
966 * main flow of sctp_do_sm() to keep attention focused on the real 990 * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1616,6 +1640,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1616 } 1640 }
1617 error = sctp_cmd_send_msg(asoc, cmd->obj.msg); 1641 error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1618 break; 1642 break;
1643 case SCTP_CMD_SEND_NEXT_ASCONF:
1644 sctp_cmd_send_asconf(asoc);
1645 break;
1619 default: 1646 default:
1620 printk(KERN_WARNING "Impossible command: %u, %p\n", 1647 printk(KERN_WARNING "Impossible command: %u, %p\n",
1621 cmd->verb, cmd->obj.ptr); 1648 cmd->verb, cmd->obj.ptr);
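
The new SCTP_CMD_SEND_NEXT_ASCONF side effect defers "send the next queued ASCONF" until the state machine has finished processing the ACK: pop the head of the addip queue, hold it until its own ACK arrives, and drop it if the send primitive fails. A toy userspace sketch of that queue discipline (the list and send_primitive() are stand-ins, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	int id;
	struct chunk *next;
};

static struct chunk *queue_head;        /* stands in for addip_chunk_list */
static struct chunk *last_outstanding;  /* stands in for addip_last_asconf */

static int send_primitive(struct chunk *c)
{
	printf("sending ASCONF %d\n", c->id);
	return c->id == 3 ? -1 : 0;     /* pretend chunk 3 fails to send */
}

/* Rough equivalent of sctp_cmd_send_asconf(): run after an ACK is handled. */
static void send_next_asconf(void)
{
	struct chunk *c = queue_head;

	if (!c)
		return;
	queue_head = c->next;
	if (send_primitive(c))
		free(c);                /* could not send: drop it */
	else
		last_outstanding = c;   /* hold it until its ACK arrives */
}

int main(void)
{
	for (int id = 4; id >= 1; id--) {       /* build queue 1,2,3,4 */
		struct chunk *c = malloc(sizeof(*c));
		c->id = id;
		c->next = queue_head;
		queue_head = c;
	}
	while (queue_head || last_outstanding) {        /* ACK arrival loop */
		if (last_outstanding) {
			printf("ASCONF %d acked\n", last_outstanding->id);
			free(last_outstanding);
			last_outstanding = NULL;
		}
		send_next_asconf();
	}
	return 0;
}
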
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 47bc20d3a85b..24b2cd555637 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -56,6 +56,7 @@
56#include <linux/ipv6.h> 56#include <linux/ipv6.h>
57#include <linux/net.h> 57#include <linux/net.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/slab.h>
59#include <net/sock.h> 60#include <net/sock.h>
60#include <net/inet_ecn.h> 61#include <net/inet_ecn.h>
61#include <linux/skbuff.h> 62#include <linux/skbuff.h>
@@ -3675,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3675 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3676 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3676 3677
3677 if (!sctp_process_asconf_ack((struct sctp_association *)asoc, 3678 if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
3678 asconf_ack)) 3679 asconf_ack)) {
3680 /* Successfully processed ASCONF_ACK. We can
3681 * release the next asconf if we have one.
3682 */
3683 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
3684 SCTP_NULL());
3679 return SCTP_DISPOSITION_CONSUME; 3685 return SCTP_DISPOSITION_CONSUME;
3686 }
3680 3687
3681 abort = sctp_make_abort(asoc, asconf_ack, 3688 abort = sctp_make_abort(asoc, asconf_ack,
3682 sizeof(sctp_errhdr_t)); 3689 sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67fdac9d2d33..44a1ab03a3f0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
67#include <linux/poll.h> 67#include <linux/poll.h>
68#include <linux/init.h> 68#include <linux/init.h>
69#include <linux/crypto.h> 69#include <linux/crypto.h>
70#include <linux/slab.h>
70 71
71#include <net/ip.h> 72#include <net/ip.h>
72#include <net/icmp.h> 73#include <net/icmp.h>
@@ -3718,9 +3719,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3718 sp->hmac = NULL; 3719 sp->hmac = NULL;
3719 3720
3720 SCTP_DBG_OBJCNT_INC(sock); 3721 SCTP_DBG_OBJCNT_INC(sock);
3721 percpu_counter_inc(&sctp_sockets_allocated); 3722
3723 /* Set socket backlog limit. */
3724 sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3722 3725
3723 local_bh_disable(); 3726 local_bh_disable();
3727 percpu_counter_inc(&sctp_sockets_allocated);
3724 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3728 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3725 local_bh_enable(); 3729 local_bh_enable();
3726 3730
@@ -3737,8 +3741,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3737 /* Release our hold on the endpoint. */ 3741 /* Release our hold on the endpoint. */
3738 ep = sctp_sk(sk)->ep; 3742 ep = sctp_sk(sk)->ep;
3739 sctp_endpoint_free(ep); 3743 sctp_endpoint_free(ep);
3740 percpu_counter_dec(&sctp_sockets_allocated);
3741 local_bh_disable(); 3744 local_bh_disable();
3745 percpu_counter_dec(&sctp_sockets_allocated);
3742 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3746 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3743 local_bh_enable(); 3747 local_bh_enable();
3744} 3748}
@@ -6185,6 +6189,16 @@ do_nonblock:
6185 goto out; 6189 goto out;
6186} 6190}
6187 6191
6192void sctp_data_ready(struct sock *sk, int len)
6193{
6194 read_lock_bh(&sk->sk_callback_lock);
6195 if (sk_has_sleeper(sk))
6196 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
6197 POLLRDNORM | POLLRDBAND);
6198 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6199 read_unlock_bh(&sk->sk_callback_lock);
6200}
6201
6188/* If socket sndbuf has changed, wake up all per association waiters. */ 6202/* If socket sndbuf has changed, wake up all per association waiters. */
6189void sctp_write_space(struct sock *sk) 6203void sctp_write_space(struct sock *sk)
6190{ 6204{
@@ -6359,7 +6373,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6359 struct sctp_association *asoc) 6373 struct sctp_association *asoc)
6360{ 6374{
6361 struct inet_sock *inet = inet_sk(sk); 6375 struct inet_sock *inet = inet_sk(sk);
6362 struct inet_sock *newinet = inet_sk(newsk); 6376 struct inet_sock *newinet;
6363 6377
6364 newsk->sk_type = sk->sk_type; 6378 newsk->sk_type = sk->sk_type;
6365 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6379 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 737d330e5ffc..442ad4ed6315 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -37,6 +37,7 @@
37 */ 37 */
38 38
39#include <linux/types.h> 39#include <linux/types.h>
40#include <linux/slab.h>
40#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
41#include <net/sctp/sm.h> 42#include <net/sctp/sm.h>
42 43
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index b827d21dbe54..be4d63d5a5cc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,7 @@
48 * be incorporated into the next SCTP release. 48 * be incorporated into the next SCTP release.
49 */ 49 */
50 50
51#include <linux/slab.h>
51#include <linux/types.h> 52#include <linux/types.h>
52#include <linux/random.h> 53#include <linux/random.h>
53#include <net/sctp/sctp.h> 54#include <net/sctp/sctp.h>
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 9bd64565021a..747d5412c463 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -42,6 +42,7 @@
42 * be incorporated into the next SCTP release. 42 * be incorporated into the next SCTP release.
43 */ 43 */
44 44
45#include <linux/slab.h>
45#include <linux/types.h> 46#include <linux/types.h>
46#include <linux/bitmap.h> 47#include <linux/bitmap.h>
47#include <net/sctp/sctp.h> 48#include <net/sctp/sctp.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8b3560fd876d..aa72e89c3ee1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -43,6 +43,7 @@
43 * be incorporated into the next SCTP release. 43 * be incorporated into the next SCTP release.
44 */ 44 */
45 45
46#include <linux/slab.h>
46#include <linux/types.h> 47#include <linux/types.h>
47#include <linux/skbuff.h> 48#include <linux/skbuff.h>
48#include <net/sctp/structs.h> 49#include <net/sctp/structs.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 7b23803343cc..3a448536f0b6 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -41,6 +41,7 @@
41 * be incorporated into the next SCTP release. 41 * be incorporated into the next SCTP release.
42 */ 42 */
43 43
44#include <linux/slab.h>
44#include <linux/types.h> 45#include <linux/types.h>
45#include <linux/skbuff.h> 46#include <linux/skbuff.h>
46#include <net/sock.h> 47#include <net/sock.h>
diff --git a/net/socket.c b/net/socket.c
index 769c386bd428..5e8d0af3c0e7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -87,6 +87,7 @@
87#include <linux/wireless.h> 87#include <linux/wireless.h>
88#include <linux/nsproxy.h> 88#include <linux/nsproxy.h>
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h>
90 91
91#include <asm/uaccess.h> 92#include <asm/uaccess.h>
92#include <asm/unistd.h> 93#include <asm/unistd.h>
@@ -2135,6 +2136,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2135 break; 2136 break;
2136 ++datagrams; 2137 ++datagrams;
2137 2138
2139 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2140 if (flags & MSG_WAITFORONE)
2141 flags |= MSG_DONTWAIT;
2142
2138 if (timeout) { 2143 if (timeout) {
2139 ktime_get_ts(timeout); 2144 ktime_get_ts(timeout);
2140 *timeout = timespec_sub(end_time, *timeout); 2145 *timeout = timespec_sub(end_time, *timeout);
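
The MSG_WAITFORONE hunk above makes recvmmsg() block only for the first datagram of a batch and then turn on MSG_DONTWAIT for the rest. A minimal userspace caller showing the intended usage on a bound UDP socket (Linux-specific, glibc's recvmmsg() wrapper, error handling kept to a minimum):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

#define BATCH 8

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
		.sin_port = htons(5000),
	};
	struct mmsghdr msgs[BATCH];
	struct iovec iovs[BATCH];
	char bufs[BATCH][1500];
	int i, n;

	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		perror("bind");
		return 1;
	}

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < BATCH; i++) {
		iovs[i].iov_base = bufs[i];
		iovs[i].iov_len  = sizeof(bufs[i]);
		msgs[i].msg_hdr.msg_iov    = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* Block until one datagram arrives, then drain whatever else is
	 * already queued without blocking again. */
	n = recvmmsg(fd, msgs, BATCH, MSG_WAITFORONE, NULL);
	if (n < 0) {
		perror("recvmmsg");
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);

	close(fd);
	return 0;
}
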
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 6dcdd2517819..1419d0cdbbac 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -18,6 +18,7 @@
18 18
19#include <net/ipv6.h> 19#include <net/ipv6.h>
20#include <linux/sunrpc/clnt.h> 20#include <linux/sunrpc/clnt.h>
21#include <linux/slab.h>
21 22
22#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 23#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
23 24
@@ -71,8 +72,9 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
71 if (unlikely(len == 0)) 72 if (unlikely(len == 0))
72 return len; 73 return len;
73 74
74 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 75 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
75 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 76 return len;
77 if (sin6->sin6_scope_id == 0)
76 return len; 78 return len;
77 79
78 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", 80 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
@@ -165,8 +167,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen,
165 if (*delim != IPV6_SCOPE_DELIMITER) 167 if (*delim != IPV6_SCOPE_DELIMITER)
166 return 0; 168 return 0;
167 169
168 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 170 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
169 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
170 return 0; 171 return 0;
171 172
172 len = (buf + buflen) - delim - 1; 173 len = (buf + buflen) - delim - 1;
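
The rpc_ntop6()/rpc_parse_scope_id() changes stop special-casing site-local addresses and skip the scope suffix entirely when the scope id is zero, so only link-local addresses with a real scope get "%<id>" appended. A small userspace sketch of the same rule using the standard IN6_IS_ADDR_LINKLOCAL() test (format_addr() is illustrative, not the sunrpc helper):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Append "%<scope>" only for a link-local address with a non-zero scope id. */
static void format_addr(const struct sockaddr_in6 *sin6, char *out, size_t len)
{
	char buf[INET6_ADDRSTRLEN];

	inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
	if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && sin6->sin6_scope_id != 0)
		snprintf(out, len, "%s%%%u", buf, (unsigned)sin6->sin6_scope_id);
	else
		snprintf(out, len, "%s", buf);
}

int main(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_scope_id = 2 };
	struct sockaddr_in6 b = { .sin6_family = AF_INET6, .sin6_scope_id = 0 };
	char out[64];

	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);       /* link-local */
	inet_pton(AF_INET6, "2001:db8::1", &b.sin6_addr);   /* global */

	format_addr(&a, out, sizeof(out));
	printf("%s\n", out);            /* fe80::1%2 */
	format_addr(&b, out, sizeof(out));
	printf("%s\n", out);            /* 2001:db8::1 */
	return 0;
}
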
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index f394fc190a49..95afe79dd9d7 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -237,7 +237,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
237 list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { 237 list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
238 238
239 /* Enforce a 60 second garbage collection moratorium */ 239 /* Enforce a 60 second garbage collection moratorium */
240 if (time_in_range_open(cred->cr_expire, expired, jiffies) && 240 if (time_in_range(cred->cr_expire, expired, jiffies) &&
241 test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) 241 test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
242 continue; 242 continue;
243 243
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index bf88bf8e9365..8f623b0f03dd 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <linux/err.h> 7#include <linux/err.h>
8#include <linux/slab.h>
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index f7a7f8380e38..c389ccf6437d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -206,8 +206,14 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
206 ctx->gc_win = window_size; 206 ctx->gc_win = window_size;
207 /* gssd signals an error by passing ctx->gc_win = 0: */ 207 /* gssd signals an error by passing ctx->gc_win = 0: */
208 if (ctx->gc_win == 0) { 208 if (ctx->gc_win == 0) {
209 /* in which case, p points to an error code which we ignore */ 209 /*
210 p = ERR_PTR(-EACCES); 210 * in which case, p points to an error code. Anything other
211 * than -EKEYEXPIRED gets converted to -EACCES.
212 */
213 p = simple_get_bytes(p, end, &ret, sizeof(ret));
214 if (!IS_ERR(p))
215 p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
216 ERR_PTR(-EACCES);
211 goto err; 217 goto err;
212 } 218 }
213 /* copy the opaque wire context */ 219 /* copy the opaque wire context */
@@ -646,6 +652,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
646 err = PTR_ERR(p); 652 err = PTR_ERR(p);
647 switch (err) { 653 switch (err) {
648 case -EACCES: 654 case -EACCES:
655 case -EKEYEXPIRED:
649 gss_msg->msg.errno = err; 656 gss_msg->msg.errno = err;
650 err = mlen; 657 err = mlen;
651 break; 658 break;
@@ -1273,9 +1280,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
1273 rqstp->rq_release_snd_buf = priv_release_snd_buf; 1280 rqstp->rq_release_snd_buf = priv_release_snd_buf;
1274 return 0; 1281 return 0;
1275out_free: 1282out_free:
1276 for (i--; i >= 0; i--) { 1283 rqstp->rq_enc_pages_num = i;
1277 __free_page(rqstp->rq_enc_pages[i]); 1284 priv_release_snd_buf(rqstp);
1278 }
1279out: 1285out:
1280 return -EAGAIN; 1286 return -EAGAIN;
1281} 1287}
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index c0ba39c4f5f2..310b78e99456 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -33,7 +33,6 @@
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/slab.h>
37#include <linux/string.h> 36#include <linux/string.h>
38#include <linux/sunrpc/sched.h> 37#include <linux/sunrpc/sched.h>
39#include <linux/sunrpc/gss_asn1.h> 38#include <linux/sunrpc/gss_asn1.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index c93fca204558..e9b636176687 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -37,7 +37,6 @@
37#include <linux/err.h> 37#include <linux/err.h>
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/slab.h>
41#include <linux/scatterlist.h> 40#include <linux/scatterlist.h>
42#include <linux/crypto.h> 41#include <linux/crypto.h>
43#include <linux/highmem.h> 42#include <linux/highmem.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index b8f42ef7178e..88fe6e75ed7e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -59,7 +59,6 @@
59 */ 59 */
60 60
61#include <linux/types.h> 61#include <linux/types.h>
62#include <linux/slab.h>
63#include <linux/jiffies.h> 62#include <linux/jiffies.h>
64#include <linux/sunrpc/gss_krb5.h> 63#include <linux/sunrpc/gss_krb5.h>
65#include <linux/random.h> 64#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 17562b4c35f6..6331cd6866ec 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -32,7 +32,6 @@
32 */ 32 */
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/slab.h>
36#include <linux/sunrpc/gss_krb5.h> 35#include <linux/sunrpc/gss_krb5.h>
37#include <linux/crypto.h> 36#include <linux/crypto.h>
38 37
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 066ec73c84d6..ce6c247edad0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -58,7 +58,6 @@
58 */ 58 */
59 59
60#include <linux/types.h> 60#include <linux/types.h>
61#include <linux/slab.h>
62#include <linux/jiffies.h> 61#include <linux/jiffies.h>
63#include <linux/sunrpc/gss_krb5.h> 62#include <linux/sunrpc/gss_krb5.h>
64#include <linux/crypto.h> 63#include <linux/crypto.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index ae8e69b59c4c..a6e905637e03 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -1,5 +1,4 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/slab.h>
3#include <linux/jiffies.h> 2#include <linux/jiffies.h>
4#include <linux/sunrpc/gss_krb5.h> 3#include <linux/sunrpc/gss_krb5.h>
5#include <linux/random.h> 4#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index c832712f8d55..5a3a65a0e2b4 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -34,7 +34,6 @@
34 */ 34 */
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/slab.h>
38#include <linux/jiffies.h> 37#include <linux/jiffies.h>
39#include <linux/sunrpc/gss_spkm3.h> 38#include <linux/sunrpc/gss_spkm3.h>
40#include <linux/random.h> 39#include <linux/random.h>
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e34bc531fcb9..b81e790ef9f4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -37,6 +37,7 @@
37 * 37 *
38 */ 38 */
39 39
40#include <linux/slab.h>
40#include <linux/types.h> 41#include <linux/types.h>
41#include <linux/module.h> 42#include <linux/module.h>
42#include <linux/pagemap.h> 43#include <linux/pagemap.h>
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 46b2647c5bd2..aac2f8b4ee21 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
7 */ 7 */
8 8
9#include <linux/slab.h>
9#include <linux/types.h> 10#include <linux/types.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/module.h> 12#include <linux/module.h>
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 553621fb2c41..cf06af3b63c6 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -22,6 +22,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22******************************************************************************/ 22******************************************************************************/
23 23
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/slab.h>
25#include <linux/sunrpc/xprt.h> 26#include <linux/sunrpc/xprt.h>
26 27
27#ifdef RPC_DEBUG 28#ifdef RPC_DEBUG
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 13f214f53120..f0c05d3311c1 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -37,21 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 37
38#define RPCDBG_FACILITY RPCDBG_SVCDSP 38#define RPCDBG_FACILITY RPCDBG_SVCDSP
39 39
40void bc_release_request(struct rpc_task *task)
41{
42 struct rpc_rqst *req = task->tk_rqstp;
43
44 dprintk("RPC: bc_release_request: task= %p\n", task);
45
46 /*
47 * Release this request only if it's a backchannel
48 * preallocated request
49 */
50 if (!bc_prealloc(req))
51 return;
52 xprt_free_bc_request(req);
53}
54
55/* Empty callback ops */ 40/* Empty callback ops */
56static const struct rpc_call_ops nfs41_callback_ops = { 41static const struct rpc_call_ops nfs41_callback_ops = {
57}; 42};
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 154034b675bd..19c9983d5360 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -659,6 +659,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
659 task = rpc_new_task(&task_setup_data); 659 task = rpc_new_task(&task_setup_data);
660 if (!task) { 660 if (!task) {
661 xprt_free_bc_request(req); 661 xprt_free_bc_request(req);
662 task = ERR_PTR(-ENOMEM);
662 goto out; 663 goto out;
663 } 664 }
664 task->tk_rqstp = req; 665 task->tk_rqstp = req;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f830367..20e30c6f8355 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -78,7 +78,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
78} 78}
79 79
80/** 80/**
81 * rpc_queue_upcall 81 * rpc_queue_upcall - queue an upcall message to userspace
82 * @inode: inode of upcall pipe on which to queue given message 82 * @inode: inode of upcall pipe on which to queue given message
83 * @msg: message to queue 83 * @msg: message to queue
84 * 84 *
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
587 struct dentry *dentry; 587 struct dentry *dentry;
588 588
589 dentry = __rpc_lookup_create(parent, name); 589 dentry = __rpc_lookup_create(parent, name);
590 if (IS_ERR(dentry))
591 return dentry;
590 if (dentry->d_inode == NULL) 592 if (dentry->d_inode == NULL)
591 return dentry; 593 return dentry;
592 dput(dentry); 594 dput(dentry);
@@ -999,19 +1001,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
999 inode = rpc_get_inode(sb, S_IFDIR | 0755); 1001 inode = rpc_get_inode(sb, S_IFDIR | 0755);
1000 if (!inode) 1002 if (!inode)
1001 return -ENOMEM; 1003 return -ENOMEM;
1002 root = d_alloc_root(inode); 1004 sb->s_root = root = d_alloc_root(inode);
1003 if (!root) { 1005 if (!root) {
1004 iput(inode); 1006 iput(inode);
1005 return -ENOMEM; 1007 return -ENOMEM;
1006 } 1008 }
1007 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) 1009 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
1008 goto out; 1010 return -ENOMEM;
1009 sb->s_root = root;
1010 return 0; 1011 return 0;
1011out:
1012 d_genocide(root);
1013 dput(root);
1014 return -ENOMEM;
1015} 1012}
1016 1013
1017static int 1014static int
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3e3772d8eb92..121105355f60 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/slab.h>
24#include <net/ipv6.h> 25#include <net/ipv6.h>
25 26
26#include <linux/sunrpc/clnt.h> 27#include <linux/sunrpc/clnt.h>
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index a661a3acb37e..10b4319ebbca 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/gfp.h>
11#include <linux/skbuff.h> 12#include <linux/skbuff.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/pagemap.h> 14#include <linux/pagemap.h>
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1b4e6791ecf3..5785d2037f45 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/slab.h>
16 17
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 538ca433a56c..d9017d64597e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -19,6 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h>
22 23
23#include <linux/sunrpc/types.h> 24#include <linux/sunrpc/types.h>
24#include <linux/sunrpc/xdr.h> 25#include <linux/sunrpc/xdr.h>
@@ -133,7 +134,7 @@ svc_pool_map_choose_mode(void)
133 return SVC_POOL_PERNODE; 134 return SVC_POOL_PERNODE;
134 } 135 }
135 136
136 node = any_online_node(node_online_map); 137 node = first_online_node;
137 if (nr_cpus_node(node) > 2) { 138 if (nr_cpus_node(node) > 2) {
138 /* 139 /*
139 * Non-trivial SMP, or CONFIG_NUMA on 140 * Non-trivial SMP, or CONFIG_NUMA on
@@ -506,6 +507,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
506{ 507{
507 unsigned int pages, arghi; 508 unsigned int pages, arghi;
508 509
510 /* bc_xprt uses fore channel allocated buffers */
511 if (svc_is_backchannel(rqstp))
512 return 1;
513
509 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 514 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
510 * We assume one is at most one page 515 * We assume one is at most one page
511 */ 516 */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 7d1f9e928f69..061b2e0f9118 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/freezer.h> 10#include <linux/freezer.h>
11#include <linux/kthread.h> 11#include <linux/kthread.h>
12#include <linux/slab.h>
12#include <net/sock.h> 13#include <net/sock.h>
13#include <linux/sunrpc/stats.h> 14#include <linux/sunrpc/stats.h>
14#include <linux/sunrpc/svc_xprt.h> 15#include <linux/sunrpc/svc_xprt.h>
@@ -173,11 +174,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
173 .sin_addr.s_addr = htonl(INADDR_ANY), 174 .sin_addr.s_addr = htonl(INADDR_ANY),
174 .sin_port = htons(port), 175 .sin_port = htons(port),
175 }; 176 };
177#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
176 struct sockaddr_in6 sin6 = { 178 struct sockaddr_in6 sin6 = {
177 .sin6_family = AF_INET6, 179 .sin6_family = AF_INET6,
178 .sin6_addr = IN6ADDR_ANY_INIT, 180 .sin6_addr = IN6ADDR_ANY_INIT,
179 .sin6_port = htons(port), 181 .sin6_port = htons(port),
180 }; 182 };
183#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
181 struct sockaddr *sap; 184 struct sockaddr *sap;
182 size_t len; 185 size_t len;
183 186
@@ -186,10 +189,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
186 sap = (struct sockaddr *)&sin; 189 sap = (struct sockaddr *)&sin;
187 len = sizeof(sin); 190 len = sizeof(sin);
188 break; 191 break;
192#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
189 case PF_INET6: 193 case PF_INET6:
190 sap = (struct sockaddr *)&sin6; 194 sap = (struct sockaddr *)&sin6;
191 len = sizeof(sin6); 195 len = sizeof(sin6);
192 break; 196 break;
197#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
193 default: 198 default:
194 return ERR_PTR(-EAFNOSUPPORT); 199 return ERR_PTR(-EAFNOSUPPORT);
195 } 200 }
@@ -231,7 +236,10 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
231 err: 236 err:
232 spin_unlock(&svc_xprt_class_lock); 237 spin_unlock(&svc_xprt_class_lock);
233 dprintk("svc: transport %s not found\n", xprt_name); 238 dprintk("svc: transport %s not found\n", xprt_name);
234 return -ENOENT; 239
240 /* This errno is exposed to user space. Provide a reasonable
241 * perror msg for a bad transport. */
242 return -EPROTONOSUPPORT;
235} 243}
236EXPORT_SYMBOL_GPL(svc_create_xprt); 244EXPORT_SYMBOL_GPL(svc_create_xprt);
237 245
@@ -699,8 +707,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
699 spin_unlock_bh(&pool->sp_lock); 707 spin_unlock_bh(&pool->sp_lock);
700 708
701 len = 0; 709 len = 0;
702 if (test_bit(XPT_LISTENER, &xprt->xpt_flags) && 710 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
703 !test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 711 dprintk("svc_recv: found XPT_CLOSE\n");
712 svc_delete_xprt(xprt);
713 } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
704 struct svc_xprt *newxpt; 714 struct svc_xprt *newxpt;
705 newxpt = xprt->xpt_ops->xpo_accept(xprt); 715 newxpt = xprt->xpt_ops->xpo_accept(xprt);
706 if (newxpt) { 716 if (newxpt) {
@@ -726,7 +736,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
726 svc_xprt_received(newxpt); 736 svc_xprt_received(newxpt);
727 } 737 }
728 svc_xprt_received(xprt); 738 svc_xprt_received(xprt);
729 } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 739 } else {
730 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", 740 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
731 rqstp, pool->sp_id, xprt, 741 rqstp, pool->sp_id, xprt,
732 atomic_read(&xprt->xpt_ref.refcount)); 742 atomic_read(&xprt->xpt_ref.refcount));
@@ -739,11 +749,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
739 dprintk("svc: got len=%d\n", len); 749 dprintk("svc: got len=%d\n", len);
740 } 750 }
741 751
742 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
743 dprintk("svc_recv: found XPT_CLOSE\n");
744 svc_delete_xprt(xprt);
745 }
746
747 /* No data, incomplete (TCP) read, or accept() */ 752 /* No data, incomplete (TCP) read, or accept() */
748 if (len == 0 || len == -EAGAIN) { 753 if (len == 0 || len == -EAGAIN) {
749 rqstp->rq_res.len = 0; 754 rqstp->rq_res.len = 0;
@@ -889,11 +894,8 @@ void svc_delete_xprt(struct svc_xprt *xprt)
889 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 894 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
890 serv->sv_tmpcnt--; 895 serv->sv_tmpcnt--;
891 896
892 for (dr = svc_deferred_dequeue(xprt); dr; 897 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
893 dr = svc_deferred_dequeue(xprt)) {
894 svc_xprt_put(xprt);
895 kfree(dr); 898 kfree(dr);
896 }
897 899
898 svc_xprt_put(xprt); 900 svc_xprt_put(xprt);
899 spin_unlock_bh(&serv->sv_lock); 901 spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index d8c041114497..207311610988 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -10,11 +10,13 @@
10#include <linux/seq_file.h> 10#include <linux/seq_file.h>
11#include <linux/hash.h> 11#include <linux/hash.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/slab.h>
13#include <net/sock.h> 14#include <net/sock.h>
14#include <net/ipv6.h> 15#include <net/ipv6.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#define RPCDBG_FACILITY RPCDBG_AUTH 17#define RPCDBG_FACILITY RPCDBG_AUTH
17 18
19#include <linux/sunrpc/clnt.h>
18 20
19/* 21/*
20 * AUTHUNIX and AUTHNULL credentials are both handled here. 22 * AUTHUNIX and AUTHNULL credentials are both handled here.
@@ -187,10 +189,13 @@ static int ip_map_parse(struct cache_detail *cd,
187 * for scratch: */ 189 * for scratch: */
188 char *buf = mesg; 190 char *buf = mesg;
189 int len; 191 int len;
190 int b1, b2, b3, b4, b5, b6, b7, b8;
191 char c;
192 char class[8]; 192 char class[8];
193 struct in6_addr addr; 193 union {
194 struct sockaddr sa;
195 struct sockaddr_in s4;
196 struct sockaddr_in6 s6;
197 } address;
198 struct sockaddr_in6 sin6;
194 int err; 199 int err;
195 200
196 struct ip_map *ipmp; 201 struct ip_map *ipmp;
@@ -209,24 +214,24 @@ static int ip_map_parse(struct cache_detail *cd,
209 len = qword_get(&mesg, buf, mlen); 214 len = qword_get(&mesg, buf, mlen);
210 if (len <= 0) return -EINVAL; 215 if (len <= 0) return -EINVAL;
211 216
212 if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) { 217 if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0)
213 addr.s6_addr32[0] = 0;
214 addr.s6_addr32[1] = 0;
215 addr.s6_addr32[2] = htonl(0xffff);
216 addr.s6_addr32[3] =
217 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
218 } else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c",
219 &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
220 addr.s6_addr16[0] = htons(b1);
221 addr.s6_addr16[1] = htons(b2);
222 addr.s6_addr16[2] = htons(b3);
223 addr.s6_addr16[3] = htons(b4);
224 addr.s6_addr16[4] = htons(b5);
225 addr.s6_addr16[5] = htons(b6);
226 addr.s6_addr16[6] = htons(b7);
227 addr.s6_addr16[7] = htons(b8);
228 } else
229 return -EINVAL; 218 return -EINVAL;
219 switch (address.sa.sa_family) {
220 case AF_INET:
221 /* Form a mapped IPv4 address in sin6 */
222 memset(&sin6, 0, sizeof(sin6));
223 sin6.sin6_family = AF_INET6;
224 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
225 sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr;
226 break;
227#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
228 case AF_INET6:
229 memcpy(&sin6, &address.s6, sizeof(sin6));
230 break;
231#endif
232 default:
233 return -EINVAL;
234 }
230 235
231 expiry = get_expiry(&mesg); 236 expiry = get_expiry(&mesg);
232 if (expiry ==0) 237 if (expiry ==0)
@@ -243,7 +248,8 @@ static int ip_map_parse(struct cache_detail *cd,
243 } else 248 } else
244 dom = NULL; 249 dom = NULL;
245 250
246 ipmp = ip_map_lookup(class, &addr); 251 /* IPv6 scope IDs are ignored for now */
252 ipmp = ip_map_lookup(class, &sin6.sin6_addr);
247 if (ipmp) { 253 if (ipmp) {
248 err = ip_map_update(ipmp, 254 err = ip_map_update(ipmp,
249 container_of(dom, struct unix_domain, h), 255 container_of(dom, struct unix_domain, h),
@@ -619,7 +625,7 @@ static int unix_gid_show(struct seq_file *m,
619 else 625 else
620 glen = 0; 626 glen = 0;
621 627
622 seq_printf(m, "%d %d:", ug->uid, glen); 628 seq_printf(m, "%u %d:", ug->uid, glen);
623 for (i = 0; i < glen; i++) 629 for (i = 0; i < glen; i++)
624 seq_printf(m, " %d", GROUP_AT(ug->gi, i)); 630 seq_printf(m, " %d", GROUP_AT(ug->gi, i));
625 seq_printf(m, "\n"); 631 seq_printf(m, "\n");
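
ip_map_parse() now parses the address with a single presentation-to-binary helper and normalizes an IPv4 result into an IPv4-mapped IPv6 address, so the cache only ever stores one address form. A userspace sketch of that normalization with inet_pton() standing in for rpc_pton():

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Parse either address family and always hand back an in6_addr, mapping
 * IPv4 into ::ffff:a.b.c.d just as the rewritten ip_map_parse() does. */
static int parse_to_v6(const char *buf, struct in6_addr *out)
{
	struct in_addr v4;

	if (inet_pton(AF_INET6, buf, out) == 1)
		return 0;
	if (inet_pton(AF_INET, buf, &v4) == 1) {
		memset(out, 0, sizeof(*out));
		out->s6_addr[10] = 0xff;
		out->s6_addr[11] = 0xff;
		memcpy(&out->s6_addr[12], &v4.s_addr, 4);
		return 0;
	}
	return -1;                      /* -EINVAL in the kernel */
}

int main(void)
{
	struct in6_addr a;
	char buf[INET6_ADDRSTRLEN];

	if (parse_to_v6("192.0.2.1", &a) == 0) {
		inet_ntop(AF_INET6, &a, buf, sizeof(buf));
		printf("%s\n", buf);    /* ::ffff:192.0.2.1 */
	}
	return 0;
}
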
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 870929e08e5d..a29f259204e6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
968 return len; 968 return len;
969 err_delete: 969 err_delete:
970 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 970 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
971 svc_xprt_received(&svsk->sk_xprt);
971 err_again: 972 err_again:
972 return -EAGAIN; 973 return -EAGAIN;
973} 974}
@@ -1357,7 +1358,7 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
1357 1358
1358 if (!so) 1359 if (!so)
1359 return err; 1360 return err;
1360 if (so->sk->sk_family != AF_INET) 1361 if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
1361 err = -EAFNOSUPPORT; 1362 err = -EAFNOSUPPORT;
1362 else if (so->sk->sk_protocol != IPPROTO_TCP && 1363 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1363 so->sk->sk_protocol != IPPROTO_UDP) 1364 so->sk->sk_protocol != IPPROTO_UDP)
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 8bd690c48b69..2763fde88499 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/types.h> 11#include <linux/types.h>
11#include <linux/string.h> 12#include <linux/string.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 469de292c23c..42f09ade0044 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -46,6 +46,7 @@
46 46
47#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/metrics.h> 48#include <linux/sunrpc/metrics.h>
49#include <linux/sunrpc/bc_xprt.h>
49 50
50#include "sunrpc.h" 51#include "sunrpc.h"
51 52
@@ -1032,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
1032 if (req->rq_release_snd_buf) 1033 if (req->rq_release_snd_buf)
1033 req->rq_release_snd_buf(req); 1034 req->rq_release_snd_buf(req);
1034 1035
1035 /*
1036 * Early exit if this is a backchannel preallocated request.
1037 * There is no need to have it added to the RPC slot list.
1038 */
1039 if (is_bc_request)
1040 return;
1041
1042 memset(req, 0, sizeof(*req)); /* mark unused */
1043
1044 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 1036 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1037 if (likely(!is_bc_request)) {
1038 memset(req, 0, sizeof(*req)); /* mark unused */
1045 1039
1046 spin_lock(&xprt->reserve_lock); 1040 spin_lock(&xprt->reserve_lock);
1047 list_add(&req->rq_list, &xprt->free); 1041 list_add(&req->rq_list, &xprt->free);
1048 rpc_wake_up_next(&xprt->backlog); 1042 rpc_wake_up_next(&xprt->backlog);
1049 spin_unlock(&xprt->reserve_lock); 1043 spin_unlock(&xprt->reserve_lock);
1044 } else
1045 xprt_free_bc_request(req);
1050} 1046}
1051 1047
1052/** 1048/**
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 5b8a8ff93a25..d718b8fa9525 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -40,6 +40,7 @@
40 */ 40 */
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/slab.h>
43#include <linux/fs.h> 44#include <linux/fs.h>
44#include <linux/sysctl.h> 45#include <linux/sysctl.h>
45#include <linux/sunrpc/clnt.h> 46#include <linux/sunrpc/clnt.h>
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3fa5751af0ec..edea15a54e51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -43,6 +43,7 @@
43#include <linux/sunrpc/debug.h> 43#include <linux/sunrpc/debug.h>
44#include <linux/sunrpc/rpc_rdma.h> 44#include <linux/sunrpc/rpc_rdma.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <linux/slab.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <rdma/ib_verbs.h> 48#include <rdma/ib_verbs.h>
48#include <rdma/rdma_cm.h> 49#include <rdma/rdma_cm.h>
@@ -678,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
678 int ret; 679 int ret;
679 680
680 dprintk("svcrdma: Creating RDMA socket\n"); 681 dprintk("svcrdma: Creating RDMA socket\n");
681 682 if (sa->sa_family != AF_INET) {
683 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
684 return ERR_PTR(-EAFNOSUPPORT);
685 }
682 cma_xprt = rdma_create_xprt(serv, 1); 686 cma_xprt = rdma_create_xprt(serv, 1);
683 if (!cma_xprt) 687 if (!cma_xprt)
684 return ERR_PTR(-ENOMEM); 688 return ERR_PTR(-ENOMEM);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7018eef1dcdd..187257b1d880 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -49,6 +49,7 @@
49 49
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h>
52#include <linux/seq_file.h> 53#include <linux/seq_file.h>
53 54
54#include "xprt_rdma.h" 55#include "xprt_rdma.h"
@@ -160,16 +161,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
160 (void)rpc_ntop(sap, buf, sizeof(buf)); 161 (void)rpc_ntop(sap, buf, sizeof(buf));
161 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); 162 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
162 163
163 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 164 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
164 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 165 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
165 166
166 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 167 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
167 168
168 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", 169 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
169 NIPQUAD(sin->sin_addr.s_addr));
170 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 170 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
171 171
172 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 172 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
173 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 173 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
174 174
175 /* netid */ 175 /* netid */
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2209aa87d899..27015c6d8eb5 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -48,6 +48,7 @@
48 */ 48 */
49 49
50#include <linux/pci.h> /* for Tavor hack below */ 50#include <linux/pci.h> /* for Tavor hack below */
51#include <linux/slab.h>
51 52
52#include "xprt_rdma.h" 53#include "xprt_rdma.h"
53 54
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d739e5d15d8..9847c30b5001 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
297 switch (sap->sa_family) { 297 switch (sap->sa_family) {
298 case AF_INET: 298 case AF_INET:
299 sin = xs_addr_in(xprt); 299 sin = xs_addr_in(xprt);
300 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", 300 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
301 NIPQUAD(sin->sin_addr.s_addr));
302 break; 301 break;
303 case AF_INET6: 302 case AF_INET6:
304 sin6 = xs_addr_in6(xprt); 303 sin6 = xs_addr_in6(xprt);
305 (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 304 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
306 break; 305 break;
307 default: 306 default:
308 BUG(); 307 BUG();
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
315 struct sockaddr *sap = xs_addr(xprt); 314 struct sockaddr *sap = xs_addr(xprt);
316 char buf[128]; 315 char buf[128];
317 316
318 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 317 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
319 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 318 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
320 319
321 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 320 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
322 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 321 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
323} 322}
324 323
@@ -549,8 +548,6 @@ static int xs_udp_send_request(struct rpc_task *task)
549 /* Still some bytes left; set up for a retry later. */ 548 /* Still some bytes left; set up for a retry later. */
550 status = -EAGAIN; 549 status = -EAGAIN;
551 } 550 }
552 if (!transport->sock)
553 goto out;
554 551
555 switch (status) { 552 switch (status) {
556 case -ENOTSOCK: 553 case -ENOTSOCK:
@@ -570,7 +567,7 @@ static int xs_udp_send_request(struct rpc_task *task)
570 * prompts ECONNREFUSED. */ 567 * prompts ECONNREFUSED. */
571 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 568 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
572 } 569 }
573out: 570
574 return status; 571 return status;
575} 572}
576 573
@@ -652,8 +649,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
652 status = -EAGAIN; 649 status = -EAGAIN;
653 break; 650 break;
654 } 651 }
655 if (!transport->sock)
656 goto out;
657 652
658 switch (status) { 653 switch (status) {
659 case -ENOTSOCK: 654 case -ENOTSOCK:
@@ -673,7 +668,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
673 case -ENOTCONN: 668 case -ENOTCONN:
674 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 669 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
675 } 670 }
676out: 671
677 return status; 672 return status;
678} 673}
679 674
@@ -1912,6 +1907,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1912 case -EALREADY: 1907 case -EALREADY:
1913 xprt_clear_connecting(xprt); 1908 xprt_clear_connecting(xprt);
1914 return; 1909 return;
1910 case -EINVAL:
1911 /* Happens, for instance, if the user specified a link
1912 * local IPv6 address without a scope-id.
1913 */
1914 goto out;
1915 } 1915 }
1916out_eagain: 1916out_eagain:
1917 status = -EAGAIN; 1917 status = -EAGAIN;
@@ -2100,7 +2100,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
2101 * to use the server side send routines. 2101 * to use the server side send routines.
2102 */ 2102 */
2103void *bc_malloc(struct rpc_task *task, size_t size) 2103static void *bc_malloc(struct rpc_task *task, size_t size)
2104{ 2104{
2105 struct page *page; 2105 struct page *page;
2106 struct rpc_buffer *buf; 2106 struct rpc_buffer *buf;
@@ -2120,7 +2120,7 @@ void *bc_malloc(struct rpc_task *task, size_t size)
2120/* 2120/*
2121 * Free the space allocated in the bc_alloc routine 2121 * Free the space allocated in the bc_alloc routine
2122 */ 2122 */
2123void bc_free(void *buffer) 2123static void bc_free(void *buffer)
2124{ 2124{
2125 struct rpc_buffer *buf; 2125 struct rpc_buffer *buf;
2126 2126
@@ -2251,9 +2251,6 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2251 .buf_free = rpc_free, 2251 .buf_free = rpc_free,
2252 .send_request = xs_tcp_send_request, 2252 .send_request = xs_tcp_send_request,
2253 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2253 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2254#if defined(CONFIG_NFS_V4_1)
2255 .release_request = bc_release_request,
2256#endif /* CONFIG_NFS_V4_1 */
2257 .close = xs_tcp_close, 2254 .close = xs_tcp_close,
2258 .destroy = xs_destroy, 2255 .destroy = xs_destroy,
2259 .print_stats = xs_tcp_print_stats, 2256 .print_stats = xs_tcp_print_stats,
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 0b15d7250c40..53196009160a 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -71,7 +71,7 @@ static struct ctl_table_root net_sysctl_ro_root = {
71 .permissions = net_ctl_ro_header_perms, 71 .permissions = net_ctl_ro_header_perms,
72}; 72};
73 73
74static int sysctl_net_init(struct net *net) 74static int __net_init sysctl_net_init(struct net *net)
75{ 75{
76 setup_sysctl_set(&net->sysctls, 76 setup_sysctl_set(&net->sysctls,
77 &net_sysctl_ro_root.default_set, 77 &net_sysctl_ro_root.default_set,
@@ -79,7 +79,7 @@ static int sysctl_net_init(struct net *net)
79 return 0; 79 return 0;
80} 80}
81 81
82static void sysctl_net_exit(struct net *net) 82static void __net_exit sysctl_net_exit(struct net *net)
83{ 83{
84 WARN_ON(!list_empty(&net->sysctls.list)); 84 WARN_ON(!list_empty(&net->sysctls.list));
85 return; 85 return;
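
For reference, the __net_init/__net_exit annotations added here (and again to net/unix further down) mark per-namespace setup and teardown in the usual pernet_operations pattern; when network namespaces are disabled they let that code be discarded after boot. A generic sketch, not taken from this patch:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* per-namespace initialisation would go here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* per-namespace cleanup would go here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}
module_init(example_init);
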
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b61..b74f78d0c033 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -10,7 +10,7 @@ menuconfig TIPC
10 specially designed for intra cluster communication. This protocol 10 specially designed for intra cluster communication. This protocol
11 originates from Ericsson where it has been used in carrier grade 11 originates from Ericsson where it has been used in carrier grade
12 cluster applications for many years. 12 cluster applications for many years.
13 13
14 For more information about TIPC, see http://tipc.sourceforge.net. 14 For more information about TIPC, see http://tipc.sourceforge.net.
15 15
16 This protocol support is also available as a module ( = code which 16 This protocol support is also available as a module ( = code which
@@ -23,91 +23,76 @@ menuconfig TIPC
23if TIPC 23if TIPC
24 24
25config TIPC_ADVANCED 25config TIPC_ADVANCED
26 bool "TIPC: Advanced configuration" 26 bool "Advanced TIPC configuration"
27 default n 27 default n
28 help 28 help
29 Saying Y here will open some advanced configuration 29 Saying Y here will open some advanced configuration for TIPC.
30 for TIPC. Most users do not need to bother, so if 30 Most users do not need to bother; if unsure, just say N.
31 unsure, just say N.
32 31
33config TIPC_ZONES 32config TIPC_ZONES
34 int "Maximum number of zones in network" 33 int "Maximum number of zones in a network"
35 depends on TIPC_ADVANCED 34 depends on TIPC_ADVANCED
35 range 1 255
36 default "3" 36 default "3"
37 help 37 help
38 Max number of zones inside TIPC network. Max supported value 38 Specifies how many zones can be supported in a TIPC network.
39 is 255 zones, minimum is 1 39 Can range from 1 to 255 zones; default is 3.
40 40
41 Default is 3 zones in a network; setting this to higher 41 Setting this to a smaller value saves some memory;
42 allows more zones but might use more memory. 42 setting it to a higher value allows for more zones.
43 43
44config TIPC_CLUSTERS 44config TIPC_CLUSTERS
45 int "Maximum number of clusters in a zone" 45 int "Maximum number of clusters in a zone"
46 depends on TIPC_ADVANCED 46 depends on TIPC_ADVANCED
47 range 1 1
47 default "1" 48 default "1"
48 help 49 help
49 ***Only 1 (one cluster in a zone) is supported by current code. 50 Specifies how many clusters can be supported in a TIPC zone.
50 Any value set here will be overridden.***
51
52 (Max number of clusters inside TIPC zone. Max supported
53 value is 4095 clusters, minimum is 1.
54 51
55 Default is 1; setting this to smaller value might save 52 *** Currently TIPC only supports a single cluster per zone. ***
56 some memory, setting it to higher
57 allows more clusters and might consume more memory.)
58 53
59config TIPC_NODES 54config TIPC_NODES
60 int "Maximum number of nodes in cluster" 55 int "Maximum number of nodes in a cluster"
61 depends on TIPC_ADVANCED 56 depends on TIPC_ADVANCED
57 range 8 2047
62 default "255" 58 default "255"
63 help 59 help
64 Maximum number of nodes inside a TIPC cluster. Maximum 60 Specifies how many nodes can be supported in a TIPC cluster.
65 supported value is 2047 nodes, minimum is 8. 61 Can range from 8 to 2047 nodes; default is 255.
66
67 Setting this to a smaller value saves some memory,
68 setting it to higher allows more nodes.
69
70config TIPC_SLAVE_NODES
71 int "Maximum number of slave nodes in cluster"
72 depends on TIPC_ADVANCED
73 default "0"
74 help
75 ***This capability is not supported by current code.***
76
77 Maximum number of slave nodes inside a TIPC cluster. Maximum
78 supported value is 2047 nodes, minimum is 0.
79 62
80 Setting this to a smaller value saves some memory, 63 Setting this to a smaller value saves some memory;
81 setting it to higher allows more nodes. 64 setting it to higher allows for more nodes.
82 65
83config TIPC_PORTS 66config TIPC_PORTS
84 int "Maximum number of ports in a node" 67 int "Maximum number of ports in a node"
85 depends on TIPC_ADVANCED 68 depends on TIPC_ADVANCED
69 range 127 65535
86 default "8191" 70 default "8191"
87 help 71 help
88 Maximum number of ports within a node. Maximum 72 Specifies how many ports can be supported by a node.
89 supported value is 64535 nodes, minimum is 127. 73 Can range from 127 to 65535 ports; default is 8191.
90 74
91 Setting this to a smaller value saves some memory, 75 Setting this to a smaller value saves some memory,
92 setting it to higher allows more ports. 76 setting it to higher allows for more ports.
93 77
94config TIPC_LOG 78config TIPC_LOG
95 int "Size of log buffer" 79 int "Size of log buffer"
96 depends on TIPC_ADVANCED 80 depends on TIPC_ADVANCED
97 default 0 81 range 0 32768
82 default "0"
98 help 83 help
99 Size (in bytes) of TIPC's internal log buffer, which records the 84 Size (in bytes) of TIPC's internal log buffer, which records the
100 occurrence of significant events. Maximum supported value 85 occurrence of significant events. Can range from 0 to 32768 bytes;
101 is 32768 bytes, minimum is 0. 86 default is 0.
102 87
103 There is no need to enable the log buffer unless the node will be 88 There is no need to enable the log buffer unless the node will be
104 managed remotely via TIPC. 89 managed remotely via TIPC.
105 90
106config TIPC_DEBUG 91config TIPC_DEBUG
107 bool "Enable debugging support" 92 bool "Enable debug messages"
108 default n 93 default n
109 help 94 help
110 This will enable debugging of TIPC. 95 This enables debugging of TIPC.
111 96
112 Only say Y here if you are having trouble with TIPC. It will 97 Only say Y here if you are having trouble with TIPC. It will
113 enable the display of detailed information about what is going on. 98 enable the display of detailed information about what is going on.
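
The new range directives bound each option at configuration time, which is what lets the net/tipc/core.c hunk further down drop its runtime clamping. A sketch of the kind of helper being retired (the real delimit() lives in net/tipc/core.h and its exact body may differ):

/* assumed shape of TIPC's delimit() clamp helper */
static inline int delimit(int val, int min, int max)
{
	if (val > max)
		return max;
	if (val < min)
		return min;
	return val;
}

/* before: tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
 * after:  tipc_max_ports = CONFIG_TIPC_PORTS;
 * the Kconfig "range 127 65535" already guarantees the bound */
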
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011fcc407..78091375ca12 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
45 45
46#define MAX_ADDR_STR 32 46#define MAX_ADDR_STR 32
47 47
48static struct media *media_list = NULL; 48static struct media media_list[MAX_MEDIA];
49static u32 media_count = 0; 49static u32 media_count = 0;
50 50
51struct bearer *tipc_bearers = NULL; 51struct bearer tipc_bearers[MAX_BEARERS];
52 52
53/** 53/**
54 * media_name_valid - validate media name 54 * media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
108 int res = -EINVAL; 108 int res = -EINVAL;
109 109
110 write_lock_bh(&tipc_net_lock); 110 write_lock_bh(&tipc_net_lock);
111 if (!media_list)
112 goto exit;
113 111
112 if (tipc_mode != TIPC_NET_MODE) {
113 warn("Media <%s> rejected, not in networked mode yet\n", name);
114 goto exit;
115 }
114 if (!media_name_valid(name)) { 116 if (!media_name_valid(name)) {
115 warn("Media <%s> rejected, illegal name\n", name); 117 warn("Media <%s> rejected, illegal name\n", name);
116 goto exit; 118 goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
660 662
661 663
662 664
663int tipc_bearer_init(void)
664{
665 int res;
666
667 write_lock_bh(&tipc_net_lock);
668 tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
669 media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
670 if (tipc_bearers && media_list) {
671 res = 0;
672 } else {
673 kfree(tipc_bearers);
674 kfree(media_list);
675 tipc_bearers = NULL;
676 media_list = NULL;
677 res = -ENOMEM;
678 }
679 write_unlock_bh(&tipc_net_lock);
680 return res;
681}
682
683void tipc_bearer_stop(void) 665void tipc_bearer_stop(void)
684{ 666{
685 u32 i; 667 u32 i;
686 668
687 if (!tipc_bearers)
688 return;
689
690 for (i = 0; i < MAX_BEARERS; i++) { 669 for (i = 0; i < MAX_BEARERS; i++) {
691 if (tipc_bearers[i].active) 670 if (tipc_bearers[i].active)
692 tipc_bearers[i].publ.blocked = 1; 671 tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
695 if (tipc_bearers[i].active) 674 if (tipc_bearers[i].active)
696 bearer_disable(tipc_bearers[i].publ.name); 675 bearer_disable(tipc_bearers[i].publ.name);
697 } 676 }
698 kfree(tipc_bearers);
699 kfree(media_list);
700 tipc_bearers = NULL;
701 media_list = NULL;
702 media_count = 0; 677 media_count = 0;
703} 678}
704 679
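
Condensed, the storage change in this file is a swap from runtime allocation to static arrays, which is what makes tipc_bearer_init() and the NULL checks removable (identifiers as in the hunks above; MAX_BEARERS and MAX_MEDIA are compile-time constants from bearer.h):

/* before: pointers filled in at runtime by tipc_bearer_init() via kcalloc(),
 * so every user had to guard against a NULL tipc_bearers / media_list:
 *
 *	struct bearer *tipc_bearers = NULL;
 *	static struct media *media_list = NULL;
 *
 * after: storage is static, zero-initialised, and lives as long as the
 * module, so the init function and the NULL checks simply disappear: */
struct bearer tipc_bearers[MAX_BEARERS];
static struct media media_list[MAX_MEDIA];
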
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca5734892713..000228e93f9e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
114 114
115struct link; 115struct link;
116 116
117extern struct bearer *tipc_bearers; 117extern struct bearer tipc_bearers[];
118 118
119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *tipc_media_get_names(void); 120struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398f..52c571fedbe0 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
189 tipc_remote_management = 1; 189 tipc_remote_management = 1;
190 tipc_max_publications = 10000; 190 tipc_max_publications = 10000;
191 tipc_max_subscriptions = 2000; 191 tipc_max_subscriptions = 2000;
192 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536); 192 tipc_max_ports = CONFIG_TIPC_PORTS;
193 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255); 193 tipc_max_zones = CONFIG_TIPC_ZONES;
194 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1); 194 tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
195 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047); 195 tipc_max_nodes = CONFIG_TIPC_NODES;
196 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 196 tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
197 tipc_net_id = 4711; 197 tipc_net_id = 4711;
198 198
199 if ((res = tipc_core_start())) 199 if ((res = tipc_core_start()))
diff --git a/net/tipc/core.h b/net/tipc/core.h
index a881f92a8537..c58a1d16563a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -56,6 +56,7 @@
56#include <linux/netdevice.h> 56#include <linux/netdevice.h>
57#include <linux/in.h> 57#include <linux/in.h>
58#include <linux/list.h> 58#include <linux/list.h>
59#include <linux/slab.h>
59#include <linux/vmalloc.h> 60#include <linux/vmalloc.h>
60 61
61/* 62/*
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 524ba5696d4d..6230d16020c4 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -38,6 +38,7 @@
38#include <net/tipc/tipc_bearer.h> 38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h> 39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/slab.h>
41#include <net/net_namespace.h> 42#include <net/net_namespace.h>
42 43
43#define MAX_ETH_BEARERS 2 44#define MAX_ETH_BEARERS 2
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6f50f6423f63..1a7e4665af80 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1882 (msg_destnode(msg) != tipc_own_addr))) 1882 (msg_destnode(msg) != tipc_own_addr)))
1883 goto cont; 1883 goto cont;
1884 1884
1885 /* Discard non-routeable messages destined for another node */
1886
1887 if (unlikely(!msg_isdata(msg) &&
1888 (msg_destnode(msg) != tipc_own_addr))) {
1889 if ((msg_user(msg) != CONN_MANAGER) &&
1890 (msg_user(msg) != MSG_FRAGMENTER))
1891 goto cont;
1892 }
1893
1885 /* Locate unicast link endpoint that should handle message */ 1894 /* Locate unicast link endpoint that should handle message */
1886 1895
1887 n_ptr = tipc_node_find(msg_prevnode(msg)); 1896 n_ptr = tipc_node_find(msg_prevnode(msg));
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608bf510..f25b1cdb64eb 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
116*/ 116*/
117 117
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct _zone *tipc_zones[256] = { NULL, };
120struct network tipc_net = { tipc_zones };
120 121
121struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) 122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 123{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
158 } 159 }
159} 160}
160 161
161static int net_init(void)
162{
163 memset(&tipc_net, 0, sizeof(tipc_net));
164 tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
165 if (!tipc_net.zones) {
166 return -ENOMEM;
167 }
168 return 0;
169}
170
171static void net_stop(void) 162static void net_stop(void)
172{ 163{
173 u32 z_num; 164 u32 z_num;
174 165
175 if (!tipc_net.zones) 166 for (z_num = 1; z_num <= tipc_max_zones; z_num++)
176 return;
177
178 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
179 tipc_zone_delete(tipc_net.zones[z_num]); 167 tipc_zone_delete(tipc_net.zones[z_num]);
180 }
181 kfree(tipc_net.zones);
182 tipc_net.zones = NULL;
183} 168}
184 169
185static void net_route_named_msg(struct sk_buff *buf) 170static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
282 tipc_named_reinit(); 267 tipc_named_reinit();
283 tipc_port_reinit(); 268 tipc_port_reinit();
284 269
285 if ((res = tipc_bearer_init()) || 270 if ((res = tipc_cltr_init()) ||
286 (res = net_init()) ||
287 (res = tipc_cltr_init()) ||
288 (res = tipc_bclink_init())) { 271 (res = tipc_bclink_init())) {
289 return res; 272 return res;
290 } 273 }
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 414fc34b8bea..8dea66500cf5 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void)
153 153
154u32 tipc_ref_acquire(void *object, spinlock_t **lock) 154u32 tipc_ref_acquire(void *object, spinlock_t **lock)
155{ 155{
156 struct reference *entry;
157 u32 index; 156 u32 index;
158 u32 index_mask; 157 u32 index_mask;
159 u32 next_plus_upper; 158 u32 next_plus_upper;
160 u32 ref; 159 u32 ref;
160 struct reference *entry = NULL;
161 161
162 if (!object) { 162 if (!object) {
163 err("Attempt to acquire reference to non-existent object\n"); 163 err("Attempt to acquire reference to non-existent object\n");
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
175 index = tipc_ref_table.first_free; 175 index = tipc_ref_table.first_free;
176 entry = &(tipc_ref_table.entries[index]); 176 entry = &(tipc_ref_table.entries[index]);
177 index_mask = tipc_ref_table.index_mask; 177 index_mask = tipc_ref_table.index_mask;
178 /* take lock in case a previous user of entry still holds it */
179 spin_lock_bh(&entry->lock);
180 next_plus_upper = entry->ref; 178 next_plus_upper = entry->ref;
181 tipc_ref_table.first_free = next_plus_upper & index_mask; 179 tipc_ref_table.first_free = next_plus_upper & index_mask;
182 ref = (next_plus_upper & ~index_mask) + index; 180 ref = (next_plus_upper & ~index_mask) + index;
183 entry->ref = ref;
184 entry->object = object;
185 *lock = &entry->lock;
186 } 181 }
187 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 182 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
188 index = tipc_ref_table.init_point++; 183 index = tipc_ref_table.init_point++;
189 entry = &(tipc_ref_table.entries[index]); 184 entry = &(tipc_ref_table.entries[index]);
190 spin_lock_init(&entry->lock); 185 spin_lock_init(&entry->lock);
191 spin_lock_bh(&entry->lock);
192 ref = tipc_ref_table.start_mask + index; 186 ref = tipc_ref_table.start_mask + index;
193 entry->ref = ref;
194 entry->object = object;
195 *lock = &entry->lock;
196 } 187 }
197 else { 188 else {
198 ref = 0; 189 ref = 0;
199 } 190 }
200 write_unlock_bh(&ref_table_lock); 191 write_unlock_bh(&ref_table_lock);
201 192
193 /*
194 * Grab the lock so no one else can modify this entry
195 * While we assign its ref value & object pointer
196 */
197 if (entry) {
198 spin_lock_bh(&entry->lock);
199 entry->ref = ref;
200 entry->object = object;
201 *lock = &entry->lock;
202 /*
203 * keep it locked, the caller is responsible
204 * for unlocking this when they're done with it
205 */
206 }
207
202 return ref; 208 return ref;
203} 209}
204 210
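
The new comments make the locking contract explicit: on success the entry's spinlock is handed back to the caller, still held. A hypothetical caller (object and function names invented for illustration) therefore looks roughly like this:

#include <linux/spinlock.h>

/* tipc_ref_acquire() is declared in net/tipc/ref.h; it returns 0 when the
 * reference table is exhausted, otherwise the new reference with *lock held. */
static u32 publish_object(void *my_object)
{
	spinlock_t *lock;
	u32 ref;

	ref = tipc_ref_acquire(my_object, &lock);
	if (!ref)
		return 0;

	/* finish initialising whatever the reference points at ... */

	spin_unlock_bh(lock);	/* caller is responsible for unlocking */
	return ref;
}
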
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1ea64f09cc45..cfb20b80b3a1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -40,9 +40,9 @@
40#include <linux/socket.h> 40#include <linux/socket.h>
41#include <linux/errno.h> 41#include <linux/errno.h>
42#include <linux/mm.h> 42#include <linux/mm.h>
43#include <linux/slab.h>
44#include <linux/poll.h> 43#include <linux/poll.h>
45#include <linux/fcntl.h> 44#include <linux/fcntl.h>
45#include <linux/gfp.h>
46#include <asm/string.h> 46#include <asm/string.h>
47#include <asm/atomic.h> 47#include <asm/atomic.h>
48#include <net/sock.h> 48#include <net/sock.h>
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1322 if (!sock_owned_by_user(sk)) { 1322 if (!sock_owned_by_user(sk)) {
1323 res = filter_rcv(sk, buf); 1323 res = filter_rcv(sk, buf);
1324 } else { 1324 } else {
1325 sk_add_backlog(sk, buf); 1325 if (sk_add_backlog(sk, buf))
1326 res = TIPC_OK; 1326 res = TIPC_ERR_OVERLOAD;
1327 else
1328 res = TIPC_OK;
1327 } 1329 }
1328 bh_unlock_sock(sk); 1330 bh_unlock_sock(sk);
1329 1331
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ac91f0dfa144..ff123e56114a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -76,19 +76,6 @@ struct top_srv {
76static struct top_srv topsrv = { 0 }; 76static struct top_srv topsrv = { 0 };
77 77
78/** 78/**
79 * htohl - convert value to endianness used by destination
80 * @in: value to convert
81 * @swap: non-zero if endianness must be reversed
82 *
83 * Returns converted value
84 */
85
86static u32 htohl(u32 in, int swap)
87{
88 return swap ? swab32(in) : in;
89}
90
91/**
92 * subscr_send_event - send a message containing a tipc_event to the subscriber 79 * subscr_send_event - send a message containing a tipc_event to the subscriber
93 * 80 *
94 * Note: Must not hold subscriber's server port lock, since tipc_send() will 81 * Note: Must not hold subscriber's server port lock, since tipc_send() will
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub,
107 msg_sect.iov_base = (void *)&sub->evt; 94 msg_sect.iov_base = (void *)&sub->evt;
108 msg_sect.iov_len = sizeof(struct tipc_event); 95 msg_sect.iov_len = sizeof(struct tipc_event);
109 96
110 sub->evt.event = htohl(event, sub->swap); 97 sub->evt.event = htonl(event);
111 sub->evt.found_lower = htohl(found_lower, sub->swap); 98 sub->evt.found_lower = htonl(found_lower);
112 sub->evt.found_upper = htohl(found_upper, sub->swap); 99 sub->evt.found_upper = htonl(found_upper);
113 sub->evt.port.ref = htohl(port_ref, sub->swap); 100 sub->evt.port.ref = htonl(port_ref);
114 sub->evt.port.node = htohl(node, sub->swap); 101 sub->evt.port.node = htonl(node);
115 tipc_send(sub->server_ref, 1, &msg_sect); 102 tipc_send(sub->server_ref, 1, &msg_sect);
116} 103}
117 104
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s,
287{ 274{
288 struct subscription *sub; 275 struct subscription *sub;
289 struct subscription *sub_temp; 276 struct subscription *sub_temp;
277 __u32 type, lower, upper;
290 int found = 0; 278 int found = 0;
291 279
292 /* Find first matching subscription, exit if not found */ 280 /* Find first matching subscription, exit if not found */
293 281
282 type = ntohl(s->seq.type);
283 lower = ntohl(s->seq.lower);
284 upper = ntohl(s->seq.upper);
285
294 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 286 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
295 subscription_list) { 287 subscription_list) {
296 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { 288 if ((type == sub->seq.type) &&
297 found = 1; 289 (lower == sub->seq.lower) &&
298 break; 290 (upper == sub->seq.upper)) {
299 } 291 found = 1;
292 break;
293 }
300 } 294 }
301 if (!found) 295 if (!found)
302 return; 296 return;
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
325 struct subscriber *subscriber) 319 struct subscriber *subscriber)
326{ 320{
327 struct subscription *sub; 321 struct subscription *sub;
328 int swap;
329
330 /* Determine subscriber's endianness */
331
332 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
333 322
334 /* Detect & process a subscription cancellation request */ 323 /* Detect & process a subscription cancellation request */
335 324
336 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { 325 if (ntohl(s->filter) & TIPC_SUB_CANCEL) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
338 subscr_cancel(s, subscriber); 326 subscr_cancel(s, subscriber);
339 return NULL; 327 return NULL;
340 } 328 }
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
359 347
360 /* Initialize subscription object */ 348 /* Initialize subscription object */
361 349
362 sub->seq.type = htohl(s->seq.type, swap); 350 sub->seq.type = ntohl(s->seq.type);
363 sub->seq.lower = htohl(s->seq.lower, swap); 351 sub->seq.lower = ntohl(s->seq.lower);
364 sub->seq.upper = htohl(s->seq.upper, swap); 352 sub->seq.upper = ntohl(s->seq.upper);
365 sub->timeout = htohl(s->timeout, swap); 353 sub->timeout = ntohl(s->timeout);
366 sub->filter = htohl(s->filter, swap); 354 sub->filter = ntohl(s->filter);
367 if ((!(sub->filter & TIPC_SUB_PORTS) == 355 if ((!(sub->filter & TIPC_SUB_PORTS) ==
368 !(sub->filter & TIPC_SUB_SERVICE)) || 356 !(sub->filter & TIPC_SUB_SERVICE)) ||
369 (sub->seq.lower > sub->seq.upper)) { 357 (sub->seq.lower > sub->seq.upper)) {
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
376 INIT_LIST_HEAD(&sub->nameseq_list); 364 INIT_LIST_HEAD(&sub->nameseq_list);
377 list_add(&sub->subscription_list, &subscriber->subscription_list); 365 list_add(&sub->subscription_list, &subscriber->subscription_list);
378 sub->server_ref = subscriber->port_ref; 366 sub->server_ref = subscriber->port_ref;
379 sub->swap = swap;
380 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 367 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
381 atomic_inc(&topsrv.subscription_count); 368 atomic_inc(&topsrv.subscription_count);
382 if (sub->timeout != TIPC_WAIT_FOREVER) { 369 if (sub->timeout != TIPC_WAIT_FOREVER) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d202..c20f496d95b2 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub,
53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
54 * @subscription_list: adjacent subscriptions in subscriber's subscription list 54 * @subscription_list: adjacent subscriptions in subscriber's subscription list
55 * @server_ref: object reference of server port associated with subscription 55 * @server_ref: object reference of server port associated with subscription
56 * @swap: indicates if subscriber uses opposite endianness in its messages
57 * @evt: template for events generated by subscription 56 * @evt: template for events generated by subscription
58 */ 57 */
59 58
@@ -66,7 +65,6 @@ struct subscription {
66 struct list_head nameseq_list; 65 struct list_head nameseq_list;
67 struct list_head subscription_list; 66 struct list_head subscription_list;
68 u32 server_ref; 67 u32 server_ref;
69 int swap;
70 struct tipc_event evt; 68 struct tipc_event evt;
71}; 69};
72 70
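
The net effect across subscr.c and subscr.h is that the topology-server protocol now assumes fixed network byte order rather than probing each subscriber's endianness. A small illustration of the difference (values arbitrary; htohl() is the helper removed above):

#include <asm/byteorder.h>
#include <linux/swab.h>

/* removed scheme: byte-swap only when the subscriber's native order differed */
static u32 htohl(u32 in, int swap)
{
	return swap ? swab32(in) : in;
}

static void byteorder_demo(void)
{
	u32 old_style = htohl(0x12345678, 1);	/* 0x78563412, swapped for that peer only */
	u32 new_style = htonl(0x12345678);	/* always big-endian on the wire */

	(void)old_style;
	(void)new_style;
}
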
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f25511903115..3d9122e78f41 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
144/* 144/*
145 * SMP locking strategy: 145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock 146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate rwlock. 147 * each socket state is protected by separate spin lock.
148 */ 148 */
149 149
150static inline unsigned unix_hash_fold(__wsum n) 150static inline unsigned unix_hash_fold(__wsum n)
@@ -2224,7 +2224,7 @@ static const struct net_proto_family unix_family_ops = {
2224}; 2224};
2225 2225
2226 2226
2227static int unix_net_init(struct net *net) 2227static int __net_init unix_net_init(struct net *net)
2228{ 2228{
2229 int error = -ENOMEM; 2229 int error = -ENOMEM;
2230 2230
@@ -2243,7 +2243,7 @@ out:
2243 return error; 2243 return error;
2244} 2244}
2245 2245
2246static void unix_net_exit(struct net *net) 2246static void __net_exit unix_net_exit(struct net *net)
2247{ 2247{
2248 unix_sysctl_unregister(net); 2248 unix_sysctl_unregister(net);
2249 proc_net_remove(net, "unix"); 2249 proc_net_remove(net, "unix");
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 19c17e4a0c8b..14c22c3768da 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -74,7 +74,6 @@
74#include <linux/un.h> 74#include <linux/un.h>
75#include <linux/net.h> 75#include <linux/net.h>
76#include <linux/fs.h> 76#include <linux/fs.h>
77#include <linux/slab.h>
78#include <linux/skbuff.h> 77#include <linux/skbuff.h>
79#include <linux/netdevice.h> 78#include <linux/netdevice.h>
80#include <linux/file.h> 79#include <linux/file.h>
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 708f5df6b7f0..397cffebb3b6 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/slab.h>
13#include <linux/sysctl.h> 14#include <linux/sysctl.h>
14 15
15#include <net/af_unix.h> 16#include <net/af_unix.h>
@@ -31,7 +32,7 @@ static struct ctl_path unix_path[] = {
31 { }, 32 { },
32}; 33};
33 34
34int unix_sysctl_register(struct net *net) 35int __net_init unix_sysctl_register(struct net *net)
35{ 36{
36 struct ctl_table *table; 37 struct ctl_table *table;
37 38
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13ae..d5b7c3779c43 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -72,6 +72,7 @@
72 * wimax_msg_send() 72 * wimax_msg_send()
73 */ 73 */
74#include <linux/device.h> 74#include <linux/device.h>
75#include <linux/slab.h>
75#include <net/genetlink.h> 76#include <net/genetlink.h>
76#include <linux/netdevice.h> 77#include <linux/netdevice.h>
77#include <linux/wimax.h> 78#include <linux/wimax.h>
@@ -320,8 +321,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
320EXPORT_SYMBOL_GPL(wimax_msg); 321EXPORT_SYMBOL_GPL(wimax_msg);
321 322
322 323
323static const 324static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
324struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
325 [WIMAX_GNL_MSG_IFIDX] = { 325 [WIMAX_GNL_MSG_IFIDX] = {
326 .type = NLA_U32, 326 .type = NLA_U32,
327 }, 327 },
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4f..4dc82a54ba30 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)
91EXPORT_SYMBOL(wimax_reset); 91EXPORT_SYMBOL(wimax_reset);
92 92
93 93
94static const 94static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
95struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = { 95 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32, 96 .type = NLA_U32,
98 }, 97 },
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d920..e978c7136c97 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
410 * just query). 410 * just query).
411 */ 411 */
412 412
413static const 413static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
414struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
415 [WIMAX_GNL_RFKILL_IFIDX] = { 414 [WIMAX_GNL_RFKILL_IFIDX] = {
416 .type = NLA_U32, 415 .type = NLA_U32,
417 }, 416 },
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056d..11ad3356eb56 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@
33#include "debug-levels.h" 33#include "debug-levels.h"
34 34
35 35
36static const 36static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
37struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
38 [WIMAX_GNL_STGET_IFIDX] = { 37 [WIMAX_GNL_STGET_IFIDX] = {
39 .type = NLA_U32, 38 .type = NLA_U32,
40 }, 39 },
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f830..1ed65dbdab03 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -51,6 +51,7 @@
51 * wimax_rfkill_rm() 51 * wimax_rfkill_rm()
52 */ 52 */
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/gfp.h>
54#include <net/genetlink.h> 55#include <net/genetlink.h>
55#include <linux/netdevice.h> 56#include <linux/netdevice.h>
56#include <linux/wimax.h> 57#include <linux/wimax.h>
@@ -75,8 +76,7 @@ MODULE_PARM_DESC(debug,
75 * close to where the data is generated. 76 * close to where the data is generated.
76 */ 77 */
77/* 78/*
78static const 79static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
79struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
80 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 }, 80 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
81 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 }, 81 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
82}; 82};
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 000000000000..c33451b896d9
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701aa..d0ee29063e5d 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config WIRELESS_OLD_REGULATORY 97config CFG80211_INTERNAL_REGDB
98 bool "Old wireless static regulatory definitions" 98 bool "use statically compiled regulatory rules database" if EMBEDDED
99 default n 99 default n
100 depends on CFG80211 100 depends on CFG80211
101 ---help--- 101 ---help---
102 This option enables the old static regulatory information 102 This option generates an internal data structure representing
103 and uses it within the new framework. This option is available 103 the wireless regulatory rules described in net/wireless/db.txt
104 for historical reasons and it is advised to leave it off. 104 and includes code to query that database. This is an alternative
105 to using CRDA for defining regulatory rules for the kernel.
105 106
106 For details see: 107 For details see:
107 108
108 http://wireless.kernel.org/en/developers/Regulatory 109 http://wireless.kernel.org/en/developers/Regulatory
109 110
110 Say N and if you say Y, please tell us why. The default is N. 111 Most distributions have a CRDA package. So if unsure, say N.
111 112
112config CFG80211_WEXT 113config CFG80211_WEXT
113 bool "cfg80211 wireless extensions compatibility" 114 bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab2..e77e508126fa 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o 13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o 15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
16 17
17ccflags-y += -D__CHECK_ENDIAN__ 18ccflags-y += -D__CHECK_ENDIAN__
19
20$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
21 @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
22
23clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b365..bf1737fc9a7e 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
41 return result; 41 return result;
42} 42}
43 43
44int rdev_set_freq(struct cfg80211_registered_device *rdev, 44struct ieee80211_channel *
45 struct wireless_dev *for_wdev, 45rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
46 int freq, enum nl80211_channel_type channel_type) 46 int freq, enum nl80211_channel_type channel_type)
47{ 47{
48 struct ieee80211_channel *chan; 48 struct ieee80211_channel *chan;
49 struct ieee80211_sta_ht_cap *ht_cap; 49 struct ieee80211_sta_ht_cap *ht_cap;
50 int result;
51
52 if (rdev_fixed_channel(rdev, for_wdev))
53 return -EBUSY;
54
55 if (!rdev->ops->set_channel)
56 return -EOPNOTSUPP;
57 50
58 chan = ieee80211_get_channel(&rdev->wiphy, freq); 51 chan = ieee80211_get_channel(&rdev->wiphy, freq);
59 52
60 /* Primary channel not allowed */ 53 /* Primary channel not allowed */
61 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 54 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
62 return -EINVAL; 55 return NULL;
63 56
64 if (channel_type == NL80211_CHAN_HT40MINUS && 57 if (channel_type == NL80211_CHAN_HT40MINUS &&
65 chan->flags & IEEE80211_CHAN_NO_HT40MINUS) 58 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
66 return -EINVAL; 59 return NULL;
67 else if (channel_type == NL80211_CHAN_HT40PLUS && 60 else if (channel_type == NL80211_CHAN_HT40PLUS &&
68 chan->flags & IEEE80211_CHAN_NO_HT40PLUS) 61 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
69 return -EINVAL; 62 return NULL;
70 63
71 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; 64 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
72 65
73 if (channel_type != NL80211_CHAN_NO_HT) { 66 if (channel_type != NL80211_CHAN_NO_HT) {
74 if (!ht_cap->ht_supported) 67 if (!ht_cap->ht_supported)
75 return -EINVAL; 68 return NULL;
76 69
77 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 70 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
78 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) 71 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
79 return -EINVAL; 72 return NULL;
80 } 73 }
81 74
75 return chan;
76}
77
78int rdev_set_freq(struct cfg80211_registered_device *rdev,
79 struct wireless_dev *for_wdev,
80 int freq, enum nl80211_channel_type channel_type)
81{
82 struct ieee80211_channel *chan;
83 int result;
84
85 if (rdev_fixed_channel(rdev, for_wdev))
86 return -EBUSY;
87
88 if (!rdev->ops->set_channel)
89 return -EOPNOTSUPP;
90
91 chan = rdev_freq_to_chan(rdev, freq, channel_type);
92 if (!chan)
93 return -EINVAL;
94
82 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); 95 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
83 if (result) 96 if (result)
84 return result; 97 return result;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 92b812442488..6ac70c101523 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/slab.h>
11#include <linux/nl80211.h> 12#include <linux/nl80211.h>
12#include <linux/debugfs.h> 13#include <linux/debugfs.h>
13#include <linux/notifier.h> 14#include <linux/notifier.h>
@@ -31,15 +32,10 @@ MODULE_AUTHOR("Johannes Berg");
31MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
32MODULE_DESCRIPTION("wireless configuration support"); 33MODULE_DESCRIPTION("wireless configuration support");
33 34
34/* RCU might be appropriate here since we usually 35/* RCU-protected (and cfg80211_mutex for writers) */
35 * only read the list, and that can happen quite
36 * often because we need to do it for each command */
37LIST_HEAD(cfg80211_rdev_list); 36LIST_HEAD(cfg80211_rdev_list);
38int cfg80211_rdev_list_generation; 37int cfg80211_rdev_list_generation;
39 38
40/*
41 * This is used to protect the cfg80211_rdev_list
42 */
43DEFINE_MUTEX(cfg80211_mutex); 39DEFINE_MUTEX(cfg80211_mutex);
44 40
45/* for debugfs */ 41/* for debugfs */
@@ -402,6 +398,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
402 rdev->wiphy.retry_long = 4; 398 rdev->wiphy.retry_long = 4;
403 rdev->wiphy.frag_threshold = (u32) -1; 399 rdev->wiphy.frag_threshold = (u32) -1;
404 rdev->wiphy.rts_threshold = (u32) -1; 400 rdev->wiphy.rts_threshold = (u32) -1;
401 rdev->wiphy.coverage_class = 0;
405 402
406 return &rdev->wiphy; 403 return &rdev->wiphy;
407} 404}
@@ -417,6 +414,18 @@ int wiphy_register(struct wiphy *wiphy)
417 int i; 414 int i;
418 u16 ifmodes = wiphy->interface_modes; 415 u16 ifmodes = wiphy->interface_modes;
419 416
417 if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
418 return -EINVAL;
419
420 if (WARN_ON(wiphy->addresses &&
421 !is_zero_ether_addr(wiphy->perm_addr) &&
422 memcmp(wiphy->perm_addr, wiphy->addresses[0].addr,
423 ETH_ALEN)))
424 return -EINVAL;
425
426 if (wiphy->addresses)
427 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
428
420 /* sanity check ifmodes */ 429 /* sanity check ifmodes */
421 WARN_ON(!ifmodes); 430 WARN_ON(!ifmodes);
422 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; 431 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -476,7 +485,7 @@ int wiphy_register(struct wiphy *wiphy)
476 /* set up regulatory info */ 485 /* set up regulatory info */
477 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 486 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
478 487
479 list_add(&rdev->list, &cfg80211_rdev_list); 488 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
480 cfg80211_rdev_list_generation++; 489 cfg80211_rdev_list_generation++;
481 490
482 mutex_unlock(&cfg80211_mutex); 491 mutex_unlock(&cfg80211_mutex);
@@ -553,7 +562,8 @@ void wiphy_unregister(struct wiphy *wiphy)
553 * it impossible to find from userspace. 562 * it impossible to find from userspace.
554 */ 563 */
555 debugfs_remove_recursive(rdev->wiphy.debugfsdir); 564 debugfs_remove_recursive(rdev->wiphy.debugfsdir);
556 list_del(&rdev->list); 565 list_del_rcu(&rdev->list);
566 synchronize_rcu();
557 567
558 /* 568 /*
559 * Try to grab rdev->mtx. If a command is still in progress, 569 * Try to grab rdev->mtx. If a command is still in progress,
@@ -668,8 +678,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
668 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); 678 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
669 INIT_LIST_HEAD(&wdev->event_list); 679 INIT_LIST_HEAD(&wdev->event_list);
670 spin_lock_init(&wdev->event_lock); 680 spin_lock_init(&wdev->event_lock);
681 INIT_LIST_HEAD(&wdev->action_registrations);
682 spin_lock_init(&wdev->action_registrations_lock);
683
671 mutex_lock(&rdev->devlist_mtx); 684 mutex_lock(&rdev->devlist_mtx);
672 list_add(&wdev->list, &rdev->netdev_list); 685 list_add_rcu(&wdev->list, &rdev->netdev_list);
673 rdev->devlist_generation++; 686 rdev->devlist_generation++;
674 /* can only change netns with wiphy */ 687 /* can only change netns with wiphy */
675 dev->features |= NETIF_F_NETNS_LOCAL; 688 dev->features |= NETIF_F_NETNS_LOCAL;
@@ -686,19 +699,21 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
686 wdev->wext.default_key = -1; 699 wdev->wext.default_key = -1;
687 wdev->wext.default_mgmt_key = -1; 700 wdev->wext.default_mgmt_key = -1;
688 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 701 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
702#endif
703
689 if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) 704 if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
690 wdev->wext.ps = true; 705 wdev->ps = true;
691 else 706 else
692 wdev->wext.ps = false; 707 wdev->ps = false;
693 wdev->wext.ps_timeout = 100; 708 wdev->ps_timeout = 100;
694 if (rdev->ops->set_power_mgmt) 709 if (rdev->ops->set_power_mgmt)
695 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 710 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
696 wdev->wext.ps, 711 wdev->ps,
697 wdev->wext.ps_timeout)) { 712 wdev->ps_timeout)) {
698 /* assume this means it's off */ 713 /* assume this means it's off */
699 wdev->wext.ps = false; 714 wdev->ps = false;
700 } 715 }
701#endif 716
702 if (!dev->ethtool_ops) 717 if (!dev->ethtool_ops)
703 dev->ethtool_ops = &cfg80211_ethtool_ops; 718 dev->ethtool_ops = &cfg80211_ethtool_ops;
704 719
@@ -781,13 +796,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
781 */ 796 */
782 if (!list_empty(&wdev->list)) { 797 if (!list_empty(&wdev->list)) {
783 sysfs_remove_link(&dev->dev.kobj, "phy80211"); 798 sysfs_remove_link(&dev->dev.kobj, "phy80211");
784 list_del_init(&wdev->list); 799 list_del_rcu(&wdev->list);
785 rdev->devlist_generation++; 800 rdev->devlist_generation++;
801 cfg80211_mlme_purge_actions(wdev);
786#ifdef CONFIG_CFG80211_WEXT 802#ifdef CONFIG_CFG80211_WEXT
787 kfree(wdev->wext.keys); 803 kfree(wdev->wext.keys);
788#endif 804#endif
789 } 805 }
790 mutex_unlock(&rdev->devlist_mtx); 806 mutex_unlock(&rdev->devlist_mtx);
807 /*
808 * synchronise (so that we won't find this netdev
809 * from other code any more) and then clear the list
810 * head so that the above code can safely check for
811 * !list_empty() to avoid double-cleanup.
812 */
813 synchronize_rcu();
814 INIT_LIST_HEAD(&wdev->list);
791 break; 815 break;
792 case NETDEV_PRE_UP: 816 case NETDEV_PRE_UP:
793 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 817 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
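
With cfg80211_rdev_list and each rdev's netdev_list now published via list_add_rcu()/list_del_rcu() and retired with synchronize_rcu(), readers that only inspect the lists can do so locklessly; a schematic reader, not part of this patch (struct cfg80211_registered_device and cfg80211_rdev_list come from net/wireless/core.h):

#include <linux/rcupdate.h>
#include <linux/rculist.h>

static void walk_rdevs(void)
{
	struct cfg80211_registered_device *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
		/* read-only use of rdev here; writers still serialise on
		 * cfg80211_mutex, as the new comment in core.c states */
	}
	rcu_read_unlock();
}
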
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc94106..d52da913145a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Wireless configuration interface internals. 2 * Wireless configuration interface internals.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#ifndef __NET_WIRELESS_CORE_H 6#ifndef __NET_WIRELESS_CORE_H
7#define __NET_WIRELESS_CORE_H 7#define __NET_WIRELESS_CORE_H
@@ -48,6 +48,7 @@ struct cfg80211_registered_device {
48 48
49 /* associate netdev list */ 49 /* associate netdev list */
50 struct mutex devlist_mtx; 50 struct mutex devlist_mtx;
51 /* protected by devlist_mtx or RCU */
51 struct list_head netdev_list; 52 struct list_head netdev_list;
52 int devlist_generation; 53 int devlist_generation;
53 int opencount; /* also protected by devlist_mtx */ 54 int opencount; /* also protected by devlist_mtx */
@@ -111,7 +112,8 @@ struct cfg80211_internal_bss {
111 unsigned long ts; 112 unsigned long ts;
112 struct kref ref; 113 struct kref ref;
113 atomic_t hold; 114 atomic_t hold;
114 bool ies_allocated; 115 bool beacon_ies_allocated;
116 bool proberesp_ies_allocated;
115 117
116 /* must be last because of priv member */ 118 /* must be last because of priv member */
117 struct cfg80211_bss pub; 119 struct cfg80211_bss pub;
@@ -327,6 +329,15 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
327 const u8 *resp_ie, size_t resp_ie_len, 329 const u8 *resp_ie, size_t resp_ie_len,
328 u16 status, bool wextev, 330 u16 status, bool wextev,
329 struct cfg80211_bss *bss); 331 struct cfg80211_bss *bss);
332int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
333 const u8 *match_data, int match_len);
334void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid);
335void cfg80211_mlme_purge_actions(struct wireless_dev *wdev);
336int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
337 struct net_device *dev,
338 struct ieee80211_channel *chan,
339 enum nl80211_channel_type channel_type,
340 const u8 *buf, size_t len, u64 *cookie);
330 341
331/* SME */ 342/* SME */
332int __cfg80211_connect(struct cfg80211_registered_device *rdev, 343int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -374,10 +385,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
374struct ieee80211_channel * 385struct ieee80211_channel *
375rdev_fixed_channel(struct cfg80211_registered_device *rdev, 386rdev_fixed_channel(struct cfg80211_registered_device *rdev,
376 struct wireless_dev *for_wdev); 387 struct wireless_dev *for_wdev);
388struct ieee80211_channel *
389rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
390 int freq, enum nl80211_channel_type channel_type);
377int rdev_set_freq(struct cfg80211_registered_device *rdev, 391int rdev_set_freq(struct cfg80211_registered_device *rdev,
378 struct wireless_dev *for_wdev, 392 struct wireless_dev *for_wdev,
379 int freq, enum nl80211_channel_type channel_type); 393 int freq, enum nl80211_channel_type channel_type);
380 394
395u16 cfg80211_calculate_bitrate(struct rate_info *rate);
396
381#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 397#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
382#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 398#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
383#else 399#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 000000000000..a2fc3a09ccdc
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
1#
2# This file is a placeholder to prevent accidental build breakage if someone
3# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
4# enable that build option.
5#
6# You should be using CRDA instead. It is even better if you use the CRDA
7# package provided by your distribution, since they will probably keep it
8# up-to-date on your behalf.
9#
10# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
11# need to replace this file with one containing appropriately formatted
12# regulatory rules that cover the regulatory domains you will be using. Your
13# best option is to extract the db.txt file from the wireless-regdb git
14# repository:
15#
16# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
17#
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 2e4895615037..a4991a3efec0 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include "core.h" 13#include "core.h"
13#include "debugfs.h" 14#include "debugfs.h"
14 15
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 000000000000..3cc9e69880a8
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
1#!/usr/bin/awk -f
2#
3# genregdb.awk -- generate regdb.c from db.txt
4#
5# Actually, it reads from stdin (presumed to be db.txt) and writes
6# to stdout (presumed to be regdb.c), but close enough...
7#
8# Copyright 2009 John W. Linville <linville@tuxdriver.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2 as
12# published by the Free Software Foundation.
13#
14
15BEGIN {
16 active = 0
17 rules = 0;
18 print "/*"
19 print " * DO NOT EDIT -- file generated from data in db.txt"
20 print " */"
21 print ""
22 print "#include <linux/nl80211.h>"
23 print "#include <net/cfg80211.h>"
24 print ""
25 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
26}
27
28/^[ \t]*#/ {
29 # Ignore
30}
31
32!active && /^[ \t]*$/ {
33 # Ignore
34}
35
36!active && /country/ {
37 country=$2
38 sub(/:/, "", country)
39 printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
40 printf "\t.alpha2 = \"%s\",\n", country
41 printf "\t.reg_rules = {\n"
42 active = 1
43 regdb = regdb "\t&regdom_" country ",\n"
44}
45
46active && /^[ \t]*\(/ {
47 start = $1
48 sub(/\(/, "", start)
49 end = $3
50 bw = $5
51 sub(/\),/, "", bw)
52 gain = $6
53 sub(/\(/, "", gain)
54 sub(/,/, "", gain)
55 power = $7
56 sub(/\)/, "", power)
57 sub(/,/, "", power)
58 # power might be in mW...
59 units = $8
60 sub(/\)/, "", units)
61 sub(/,/, "", units)
62 if (units == "mW") {
63 if (power == 100) {
64 power = 20
65 } else if (power == 200) {
66 power = 23
67 } else if (power == 500) {
68 power = 27
69 } else if (power == 1000) {
70 power = 30
71 } else {
72 print "Unknown power value in database!"
73 }
74 }
75 flagstr = ""
76 for (i=8; i<=NF; i++)
77 flagstr = flagstr $i
78 split(flagstr, flagarray, ",")
79 flags = ""
80 for (arg in flagarray) {
81 if (flagarray[arg] == "NO-OFDM") {
82 flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
83 } else if (flagarray[arg] == "NO-CCK") {
84 flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
85 } else if (flagarray[arg] == "NO-INDOOR") {
86 flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
87 } else if (flagarray[arg] == "NO-OUTDOOR") {
88 flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
89 } else if (flagarray[arg] == "DFS") {
90 flags = flags "\n\t\t\tNL80211_RRF_DFS | "
91 } else if (flagarray[arg] == "PTP-ONLY") {
92 flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
93 } else if (flagarray[arg] == "PTMP-ONLY") {
94 flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
95 } else if (flagarray[arg] == "PASSIVE-SCAN") {
96 flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
97 } else if (flagarray[arg] == "NO-IBSS") {
98 flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
99 }
100 }
101 flags = flags "0"
102 printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
103 rules++
104}
105
106active && /^[ \t]*$/ {
107 active = 0
108 printf "\t},\n"
109 printf "\t.n_reg_rules = %d\n", rules
110 printf "};\n\n"
111 rules = 0;
112}
113
114END {
115 print regdb "};"
116 print ""
117 print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
118}
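
Fed a db.txt containing a single one-rule country block, the script above would emit a regdb.c along these lines (illustrative output only; the real contents depend entirely on db.txt):

/*
 * DO NOT EDIT -- file generated from data in db.txt
 */

#include <linux/nl80211.h>
#include <net/cfg80211.h>

static const struct ieee80211_regdomain regdom_US = {
	.alpha2 = "US",
	.reg_rules = {
		REG_RULE(2402, 2472, 40, 6, 27, 0),
	},
	.n_reg_rules = 1
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_US,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);
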
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 6ef5a491fb4b..6a5acf750174 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
8#include <linux/if_arp.h> 8#include <linux/if_arp.h>
9#include <linux/slab.h>
9#include <net/cfg80211.h> 10#include <net/cfg80211.h>
10#include "wext-compat.h" 11#include "wext-compat.h"
11#include "nl80211.h" 12#include "nl80211.h"
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 2301dc1edc4c..b7fa31d5fd13 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -237,7 +237,6 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
237 return -1; 237 return -1;
238 238
239 pos = skb->data + hdr_len + CCMP_HDR_LEN; 239 pos = skb->data + hdr_len + CCMP_HDR_LEN;
240 mic = skb_put(skb, CCMP_MIC_LEN);
241 hdr = (struct ieee80211_hdr *)skb->data; 240 hdr = (struct ieee80211_hdr *)skb->data;
242 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); 241 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
243 242
@@ -257,6 +256,7 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
257 pos += len; 256 pos += len;
258 } 257 }
259 258
259 mic = skb_put(skb, CCMP_MIC_LEN);
260 for (i = 0; i < CCMP_MIC_LEN; i++) 260 for (i = 0; i < CCMP_MIC_LEN; i++)
261 mic[i] = b[i] ^ s0[i]; 261 mic[i] = b[i] ^ s0[i];
262 262
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index c36287399d7e..8cbdb32ff316 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -36,6 +36,8 @@ MODULE_AUTHOR("Jouni Malinen");
36MODULE_DESCRIPTION("lib80211 crypt: TKIP"); 36MODULE_DESCRIPTION("lib80211 crypt: TKIP");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39#define TKIP_HDR_LEN 8
40
39struct lib80211_tkip_data { 41struct lib80211_tkip_data {
40#define TKIP_KEY_LEN 32 42#define TKIP_KEY_LEN 32
41 u8 key[TKIP_KEY_LEN]; 43 u8 key[TKIP_KEY_LEN];
@@ -314,13 +316,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
314 u8 * rc4key, int keylen, void *priv) 316 u8 * rc4key, int keylen, void *priv)
315{ 317{
316 struct lib80211_tkip_data *tkey = priv; 318 struct lib80211_tkip_data *tkey = priv;
317 int len;
318 u8 *pos; 319 u8 *pos;
319 struct ieee80211_hdr *hdr; 320 struct ieee80211_hdr *hdr;
320 321
321 hdr = (struct ieee80211_hdr *)skb->data; 322 hdr = (struct ieee80211_hdr *)skb->data;
322 323
323 if (skb_headroom(skb) < 8 || skb->len < hdr_len) 324 if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
324 return -1; 325 return -1;
325 326
326 if (rc4key == NULL || keylen < 16) 327 if (rc4key == NULL || keylen < 16)
@@ -333,9 +334,8 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
333 } 334 }
334 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); 335 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
335 336
336 len = skb->len - hdr_len; 337 pos = skb_push(skb, TKIP_HDR_LEN);
337 pos = skb_push(skb, 8); 338 memmove(pos, pos + TKIP_HDR_LEN, hdr_len);
338 memmove(pos, pos + 8, hdr_len);
339 pos += hdr_len; 339 pos += hdr_len;
340 340
341 *pos++ = *rc4key; 341 *pos++ = *rc4key;
@@ -353,7 +353,7 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
353 tkey->tx_iv32++; 353 tkey->tx_iv32++;
354 } 354 }
355 355
356 return 8; 356 return TKIP_HDR_LEN;
357} 357}
358 358
359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
@@ -384,9 +384,8 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) 384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
385 return -1; 385 return -1;
386 386
387 icv = skb_put(skb, 4);
388
389 crc = ~crc32_le(~0, pos, len); 387 crc = ~crc32_le(~0, pos, len);
388 icv = skb_put(skb, 4);
390 icv[0] = crc; 389 icv[0] = crc;
391 icv[1] = crc >> 8; 390 icv[1] = crc >> 8;
392 icv[2] = crc >> 16; 391 icv[2] = crc >> 16;
@@ -434,7 +433,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
434 return -1; 433 return -1;
435 } 434 }
436 435
437 if (skb->len < hdr_len + 8 + 4) 436 if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
438 return -1; 437 return -1;
439 438
440 pos = skb->data + hdr_len; 439 pos = skb->data + hdr_len;
@@ -462,7 +461,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
462 } 461 }
463 iv16 = (pos[0] << 8) | pos[2]; 462 iv16 = (pos[0] << 8) | pos[2];
464 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 463 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
465 pos += 8; 464 pos += TKIP_HDR_LEN;
466 465
467 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { 466 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
468#ifdef CONFIG_LIB80211_DEBUG 467#ifdef CONFIG_LIB80211_DEBUG
@@ -523,8 +522,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
523 tkey->rx_iv16_new = iv16; 522 tkey->rx_iv16_new = iv16;
524 523
525 /* Remove IV and ICV */ 524 /* Remove IV and ICV */
526 memmove(skb->data + 8, skb->data, hdr_len); 525 memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
527 skb_pull(skb, 8); 526 skb_pull(skb, TKIP_HDR_LEN);
528 skb_trim(skb, skb->len - 4); 527 skb_trim(skb, skb->len - 4);
529 528
530 return keyidx; 529 return keyidx;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 82e6002c8d67..22139fa46115 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/nl80211.h> 10#include <linux/nl80211.h>
11#include <linux/slab.h>
11#include <linux/wireless.h> 12#include <linux/wireless.h>
12#include <net/cfg80211.h> 13#include <net/cfg80211.h>
13#include <net/iw_handler.h> 14#include <net/iw_handler.h>
@@ -148,22 +149,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 149 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
149 const u8 *bssid = mgmt->bssid; 150 const u8 *bssid = mgmt->bssid;
150 int i; 151 int i;
152 bool found = false;
151 153
152 ASSERT_WDEV_LOCK(wdev); 154 ASSERT_WDEV_LOCK(wdev);
153 155
154 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
155
156 if (wdev->current_bss && 156 if (wdev->current_bss &&
157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
158 cfg80211_unhold_bss(wdev->current_bss); 158 cfg80211_unhold_bss(wdev->current_bss);
159 cfg80211_put_bss(&wdev->current_bss->pub); 159 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 160 wdev->current_bss = NULL;
161 found = true;
161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) { 162 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
162 if (wdev->auth_bsses[i] && 163 if (wdev->auth_bsses[i] &&
163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { 164 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
164 cfg80211_unhold_bss(wdev->auth_bsses[i]); 165 cfg80211_unhold_bss(wdev->auth_bsses[i]);
165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 166 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
166 wdev->auth_bsses[i] = NULL; 167 wdev->auth_bsses[i] = NULL;
168 found = true;
167 break; 169 break;
168 } 170 }
169 if (wdev->authtry_bsses[i] && 171 if (wdev->authtry_bsses[i] &&
@@ -171,10 +173,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
171 cfg80211_unhold_bss(wdev->authtry_bsses[i]); 173 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
172 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); 174 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
173 wdev->authtry_bsses[i] = NULL; 175 wdev->authtry_bsses[i] = NULL;
176 found = true;
174 break; 177 break;
175 } 178 }
176 } 179 }
177 180
181 if (!found)
182 return;
183
184 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
185
178 if (wdev->sme_state == CFG80211_SME_CONNECTED) { 186 if (wdev->sme_state == CFG80211_SME_CONNECTED) {
179 u16 reason_code; 187 u16 reason_code;
180 bool from_ap; 188 bool from_ap;
@@ -684,3 +692,206 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
684 } 692 }
685 } 693 }
686} 694}
695
696void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
697 struct ieee80211_channel *chan,
698 enum nl80211_channel_type channel_type,
699 unsigned int duration, gfp_t gfp)
700{
701 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
702 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
703
704 nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
705 duration, gfp);
706}
707EXPORT_SYMBOL(cfg80211_ready_on_channel);
708
709void cfg80211_remain_on_channel_expired(struct net_device *dev,
710 u64 cookie,
711 struct ieee80211_channel *chan,
712 enum nl80211_channel_type channel_type,
713 gfp_t gfp)
714{
715 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
716 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
717
718 nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
719 channel_type, gfp);
720}
721EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
722
723void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
724 struct station_info *sinfo, gfp_t gfp)
725{
726 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
727 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
728
729 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
730}
731EXPORT_SYMBOL(cfg80211_new_sta);
732
733struct cfg80211_action_registration {
734 struct list_head list;
735
736 u32 nlpid;
737
738 int match_len;
739
740 u8 match[];
741};
742
743int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
744 const u8 *match_data, int match_len)
745{
746 struct cfg80211_action_registration *reg, *nreg;
747 int err = 0;
748
749 nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
750 if (!nreg)
751 return -ENOMEM;
752
753 spin_lock_bh(&wdev->action_registrations_lock);
754
755 list_for_each_entry(reg, &wdev->action_registrations, list) {
756 int mlen = min(match_len, reg->match_len);
757
758 if (memcmp(reg->match, match_data, mlen) == 0) {
759 err = -EALREADY;
760 break;
761 }
762 }
763
764 if (err) {
765 kfree(nreg);
766 goto out;
767 }
768
769 memcpy(nreg->match, match_data, match_len);
770 nreg->match_len = match_len;
771 nreg->nlpid = snd_pid;
772 list_add(&nreg->list, &wdev->action_registrations);
773
774 out:
775 spin_unlock_bh(&wdev->action_registrations_lock);
776 return err;
777}
778
779void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid)
780{
781 struct cfg80211_action_registration *reg, *tmp;
782
783 spin_lock_bh(&wdev->action_registrations_lock);
784
785 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
786 if (reg->nlpid == nlpid) {
787 list_del(&reg->list);
788 kfree(reg);
789 }
790 }
791
792 spin_unlock_bh(&wdev->action_registrations_lock);
793}
794
795void cfg80211_mlme_purge_actions(struct wireless_dev *wdev)
796{
797 struct cfg80211_action_registration *reg, *tmp;
798
799 spin_lock_bh(&wdev->action_registrations_lock);
800
801 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
802 list_del(&reg->list);
803 kfree(reg);
804 }
805
806 spin_unlock_bh(&wdev->action_registrations_lock);
807}
808
809int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
810 struct net_device *dev,
811 struct ieee80211_channel *chan,
812 enum nl80211_channel_type channel_type,
813 const u8 *buf, size_t len, u64 *cookie)
814{
815 struct wireless_dev *wdev = dev->ieee80211_ptr;
816 const struct ieee80211_mgmt *mgmt;
817
818 if (rdev->ops->action == NULL)
819 return -EOPNOTSUPP;
820 if (len < 24 + 1)
821 return -EINVAL;
822
823 mgmt = (const struct ieee80211_mgmt *) buf;
824 if (!ieee80211_is_action(mgmt->frame_control))
825 return -EINVAL;
826 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
827 /* Verify that we are associated with the destination AP */
828 if (!wdev->current_bss ||
829 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
830 ETH_ALEN) != 0 ||
831 memcmp(wdev->current_bss->pub.bssid, mgmt->da,
832 ETH_ALEN) != 0)
833 return -ENOTCONN;
834 }
835
836 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
837 return -EINVAL;
838
839 /* Transmit the Action frame as requested by user space */
840 return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
841 buf, len, cookie);
842}
843
844bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
845 size_t len, gfp_t gfp)
846{
847 struct wireless_dev *wdev = dev->ieee80211_ptr;
848 struct wiphy *wiphy = wdev->wiphy;
849 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
850 struct cfg80211_action_registration *reg;
851 const u8 *action_data;
852 int action_data_len;
853 bool result = false;
854
855 /* frame length - min size excluding category */
856 action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
857
858 /* action data starts with category */
859 action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1;
860
861 spin_lock_bh(&wdev->action_registrations_lock);
862
863 list_for_each_entry(reg, &wdev->action_registrations, list) {
864 if (reg->match_len > action_data_len)
865 continue;
866
867 if (memcmp(reg->match, action_data, reg->match_len))
868 continue;
869
870 /* found match! */
871
872 /* Indicate the received Action frame to user space */
873 if (nl80211_send_action(rdev, dev, reg->nlpid, freq,
874 buf, len, gfp))
875 continue;
876
877 result = true;
878 break;
879 }
880
881 spin_unlock_bh(&wdev->action_registrations_lock);
882
883 return result;
884}
885EXPORT_SYMBOL(cfg80211_rx_action);
886
887void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
888 const u8 *buf, size_t len, bool ack, gfp_t gfp)
889{
890 struct wireless_dev *wdev = dev->ieee80211_ptr;
891 struct wiphy *wiphy = wdev->wiphy;
892 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
893
894 /* Indicate TX status of the Action frame to user space */
895 nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
896}
897EXPORT_SYMBOL(cfg80211_action_tx_status);
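
The new registration code above keys everything on a per-socket match pattern: cfg80211_mlme_register_action() stores the caller's netlink pid together with a byte string, and cfg80211_rx_action() forwards a received Action frame to the first registration whose whole pattern is a prefix of the frame body starting at the category byte. A standalone sketch of that matching rule (hypothetical types, no locking, not the kernel code):

        #include <stdbool.h>
        #include <stddef.h>
        #include <string.h>

        struct action_reg_sketch {
                const unsigned char *match;
                size_t match_len;
        };

        /* true if this registration should receive the frame whose action data
         * (category byte onwards) is data[0..data_len) */
        bool action_reg_matches(const struct action_reg_sketch *reg,
                                const unsigned char *data, size_t data_len)
        {
                if (reg->match_len > data_len)
                        return false;
                return memcmp(reg->match, data, reg->match_len) == 0;
        }

Registering a one-byte pattern therefore claims a whole action category for that socket, which is also why cfg80211_mlme_register_action() compares only min(match_len, reg->match_len) bytes and returns -EALREADY when one pattern is a prefix of the other.
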
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a0..030cf153bea2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * This is the new netlink-based wireless configuration interface. 2 * This is the new netlink-based wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/slab.h>
10#include <linux/list.h> 11#include <linux/list.h>
11#include <linux/if_ether.h> 12#include <linux/if_ether.h>
12#include <linux/ieee80211.h> 13#include <linux/ieee80211.h>
@@ -58,7 +59,7 @@ static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
58} 59}
59 60
60/* policy for the attributes */ 61/* policy for the attributes */
61static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 62static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
62 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 63 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 64 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
64 .len = 20-1 }, 65 .len = 20-1 },
@@ -69,6 +70,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, 70 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 71 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, 72 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
73 [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
72 74
73 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 75 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
74 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 76 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,11 +143,17 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
141 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, 143 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
142 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, 144 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
143 .len = WLAN_PMKID_LEN }, 145 .len = WLAN_PMKID_LEN },
146 [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
147 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
148 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
149 [NL80211_ATTR_FRAME] = { .type = NLA_BINARY,
150 .len = IEEE80211_MAX_DATA_LEN },
151 [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
152 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
144}; 153};
145 154
146/* policy for the attributes */ 155/* policy for the attributes */
147static struct nla_policy 156static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
148nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
149 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, 157 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
150 [NL80211_KEY_IDX] = { .type = NLA_U8 }, 158 [NL80211_KEY_IDX] = { .type = NLA_U8 },
151 [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, 159 [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
@@ -442,6 +450,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
442 dev->wiphy.frag_threshold); 450 dev->wiphy.frag_threshold);
443 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 451 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
444 dev->wiphy.rts_threshold); 452 dev->wiphy.rts_threshold);
453 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
454 dev->wiphy.coverage_class);
445 455
446 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 456 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
447 dev->wiphy.max_scan_ssids); 457 dev->wiphy.max_scan_ssids);
@@ -569,6 +579,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
569 CMD(set_pmksa, SET_PMKSA); 579 CMD(set_pmksa, SET_PMKSA);
570 CMD(del_pmksa, DEL_PMKSA); 580 CMD(del_pmksa, DEL_PMKSA);
571 CMD(flush_pmksa, FLUSH_PMKSA); 581 CMD(flush_pmksa, FLUSH_PMKSA);
582 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
583 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
584 CMD(action, ACTION);
572 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 585 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
573 i++; 586 i++;
574 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 587 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +694,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
681 u32 changed; 694 u32 changed;
682 u8 retry_short = 0, retry_long = 0; 695 u8 retry_short = 0, retry_long = 0;
683 u32 frag_threshold = 0, rts_threshold = 0; 696 u32 frag_threshold = 0, rts_threshold = 0;
697 u8 coverage_class = 0;
684 698
685 rtnl_lock(); 699 rtnl_lock();
686 700
@@ -803,9 +817,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
803 changed |= WIPHY_PARAM_RTS_THRESHOLD; 817 changed |= WIPHY_PARAM_RTS_THRESHOLD;
804 } 818 }
805 819
820 if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
821 coverage_class = nla_get_u8(
822 info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
823 changed |= WIPHY_PARAM_COVERAGE_CLASS;
824 }
825
806 if (changed) { 826 if (changed) {
807 u8 old_retry_short, old_retry_long; 827 u8 old_retry_short, old_retry_long;
808 u32 old_frag_threshold, old_rts_threshold; 828 u32 old_frag_threshold, old_rts_threshold;
829 u8 old_coverage_class;
809 830
810 if (!rdev->ops->set_wiphy_params) { 831 if (!rdev->ops->set_wiphy_params) {
811 result = -EOPNOTSUPP; 832 result = -EOPNOTSUPP;
@@ -816,6 +837,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
816 old_retry_long = rdev->wiphy.retry_long; 837 old_retry_long = rdev->wiphy.retry_long;
817 old_frag_threshold = rdev->wiphy.frag_threshold; 838 old_frag_threshold = rdev->wiphy.frag_threshold;
818 old_rts_threshold = rdev->wiphy.rts_threshold; 839 old_rts_threshold = rdev->wiphy.rts_threshold;
840 old_coverage_class = rdev->wiphy.coverage_class;
819 841
820 if (changed & WIPHY_PARAM_RETRY_SHORT) 842 if (changed & WIPHY_PARAM_RETRY_SHORT)
821 rdev->wiphy.retry_short = retry_short; 843 rdev->wiphy.retry_short = retry_short;
@@ -825,6 +847,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
825 rdev->wiphy.frag_threshold = frag_threshold; 847 rdev->wiphy.frag_threshold = frag_threshold;
826 if (changed & WIPHY_PARAM_RTS_THRESHOLD) 848 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
827 rdev->wiphy.rts_threshold = rts_threshold; 849 rdev->wiphy.rts_threshold = rts_threshold;
850 if (changed & WIPHY_PARAM_COVERAGE_CLASS)
851 rdev->wiphy.coverage_class = coverage_class;
828 852
829 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); 853 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
830 if (result) { 854 if (result) {
@@ -832,6 +856,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
832 rdev->wiphy.retry_long = old_retry_long; 856 rdev->wiphy.retry_long = old_retry_long;
833 rdev->wiphy.frag_threshold = old_frag_threshold; 857 rdev->wiphy.frag_threshold = old_frag_threshold;
834 rdev->wiphy.rts_threshold = old_rts_threshold; 858 rdev->wiphy.rts_threshold = old_rts_threshold;
859 rdev->wiphy.coverage_class = old_coverage_class;
835 } 860 }
836 } 861 }
837 862
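
The coverage-class hunks above follow the existing pattern for wiphy parameters: parse the attribute, record it in the changed mask, snapshot the old value, apply the new one, and restore the snapshot if the driver's set_wiphy_params callback fails. A compressed sketch of that pattern, with hypothetical trimmed-down types rather than the real cfg80211 structures:

        #include <stdint.h>

        /* Hypothetical stand-in for the wiphy parameter block. */
        struct wiphy_params_sketch {
                uint8_t coverage_class;
                uint32_t rts_threshold;         /* other fields elided */
        };

        #define PARAM_COVERAGE_CLASS    0x1

        /* Apply-then-roll-back, as in nl80211_set_wiphy() above. */
        int set_wiphy_params_sketch(struct wiphy_params_sketch *w,
                                    uint8_t coverage_class, unsigned int changed,
                                    int (*driver_commit)(struct wiphy_params_sketch *,
                                                         unsigned int))
        {
                struct wiphy_params_sketch old = *w;    /* snapshot before applying */
                int err;

                if (changed & PARAM_COVERAGE_CLASS)
                        w->coverage_class = coverage_class;

                err = driver_commit(w, changed);
                if (err)
                        *w = old;                       /* roll back on driver failure */
                return err;
        }
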
@@ -1637,42 +1662,9 @@ static int parse_station_flags(struct genl_info *info,
1637 return 0; 1662 return 0;
1638} 1663}
1639 1664
1640static u16 nl80211_calculate_bitrate(struct rate_info *rate)
1641{
1642 int modulation, streams, bitrate;
1643
1644 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
1645 return rate->legacy;
1646
1647 /* the formula below does only work for MCS values smaller than 32 */
1648 if (rate->mcs >= 32)
1649 return 0;
1650
1651 modulation = rate->mcs & 7;
1652 streams = (rate->mcs >> 3) + 1;
1653
1654 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
1655 13500000 : 6500000;
1656
1657 if (modulation < 4)
1658 bitrate *= (modulation + 1);
1659 else if (modulation == 4)
1660 bitrate *= (modulation + 2);
1661 else
1662 bitrate *= (modulation + 3);
1663
1664 bitrate *= streams;
1665
1666 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
1667 bitrate = (bitrate / 9) * 10;
1668
1669 /* do NOT round down here */
1670 return (bitrate + 50000) / 100000;
1671}
1672
1673static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 1665static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1674 int flags, struct net_device *dev, 1666 int flags, struct net_device *dev,
1675 u8 *mac_addr, struct station_info *sinfo) 1667 const u8 *mac_addr, struct station_info *sinfo)
1676{ 1668{
1677 void *hdr; 1669 void *hdr;
1678 struct nlattr *sinfoattr, *txrate; 1670 struct nlattr *sinfoattr, *txrate;
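
The bitrate helper deleted here is not lost: the following hunk switches the caller to cfg80211_calculate_bitrate(), suggesting the formula simply moved into common cfg80211 code. As a worked example of what it computes, here is a standalone copy of the arithmetic above, returning units of 100 kbit/s:

        /* Mirrors the removed helper; valid for MCS 0..31 only.
         *
         * Worked example, MCS 7 (64-QAM 5/6, one spatial stream), 40 MHz, short GI:
         *   base 13,500,000 * (7 + 3) * 1 = 135,000,000
         *   short GI: 135,000,000 / 9 * 10 = 150,000,000
         *   (150,000,000 + 50,000) / 100,000 = 1500  ->  150.0 Mbit/s
         */
        unsigned int mcs_bitrate_100kbps(int mcs, int width_40mhz, int short_gi)
        {
                int modulation, streams, bitrate;

                if (mcs < 0 || mcs >= 32)
                        return 0;

                modulation = mcs & 7;
                streams = (mcs >> 3) + 1;
                bitrate = width_40mhz ? 13500000 : 6500000;

                if (modulation < 4)
                        bitrate *= modulation + 1;
                else if (modulation == 4)
                        bitrate *= modulation + 2;
                else
                        bitrate *= modulation + 3;

                bitrate *= streams;
                if (short_gi)
                        bitrate = bitrate / 9 * 10;

                return (bitrate + 50000) / 100000;
        }
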
@@ -1716,8 +1708,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1716 if (!txrate) 1708 if (!txrate)
1717 goto nla_put_failure; 1709 goto nla_put_failure;
1718 1710
1719 /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */ 1711 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
1720 bitrate = nl80211_calculate_bitrate(&sinfo->txrate); 1712 bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
1721 if (bitrate > 0) 1713 if (bitrate > 0)
1722 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 1714 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
1723 1715
@@ -2023,6 +2015,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2023 if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) 2015 if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES])
2024 return -EINVAL; 2016 return -EINVAL;
2025 2017
2018 if (!info->attrs[NL80211_ATTR_STA_AID])
2019 return -EINVAL;
2020
2026 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 2021 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2027 params.supported_rates = 2022 params.supported_rates =
2028 nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 2023 nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
@@ -2031,11 +2026,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2031 params.listen_interval = 2026 params.listen_interval =
2032 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 2027 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
2033 2028
2034 if (info->attrs[NL80211_ATTR_STA_AID]) { 2029 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
2035 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 2030 if (!params.aid || params.aid > IEEE80211_MAX_AID)
2036 if (!params.aid || params.aid > IEEE80211_MAX_AID) 2031 return -EINVAL;
2037 return -EINVAL;
2038 }
2039 2032
2040 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 2033 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
2041 params.ht_capa = 2034 params.ht_capa =
@@ -2050,6 +2043,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2050 if (err) 2043 if (err)
2051 goto out_rtnl; 2044 goto out_rtnl;
2052 2045
2046 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2047 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
2048 err = -EINVAL;
2049 goto out;
2050 }
2051
2053 err = get_vlan(info, rdev, &params.vlan); 2052 err = get_vlan(info, rdev, &params.vlan);
2054 if (err) 2053 if (err)
2055 goto out; 2054 goto out;
@@ -2057,35 +2056,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2057 /* validate settings */ 2056 /* validate settings */
2058 err = 0; 2057 err = 0;
2059 2058
2060 switch (dev->ieee80211_ptr->iftype) {
2061 case NL80211_IFTYPE_AP:
2062 case NL80211_IFTYPE_AP_VLAN:
2063 /* all ok but must have AID */
2064 if (!params.aid)
2065 err = -EINVAL;
2066 break;
2067 case NL80211_IFTYPE_MESH_POINT:
2068 /* disallow things mesh doesn't support */
2069 if (params.vlan)
2070 err = -EINVAL;
2071 if (params.aid)
2072 err = -EINVAL;
2073 if (params.ht_capa)
2074 err = -EINVAL;
2075 if (params.listen_interval >= 0)
2076 err = -EINVAL;
2077 if (params.supported_rates)
2078 err = -EINVAL;
2079 if (params.sta_flags_mask)
2080 err = -EINVAL;
2081 break;
2082 default:
2083 err = -EINVAL;
2084 }
2085
2086 if (err)
2087 goto out;
2088
2089 if (!rdev->ops->add_station) { 2059 if (!rdev->ops->add_station) {
2090 err = -EOPNOTSUPP; 2060 err = -EOPNOTSUPP;
2091 goto out; 2061 goto out;
@@ -2126,8 +2096,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
2126 goto out_rtnl; 2096 goto out_rtnl;
2127 2097
2128 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2098 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2129 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 2099 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
2130 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
2131 err = -EINVAL; 2100 err = -EINVAL;
2132 goto out; 2101 goto out;
2133 } 2102 }
@@ -2514,8 +2483,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
2514 return err; 2483 return err;
2515} 2484}
2516 2485
2517static const struct nla_policy 2486static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2518 reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2519 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, 2487 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
2520 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, 2488 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
2521 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, 2489 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
@@ -2583,12 +2551,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
2583 2551
2584 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); 2552 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
2585 2553
2586#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2587 /* We ignore world regdom requests with the old regdom setup */
2588 if (is_world_regdom(data))
2589 return -EINVAL;
2590#endif
2591
2592 r = regulatory_hint_user(data); 2554 r = regulatory_hint_user(data);
2593 2555
2594 return r; 2556 return r;
@@ -2690,8 +2652,7 @@ do {\
2690 } \ 2652 } \
2691} while (0);\ 2653} while (0);\
2692 2654
2693static struct nla_policy 2655static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
2694nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
2695 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 }, 2656 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
2696 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 }, 2657 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
2697 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 }, 2658 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
@@ -3182,6 +3143,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3182 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 3143 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
3183 res->len_information_elements, 3144 res->len_information_elements,
3184 res->information_elements); 3145 res->information_elements);
3146 if (res->beacon_ies && res->len_beacon_ies &&
3147 res->beacon_ies != res->information_elements)
3148 NLA_PUT(msg, NL80211_BSS_BEACON_IES,
3149 res->len_beacon_ies, res->beacon_ies);
3185 if (res->tsf) 3150 if (res->tsf)
3186 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 3151 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
3187 if (res->beacon_interval) 3152 if (res->beacon_interval)
@@ -3586,6 +3551,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3586{ 3551{
3587 struct cfg80211_registered_device *rdev; 3552 struct cfg80211_registered_device *rdev;
3588 struct net_device *dev; 3553 struct net_device *dev;
3554 struct wireless_dev *wdev;
3589 struct cfg80211_crypto_settings crypto; 3555 struct cfg80211_crypto_settings crypto;
3590 struct ieee80211_channel *chan, *fixedchan; 3556 struct ieee80211_channel *chan, *fixedchan;
3591 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 3557 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
@@ -3631,7 +3597,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3631 } 3597 }
3632 3598
3633 mutex_lock(&rdev->devlist_mtx); 3599 mutex_lock(&rdev->devlist_mtx);
3634 fixedchan = rdev_fixed_channel(rdev, NULL); 3600 wdev = dev->ieee80211_ptr;
3601 fixedchan = rdev_fixed_channel(rdev, wdev);
3635 if (fixedchan && chan != fixedchan) { 3602 if (fixedchan && chan != fixedchan) {
3636 err = -EBUSY; 3603 err = -EBUSY;
3637 mutex_unlock(&rdev->devlist_mtx); 3604 mutex_unlock(&rdev->devlist_mtx);
@@ -4322,6 +4289,496 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
4322 4289
4323} 4290}
4324 4291
4292static int nl80211_remain_on_channel(struct sk_buff *skb,
4293 struct genl_info *info)
4294{
4295 struct cfg80211_registered_device *rdev;
4296 struct net_device *dev;
4297 struct ieee80211_channel *chan;
4298 struct sk_buff *msg;
4299 void *hdr;
4300 u64 cookie;
4301 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4302 u32 freq, duration;
4303 int err;
4304
4305 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
4306 !info->attrs[NL80211_ATTR_DURATION])
4307 return -EINVAL;
4308
4309 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
4310
4311 /*
4312 * We should be on that channel for at least one jiffie,
4313 * and more than 5 seconds seems excessive.
4314 */
4315 if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
4316 return -EINVAL;
4317
4318 rtnl_lock();
4319
4320 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4321 if (err)
4322 goto unlock_rtnl;
4323
4324 if (!rdev->ops->remain_on_channel) {
4325 err = -EOPNOTSUPP;
4326 goto out;
4327 }
4328
4329 if (!netif_running(dev)) {
4330 err = -ENETDOWN;
4331 goto out;
4332 }
4333
4334 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4335 channel_type = nla_get_u32(
4336 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4337 if (channel_type != NL80211_CHAN_NO_HT &&
4338 channel_type != NL80211_CHAN_HT20 &&
4339 channel_type != NL80211_CHAN_HT40PLUS &&
4340 channel_type != NL80211_CHAN_HT40MINUS)
4341 err = -EINVAL;
4342 goto out;
4343 }
4344
4345 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4346 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4347 if (chan == NULL) {
4348 err = -EINVAL;
4349 goto out;
4350 }
4351
4352 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4353 if (!msg) {
4354 err = -ENOMEM;
4355 goto out;
4356 }
4357
4358 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4359 NL80211_CMD_REMAIN_ON_CHANNEL);
4360
4361 if (IS_ERR(hdr)) {
4362 err = PTR_ERR(hdr);
4363 goto free_msg;
4364 }
4365
4366 err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
4367 channel_type, duration, &cookie);
4368
4369 if (err)
4370 goto free_msg;
4371
4372 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4373
4374 genlmsg_end(msg, hdr);
4375 err = genlmsg_reply(msg, info);
4376 goto out;
4377
4378 nla_put_failure:
4379 err = -ENOBUFS;
4380 free_msg:
4381 nlmsg_free(msg);
4382 out:
4383 cfg80211_unlock_rdev(rdev);
4384 dev_put(dev);
4385 unlock_rtnl:
4386 rtnl_unlock();
4387 return err;
4388}
4389
4390static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
4391 struct genl_info *info)
4392{
4393 struct cfg80211_registered_device *rdev;
4394 struct net_device *dev;
4395 u64 cookie;
4396 int err;
4397
4398 if (!info->attrs[NL80211_ATTR_COOKIE])
4399 return -EINVAL;
4400
4401 rtnl_lock();
4402
4403 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4404 if (err)
4405 goto unlock_rtnl;
4406
4407 if (!rdev->ops->cancel_remain_on_channel) {
4408 err = -EOPNOTSUPP;
4409 goto out;
4410 }
4411
4412 if (!netif_running(dev)) {
4413 err = -ENETDOWN;
4414 goto out;
4415 }
4416
4417 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
4418
4419 err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
4420
4421 out:
4422 cfg80211_unlock_rdev(rdev);
4423 dev_put(dev);
4424 unlock_rtnl:
4425 rtnl_unlock();
4426 return err;
4427}
4428
4429static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
4430 u8 *rates, u8 rates_len)
4431{
4432 u8 i;
4433 u32 mask = 0;
4434
4435 for (i = 0; i < rates_len; i++) {
4436 int rate = (rates[i] & 0x7f) * 5;
4437 int ridx;
4438 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
4439 struct ieee80211_rate *srate =
4440 &sband->bitrates[ridx];
4441 if (rate == srate->bitrate) {
4442 mask |= 1 << ridx;
4443 break;
4444 }
4445 }
4446 if (ridx == sband->n_bitrates)
4447 return 0; /* rate not found */
4448 }
4449
4450 return mask;
4451}
4452
4453static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
4454 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
4455 .len = NL80211_MAX_SUPP_RATES },
4456};
4457
4458static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
4459 struct genl_info *info)
4460{
4461 struct nlattr *tb[NL80211_TXRATE_MAX + 1];
4462 struct cfg80211_registered_device *rdev;
4463 struct cfg80211_bitrate_mask mask;
4464 int err, rem, i;
4465 struct net_device *dev;
4466 struct nlattr *tx_rates;
4467 struct ieee80211_supported_band *sband;
4468
4469 if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
4470 return -EINVAL;
4471
4472 rtnl_lock();
4473
4474 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4475 if (err)
4476 goto unlock_rtnl;
4477
4478 if (!rdev->ops->set_bitrate_mask) {
4479 err = -EOPNOTSUPP;
4480 goto unlock;
4481 }
4482
4483 memset(&mask, 0, sizeof(mask));
4484 /* Default to all rates enabled */
4485 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
4486 sband = rdev->wiphy.bands[i];
4487 mask.control[i].legacy =
4488 sband ? (1 << sband->n_bitrates) - 1 : 0;
4489 }
4490
4491 /*
4492 * The nested attribute uses enum nl80211_band as the index. This maps
4493 * directly to the enum ieee80211_band values used in cfg80211.
4494 */
4495 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
4496 {
4497 enum ieee80211_band band = nla_type(tx_rates);
4498 if (band < 0 || band >= IEEE80211_NUM_BANDS) {
4499 err = -EINVAL;
4500 goto unlock;
4501 }
4502 sband = rdev->wiphy.bands[band];
4503 if (sband == NULL) {
4504 err = -EINVAL;
4505 goto unlock;
4506 }
4507 nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
4508 nla_len(tx_rates), nl80211_txattr_policy);
4509 if (tb[NL80211_TXRATE_LEGACY]) {
4510 mask.control[band].legacy = rateset_to_mask(
4511 sband,
4512 nla_data(tb[NL80211_TXRATE_LEGACY]),
4513 nla_len(tb[NL80211_TXRATE_LEGACY]));
4514 if (mask.control[band].legacy == 0) {
4515 err = -EINVAL;
4516 goto unlock;
4517 }
4518 }
4519 }
4520
4521 err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
4522
4523 unlock:
4524 dev_put(dev);
4525 cfg80211_unlock_rdev(rdev);
4526 unlock_rtnl:
4527 rtnl_unlock();
4528 return err;
4529}
4530
4531static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
4532{
4533 struct cfg80211_registered_device *rdev;
4534 struct net_device *dev;
4535 int err;
4536
4537 if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
4538 return -EINVAL;
4539
4540 if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
4541 return -EINVAL;
4542
4543 rtnl_lock();
4544
4545 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4546 if (err)
4547 goto unlock_rtnl;
4548
4549 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
4550 err = -EOPNOTSUPP;
4551 goto out;
4552 }
4553
4554 /* not much point in registering if we can't reply */
4555 if (!rdev->ops->action) {
4556 err = -EOPNOTSUPP;
4557 goto out;
4558 }
4559
4560 err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
4561 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
4562 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
4563 out:
4564 cfg80211_unlock_rdev(rdev);
4565 dev_put(dev);
4566 unlock_rtnl:
4567 rtnl_unlock();
4568 return err;
4569}
4570
4571static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4572{
4573 struct cfg80211_registered_device *rdev;
4574 struct net_device *dev;
4575 struct ieee80211_channel *chan;
4576 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4577 u32 freq;
4578 int err;
4579 void *hdr;
4580 u64 cookie;
4581 struct sk_buff *msg;
4582
4583 if (!info->attrs[NL80211_ATTR_FRAME] ||
4584 !info->attrs[NL80211_ATTR_WIPHY_FREQ])
4585 return -EINVAL;
4586
4587 rtnl_lock();
4588
4589 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4590 if (err)
4591 goto unlock_rtnl;
4592
4593 if (!rdev->ops->action) {
4594 err = -EOPNOTSUPP;
4595 goto out;
4596 }
4597
4598 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
4599 err = -EOPNOTSUPP;
4600 goto out;
4601 }
4602
4603 if (!netif_running(dev)) {
4604 err = -ENETDOWN;
4605 goto out;
4606 }
4607
4608 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4609 channel_type = nla_get_u32(
4610 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4611 if (channel_type != NL80211_CHAN_NO_HT &&
4612 channel_type != NL80211_CHAN_HT20 &&
4613 channel_type != NL80211_CHAN_HT40PLUS &&
4614 channel_type != NL80211_CHAN_HT40MINUS)
4615 err = -EINVAL;
4616 goto out;
4617 }
4618
4619 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4620 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4621 if (chan == NULL) {
4622 err = -EINVAL;
4623 goto out;
4624 }
4625
4626 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4627 if (!msg) {
4628 err = -ENOMEM;
4629 goto out;
4630 }
4631
4632 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4633 NL80211_CMD_ACTION);
4634
4635 if (IS_ERR(hdr)) {
4636 err = PTR_ERR(hdr);
4637 goto free_msg;
4638 }
4639 err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
4640 nla_data(info->attrs[NL80211_ATTR_FRAME]),
4641 nla_len(info->attrs[NL80211_ATTR_FRAME]),
4642 &cookie);
4643 if (err)
4644 goto free_msg;
4645
4646 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4647
4648 genlmsg_end(msg, hdr);
4649 err = genlmsg_reply(msg, info);
4650 goto out;
4651
4652 nla_put_failure:
4653 err = -ENOBUFS;
4654 free_msg:
4655 nlmsg_free(msg);
4656 out:
4657 cfg80211_unlock_rdev(rdev);
4658 dev_put(dev);
4659unlock_rtnl:
4660 rtnl_unlock();
4661 return err;
4662}
4663
4664static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
4665{
4666 struct cfg80211_registered_device *rdev;
4667 struct wireless_dev *wdev;
4668 struct net_device *dev;
4669 u8 ps_state;
4670 bool state;
4671 int err;
4672
4673 if (!info->attrs[NL80211_ATTR_PS_STATE]) {
4674 err = -EINVAL;
4675 goto out;
4676 }
4677
4678 ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);
4679
4680 if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED) {
4681 err = -EINVAL;
4682 goto out;
4683 }
4684
4685 rtnl_lock();
4686
4687 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4688 if (err)
4689 goto unlock_rdev;
4690
4691 wdev = dev->ieee80211_ptr;
4692
4693 if (!rdev->ops->set_power_mgmt) {
4694 err = -EOPNOTSUPP;
4695 goto unlock_rdev;
4696 }
4697
4698 state = (ps_state == NL80211_PS_ENABLED) ? true : false;
4699
4700 if (state == wdev->ps)
4701 goto unlock_rdev;
4702
4703 wdev->ps = state;
4704
4705 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, wdev->ps,
4706 wdev->ps_timeout))
4707 /* assume this means it's off */
4708 wdev->ps = false;
4709
4710unlock_rdev:
4711 cfg80211_unlock_rdev(rdev);
4712 dev_put(dev);
4713 rtnl_unlock();
4714
4715out:
4716 return err;
4717}
4718
4719static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
4720{
4721 struct cfg80211_registered_device *rdev;
4722 enum nl80211_ps_state ps_state;
4723 struct wireless_dev *wdev;
4724 struct net_device *dev;
4725 struct sk_buff *msg;
4726 void *hdr;
4727 int err;
4728
4729 rtnl_lock();
4730
4731 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4732 if (err)
4733 goto unlock_rtnl;
4734
4735 wdev = dev->ieee80211_ptr;
4736
4737 if (!rdev->ops->set_power_mgmt) {
4738 err = -EOPNOTSUPP;
4739 goto out;
4740 }
4741
4742 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4743 if (!msg) {
4744 err = -ENOMEM;
4745 goto out;
4746 }
4747
4748 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4749 NL80211_CMD_GET_POWER_SAVE);
4750 if (!hdr) {
4751 err = -ENOMEM;
4752 goto free_msg;
4753 }
4754
4755 if (wdev->ps)
4756 ps_state = NL80211_PS_ENABLED;
4757 else
4758 ps_state = NL80211_PS_DISABLED;
4759
4760 NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
4761
4762 genlmsg_end(msg, hdr);
4763 err = genlmsg_reply(msg, info);
4764 goto out;
4765
4766nla_put_failure:
4767 err = -ENOBUFS;
4768
4769free_msg:
4770 nlmsg_free(msg);
4771
4772out:
4773 cfg80211_unlock_rdev(rdev);
4774 dev_put(dev);
4775
4776unlock_rtnl:
4777 rtnl_unlock();
4778
4779 return err;
4780}
4781
4325static struct genl_ops nl80211_ops[] = { 4782static struct genl_ops nl80211_ops[] = {
4326 { 4783 {
4327 .cmd = NL80211_CMD_GET_WIPHY, 4784 .cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +5041,50 @@ static struct genl_ops nl80211_ops[] = {
4584 .policy = nl80211_policy, 5041 .policy = nl80211_policy,
4585 .flags = GENL_ADMIN_PERM, 5042 .flags = GENL_ADMIN_PERM,
4586 }, 5043 },
4587 5044 {
5045 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
5046 .doit = nl80211_remain_on_channel,
5047 .policy = nl80211_policy,
5048 .flags = GENL_ADMIN_PERM,
5049 },
5050 {
5051 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5052 .doit = nl80211_cancel_remain_on_channel,
5053 .policy = nl80211_policy,
5054 .flags = GENL_ADMIN_PERM,
5055 },
5056 {
5057 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
5058 .doit = nl80211_set_tx_bitrate_mask,
5059 .policy = nl80211_policy,
5060 .flags = GENL_ADMIN_PERM,
5061 },
5062 {
5063 .cmd = NL80211_CMD_REGISTER_ACTION,
5064 .doit = nl80211_register_action,
5065 .policy = nl80211_policy,
5066 .flags = GENL_ADMIN_PERM,
5067 },
5068 {
5069 .cmd = NL80211_CMD_ACTION,
5070 .doit = nl80211_action,
5071 .policy = nl80211_policy,
5072 .flags = GENL_ADMIN_PERM,
5073 },
5074 {
5075 .cmd = NL80211_CMD_SET_POWER_SAVE,
5076 .doit = nl80211_set_power_save,
5077 .policy = nl80211_policy,
5078 .flags = GENL_ADMIN_PERM,
5079 },
5080 {
5081 .cmd = NL80211_CMD_GET_POWER_SAVE,
5082 .doit = nl80211_get_power_save,
5083 .policy = nl80211_policy,
5084 /* can be retrieved by unprivileged users */
5085 },
4588}; 5086};
5087
4589static struct genl_multicast_group nl80211_mlme_mcgrp = { 5088static struct genl_multicast_group nl80211_mlme_mcgrp = {
4590 .name = "mlme", 5089 .name = "mlme",
4591}; 5090};
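
With the ops registered above, a privileged process can request an off-channel period and gets the cookie for the later cancel command in the kernel's reply. A minimal userspace sketch, assuming libnl-3; the interface name, frequency and duration are placeholders, and error handling plus the reply callback that would read NL80211_ATTR_COOKIE are omitted:

        #include <net/if.h>
        #include <netlink/netlink.h>
        #include <netlink/genl/genl.h>
        #include <netlink/genl/ctrl.h>
        #include <linux/nl80211.h>

        /* Ask nl80211 to stay on 2412 MHz for 200 ms on wlan0. */
        int request_remain_on_channel(void)
        {
                struct nl_sock *sk = nl_socket_alloc();
                struct nl_msg *msg;
                int family;

                genl_connect(sk);
                family = genl_ctrl_resolve(sk, "nl80211");

                msg = nlmsg_alloc();
                genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                            NL80211_CMD_REMAIN_ON_CHANNEL, 0);
                nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
                nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 2412);
                nla_put_u32(msg, NL80211_ATTR_DURATION, 200);   /* milliseconds */

                nl_send_auto(sk, msg);
                nlmsg_free(msg);
                nl_socket_free(sk);
                return 0;
        }

The same cookie later appears in the NL80211_CMD_REMAIN_ON_CHANNEL event once the driver is actually on the channel, and must be passed to NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL to end the period early.
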
@@ -5173,6 +5672,193 @@ nla_put_failure:
5173 nlmsg_free(msg); 5672 nlmsg_free(msg);
5174} 5673}
5175 5674
5675static void nl80211_send_remain_on_chan_event(
5676 int cmd, struct cfg80211_registered_device *rdev,
5677 struct net_device *netdev, u64 cookie,
5678 struct ieee80211_channel *chan,
5679 enum nl80211_channel_type channel_type,
5680 unsigned int duration, gfp_t gfp)
5681{
5682 struct sk_buff *msg;
5683 void *hdr;
5684
5685 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5686 if (!msg)
5687 return;
5688
5689 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
5690 if (!hdr) {
5691 nlmsg_free(msg);
5692 return;
5693 }
5694
5695 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5696 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5697 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
5698 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
5699 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
5700
5701 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
5702 NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
5703
5704 if (genlmsg_end(msg, hdr) < 0) {
5705 nlmsg_free(msg);
5706 return;
5707 }
5708
5709 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5710 nl80211_mlme_mcgrp.id, gfp);
5711 return;
5712
5713 nla_put_failure:
5714 genlmsg_cancel(msg, hdr);
5715 nlmsg_free(msg);
5716}
5717
5718void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
5719 struct net_device *netdev, u64 cookie,
5720 struct ieee80211_channel *chan,
5721 enum nl80211_channel_type channel_type,
5722 unsigned int duration, gfp_t gfp)
5723{
5724 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
5725 rdev, netdev, cookie, chan,
5726 channel_type, duration, gfp);
5727}
5728
5729void nl80211_send_remain_on_channel_cancel(
5730 struct cfg80211_registered_device *rdev, struct net_device *netdev,
5731 u64 cookie, struct ieee80211_channel *chan,
5732 enum nl80211_channel_type channel_type, gfp_t gfp)
5733{
5734 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5735 rdev, netdev, cookie, chan,
5736 channel_type, 0, gfp);
5737}
5738
5739void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5740 struct net_device *dev, const u8 *mac_addr,
5741 struct station_info *sinfo, gfp_t gfp)
5742{
5743 struct sk_buff *msg;
5744
5745 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5746 if (!msg)
5747 return;
5748
5749 if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
5750 nlmsg_free(msg);
5751 return;
5752 }
5753
5754 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5755 nl80211_mlme_mcgrp.id, gfp);
5756}
5757
5758int nl80211_send_action(struct cfg80211_registered_device *rdev,
5759 struct net_device *netdev, u32 nlpid,
5760 int freq, const u8 *buf, size_t len, gfp_t gfp)
5761{
5762 struct sk_buff *msg;
5763 void *hdr;
5764 int err;
5765
5766 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5767 if (!msg)
5768 return -ENOMEM;
5769
5770 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION);
5771 if (!hdr) {
5772 nlmsg_free(msg);
5773 return -ENOMEM;
5774 }
5775
5776 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5777 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5778 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
5779 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
5780
5781 err = genlmsg_end(msg, hdr);
5782 if (err < 0) {
5783 nlmsg_free(msg);
5784 return err;
5785 }
5786
5787 err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
5788 if (err < 0)
5789 return err;
5790 return 0;
5791
5792 nla_put_failure:
5793 genlmsg_cancel(msg, hdr);
5794 nlmsg_free(msg);
5795 return -ENOBUFS;
5796}
5797
5798void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
5799 struct net_device *netdev, u64 cookie,
5800 const u8 *buf, size_t len, bool ack,
5801 gfp_t gfp)
5802{
5803 struct sk_buff *msg;
5804 void *hdr;
5805
5806 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5807 if (!msg)
5808 return;
5809
5810 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS);
5811 if (!hdr) {
5812 nlmsg_free(msg);
5813 return;
5814 }
5815
5816 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5817 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5818 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
5819 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
5820 if (ack)
5821 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
5822
5823 if (genlmsg_end(msg, hdr) < 0) {
5824 nlmsg_free(msg);
5825 return;
5826 }
5827
5828 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
5829 return;
5830
5831 nla_put_failure:
5832 genlmsg_cancel(msg, hdr);
5833 nlmsg_free(msg);
5834}
5835
5836static int nl80211_netlink_notify(struct notifier_block * nb,
5837 unsigned long state,
5838 void *_notify)
5839{
5840 struct netlink_notify *notify = _notify;
5841 struct cfg80211_registered_device *rdev;
5842 struct wireless_dev *wdev;
5843
5844 if (state != NETLINK_URELEASE)
5845 return NOTIFY_DONE;
5846
5847 rcu_read_lock();
5848
5849 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
5850 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
5851 cfg80211_mlme_unregister_actions(wdev, notify->pid);
5852
5853 rcu_read_unlock();
5854
5855 return NOTIFY_DONE;
5856}
5857
5858static struct notifier_block nl80211_netlink_notifier = {
5859 .notifier_call = nl80211_netlink_notify,
5860};
5861
5176/* initialisation/exit functions */ 5862/* initialisation/exit functions */
5177 5863
5178int nl80211_init(void) 5864int nl80211_init(void)
@@ -5206,6 +5892,10 @@ int nl80211_init(void)
5206 goto err_out; 5892 goto err_out;
5207#endif 5893#endif
5208 5894
5895 err = netlink_register_notifier(&nl80211_netlink_notifier);
5896 if (err)
5897 goto err_out;
5898
5209 return 0; 5899 return 0;
5210 err_out: 5900 err_out:
5211 genl_unregister_family(&nl80211_fam); 5901 genl_unregister_family(&nl80211_fam);
@@ -5214,5 +5904,6 @@ int nl80211_init(void)
5214 5904
5215void nl80211_exit(void) 5905void nl80211_exit(void)
5216{ 5906{
5907 netlink_unregister_notifier(&nl80211_netlink_notifier);
5217 genl_unregister_family(&nl80211_fam); 5908 genl_unregister_family(&nl80211_fam);
5218} 5909}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b0..4ca511102c6c 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,27 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
59 struct net_device *netdev, const u8 *bssid, 59 struct net_device *netdev, const u8 *bssid,
60 gfp_t gfp); 60 gfp_t gfp);
61 61
62void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev,
64 u64 cookie,
65 struct ieee80211_channel *chan,
66 enum nl80211_channel_type channel_type,
67 unsigned int duration, gfp_t gfp);
68void nl80211_send_remain_on_channel_cancel(
69 struct cfg80211_registered_device *rdev, struct net_device *netdev,
70 u64 cookie, struct ieee80211_channel *chan,
71 enum nl80211_channel_type channel_type, gfp_t gfp);
72
73void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
74 struct net_device *dev, const u8 *mac_addr,
75 struct station_info *sinfo, gfp_t gfp);
76
77int nl80211_send_action(struct cfg80211_registered_device *rdev,
78 struct net_device *netdev, u32 nlpid, int freq,
79 const u8 *buf, size_t len, gfp_t gfp);
80void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
81 struct net_device *netdev, u64 cookie,
82 const u8 *buf, size_t len, bool ack,
83 gfp_t gfp);
84
62#endif /* __NET_WIRELESS_NL80211_H */ 85#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index f591871a7b4f..1332c445d1c7 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -2,6 +2,16 @@
2 * Radiotap parser 2 * Radiotap parser
3 * 3 *
4 * Copyright 2007 Andy Green <andy@warmcat.com> 4 * Copyright 2007 Andy Green <andy@warmcat.com>
5 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Alternatively, this software may be distributed under the terms of BSD
12 * license.
13 *
14 * See COPYING for more details.
5 */ 15 */
6 16
7#include <net/cfg80211.h> 17#include <net/cfg80211.h>
@@ -10,6 +20,35 @@
10 20
11/* function prototypes and related defs are in include/net/cfg80211.h */ 21/* function prototypes and related defs are in include/net/cfg80211.h */
12 22
23static const struct radiotap_align_size rtap_namespace_sizes[] = {
24 [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, },
25 [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, },
26 [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, },
27 [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, },
28 [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, },
29 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, },
30 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, },
31 [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, },
32 [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, },
33 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, },
34 [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, },
35 [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, },
36 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, },
37 [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, },
38 [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, },
39 [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
40 [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
41 [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
42 /*
43 * add more here as they are defined in radiotap.h
44 */
45};
46
47static const struct ieee80211_radiotap_namespace radiotap_ns = {
48 .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
49 .align_size = rtap_namespace_sizes,
50};
51
13/** 52/**
14 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization 53 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
15 * @iterator: radiotap_iterator to initialize 54 * @iterator: radiotap_iterator to initialize
@@ -50,9 +89,9 @@
50 */ 89 */
51 90
52int ieee80211_radiotap_iterator_init( 91int ieee80211_radiotap_iterator_init(
53 struct ieee80211_radiotap_iterator *iterator, 92 struct ieee80211_radiotap_iterator *iterator,
54 struct ieee80211_radiotap_header *radiotap_header, 93 struct ieee80211_radiotap_header *radiotap_header,
55 int max_length) 94 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
56{ 95{
57 /* Linux only supports version 0 radiotap format */ 96 /* Linux only supports version 0 radiotap format */
58 if (radiotap_header->it_version) 97 if (radiotap_header->it_version)
@@ -62,19 +101,24 @@ int ieee80211_radiotap_iterator_init(
62 if (max_length < get_unaligned_le16(&radiotap_header->it_len)) 101 if (max_length < get_unaligned_le16(&radiotap_header->it_len))
63 return -EINVAL; 102 return -EINVAL;
64 103
65 iterator->rtheader = radiotap_header; 104 iterator->_rtheader = radiotap_header;
66 iterator->max_length = get_unaligned_le16(&radiotap_header->it_len); 105 iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
67 iterator->arg_index = 0; 106 iterator->_arg_index = 0;
68 iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); 107 iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
69 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); 108 iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
70 iterator->this_arg = NULL; 109 iterator->_reset_on_ext = 0;
110 iterator->_next_bitmap = &radiotap_header->it_present;
111 iterator->_next_bitmap++;
112 iterator->_vns = vns;
113 iterator->current_namespace = &radiotap_ns;
114 iterator->is_radiotap_ns = 1;
71 115
72 /* find payload start allowing for extended bitmap(s) */ 116 /* find payload start allowing for extended bitmap(s) */
73 117
74 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { 118 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
75 while (get_unaligned_le32(iterator->arg) & 119 while (get_unaligned_le32(iterator->_arg) &
76 (1 << IEEE80211_RADIOTAP_EXT)) { 120 (1 << IEEE80211_RADIOTAP_EXT)) {
77 iterator->arg += sizeof(u32); 121 iterator->_arg += sizeof(uint32_t);
78 122
79 /* 123 /*
80 * check for insanity where the present bitmaps 124 * check for insanity where the present bitmaps
@@ -82,12 +126,13 @@ int ieee80211_radiotap_iterator_init(
82 * stated radiotap header length 126 * stated radiotap header length
83 */ 127 */
84 128
85 if (((ulong)iterator->arg - 129 if ((unsigned long)iterator->_arg -
86 (ulong)iterator->rtheader) > iterator->max_length) 130 (unsigned long)iterator->_rtheader >
131 (unsigned long)iterator->_max_length)
87 return -EINVAL; 132 return -EINVAL;
88 } 133 }
89 134
90 iterator->arg += sizeof(u32); 135 iterator->_arg += sizeof(uint32_t);
91 136
92 /* 137 /*
93 * no need to check again for blowing past stated radiotap 138 * no need to check again for blowing past stated radiotap
@@ -96,12 +141,36 @@ int ieee80211_radiotap_iterator_init(
96 */ 141 */
97 } 142 }
98 143
144 iterator->this_arg = iterator->_arg;
145
99 /* we are all initialized happily */ 146 /* we are all initialized happily */
100 147
101 return 0; 148 return 0;
102} 149}
103EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); 150EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
104 151
152static void find_ns(struct ieee80211_radiotap_iterator *iterator,
153 uint32_t oui, uint8_t subns)
154{
155 int i;
156
157 iterator->current_namespace = NULL;
158
159 if (!iterator->_vns)
160 return;
161
162 for (i = 0; i < iterator->_vns->n_ns; i++) {
163 if (iterator->_vns->ns[i].oui != oui)
164 continue;
165 if (iterator->_vns->ns[i].subns != subns)
166 continue;
167
168 iterator->current_namespace = &iterator->_vns->ns[i];
169 break;
170 }
171}
172
173
105 174
106/** 175/**
107 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg 176 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
@@ -127,99 +196,80 @@ EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
127 */ 196 */
128 197
129int ieee80211_radiotap_iterator_next( 198int ieee80211_radiotap_iterator_next(
130 struct ieee80211_radiotap_iterator *iterator) 199 struct ieee80211_radiotap_iterator *iterator)
131{ 200{
132 201 while (1) {
133 /*
134 * small length lookup table for all radiotap types we heard of
135 * starting from b0 in the bitmap, so we can walk the payload
136 * area of the radiotap header
137 *
138 * There is a requirement to pad args, so that args
139 * of a given length must begin at a boundary of that length
140 * -- but note that compound args are allowed (eg, 2 x u16
141 * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
142 * a reliable indicator of alignment requirement.
143 *
144 * upper nybble: content alignment for arg
145 * lower nybble: content length for arg
146 */
147
148 static const u8 rt_sizes[] = {
149 [IEEE80211_RADIOTAP_TSFT] = 0x88,
150 [IEEE80211_RADIOTAP_FLAGS] = 0x11,
151 [IEEE80211_RADIOTAP_RATE] = 0x11,
152 [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
153 [IEEE80211_RADIOTAP_FHSS] = 0x22,
154 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
155 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
156 [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
157 [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
158 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
159 [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
160 [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
161 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
162 [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11,
163 [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22,
164 [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22,
165 [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11,
166 [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11,
167 /*
168 * add more here as they are defined in
169 * include/net/ieee80211_radiotap.h
170 */
171 };
172
173 /*
174 * for every radiotap entry we can at
175 * least skip (by knowing the length)...
176 */
177
178 while (iterator->arg_index < sizeof(rt_sizes)) {
179 int hit = 0; 202 int hit = 0;
180 int pad; 203 int pad, align, size, subns, vnslen;
204 uint32_t oui;
181 205
182 if (!(iterator->bitmap_shifter & 1)) 206 /* if no more EXT bits, that's it */
207 if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT &&
208 !(iterator->_bitmap_shifter & 1))
209 return -ENOENT;
210
211 if (!(iterator->_bitmap_shifter & 1))
183 goto next_entry; /* arg not present */ 212 goto next_entry; /* arg not present */
184 213
214 /* get alignment/size of data */
215 switch (iterator->_arg_index % 32) {
216 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
217 case IEEE80211_RADIOTAP_EXT:
218 align = 1;
219 size = 0;
220 break;
221 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
222 align = 2;
223 size = 6;
224 break;
225 default:
226 if (!iterator->current_namespace ||
227 iterator->_arg_index >= iterator->current_namespace->n_bits) {
228 if (iterator->current_namespace == &radiotap_ns)
229 return -ENOENT;
230 align = 0;
231 } else {
232 align = iterator->current_namespace->align_size[iterator->_arg_index].align;
233 size = iterator->current_namespace->align_size[iterator->_arg_index].size;
234 }
235 if (!align) {
236 /* skip all subsequent data */
237 iterator->_arg = iterator->_next_ns_data;
238 /* give up on this namespace */
239 iterator->current_namespace = NULL;
240 goto next_entry;
241 }
242 break;
243 }
244
185 /* 245 /*
186 * arg is present, account for alignment padding 246 * arg is present, account for alignment padding
187 * 8-bit args can be at any alignment
188 * 16-bit args must start on 16-bit boundary
189 * 32-bit args must start on 32-bit boundary
190 * 64-bit args must start on 64-bit boundary
191 * 247 *
192 * note that total arg size can differ from alignment of 248 * Note that these alignments are relative to the start
193 * elements inside arg, so we use upper nybble of length 249 * of the radiotap header. There is no guarantee
194 * table to base alignment on
195 *
196 * also note: these alignments are ** relative to the
197 * start of the radiotap header **. There is no guarantee
198 * that the radiotap header itself is aligned on any 250 * that the radiotap header itself is aligned on any
199 * kind of boundary. 251 * kind of boundary.
200 * 252 *
201 * the above is why get_unaligned() is used to dereference 253 * The above is why get_unaligned() is used to dereference
202 * multibyte elements from the radiotap area 254 * multibyte elements from the radiotap area.
203 */ 255 */
204 256
205 pad = (((ulong)iterator->arg) - 257 pad = ((unsigned long)iterator->_arg -
206 ((ulong)iterator->rtheader)) & 258 (unsigned long)iterator->_rtheader) & (align - 1);
207 ((rt_sizes[iterator->arg_index] >> 4) - 1);
208 259
209 if (pad) 260 if (pad)
210 iterator->arg += 261 iterator->_arg += align - pad;
211 (rt_sizes[iterator->arg_index] >> 4) - pad;
212 262
213 /* 263 /*
214 * this is what we will return to user, but we need to 264 * this is what we will return to user, but we need to
215 * move on first so next call has something fresh to test 265 * move on first so next call has something fresh to test
216 */ 266 */
217 iterator->this_arg_index = iterator->arg_index; 267 iterator->this_arg_index = iterator->_arg_index;
218 iterator->this_arg = iterator->arg; 268 iterator->this_arg = iterator->_arg;
219 hit = 1; 269 iterator->this_arg_size = size;
220 270
221 /* internally move on the size of this arg */ 271 /* internally move on the size of this arg */
222 iterator->arg += rt_sizes[iterator->arg_index] & 0x0f; 272 iterator->_arg += size;
223 273
224 /* 274 /*
225 * check for insanity where we are given a bitmap that 275 * check for insanity where we are given a bitmap that
@@ -228,32 +278,73 @@ int ieee80211_radiotap_iterator_next(
228 * max_length on the last arg, never exceeding it. 278 * max_length on the last arg, never exceeding it.
229 */ 279 */
230 280
231 if (((ulong)iterator->arg - (ulong)iterator->rtheader) > 281 if ((unsigned long)iterator->_arg -
232 iterator->max_length) 282 (unsigned long)iterator->_rtheader >
283 (unsigned long)iterator->_max_length)
233 return -EINVAL; 284 return -EINVAL;
234 285
235 next_entry: 286 /* these special ones are valid in each bitmap word */
236 iterator->arg_index++; 287 switch (iterator->_arg_index % 32) {
237 if (unlikely((iterator->arg_index & 31) == 0)) { 288 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
238 /* completed current u32 bitmap */ 289 iterator->_bitmap_shifter >>= 1;
239 if (iterator->bitmap_shifter & 1) { 290 iterator->_arg_index++;
240 /* b31 was set, there is more */ 291
241 /* move to next u32 bitmap */ 292 iterator->_reset_on_ext = 1;
242 iterator->bitmap_shifter = 293
243 get_unaligned_le32(iterator->next_bitmap); 294 vnslen = get_unaligned_le16(iterator->this_arg + 4);
244 iterator->next_bitmap++; 295 iterator->_next_ns_data = iterator->_arg + vnslen;
245 } else 296 oui = (*iterator->this_arg << 16) |
246 /* no more bitmaps: end */ 297 (*(iterator->this_arg + 1) << 8) |
247 iterator->arg_index = sizeof(rt_sizes); 298 *(iterator->this_arg + 2);
248 } else /* just try the next bit */ 299 subns = *(iterator->this_arg + 3);
249 iterator->bitmap_shifter >>= 1; 300
301 find_ns(iterator, oui, subns);
302
303 iterator->is_radiotap_ns = 0;
304 /* allow parsers to show this information */
305 iterator->this_arg_index =
306 IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
307 iterator->this_arg_size += vnslen;
308 if ((unsigned long)iterator->this_arg +
309 iterator->this_arg_size -
310 (unsigned long)iterator->_rtheader >
311 (unsigned long)iterator->_max_length)
312 return -EINVAL;
313 hit = 1;
314 break;
315 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
316 iterator->_bitmap_shifter >>= 1;
317 iterator->_arg_index++;
318
319 iterator->_reset_on_ext = 1;
320 iterator->current_namespace = &radiotap_ns;
321 iterator->is_radiotap_ns = 1;
322 break;
323 case IEEE80211_RADIOTAP_EXT:
324 /*
325 * bit 31 was set, there is more
326 * -- move to next u32 bitmap
327 */
328 iterator->_bitmap_shifter =
329 get_unaligned_le32(iterator->_next_bitmap);
330 iterator->_next_bitmap++;
331 if (iterator->_reset_on_ext)
332 iterator->_arg_index = 0;
333 else
334 iterator->_arg_index++;
335 iterator->_reset_on_ext = 0;
336 break;
337 default:
338 /* we've got a hit! */
339 hit = 1;
340 next_entry:
341 iterator->_bitmap_shifter >>= 1;
342 iterator->_arg_index++;
343 }
250 344
251 /* if we found a valid arg earlier, return it now */ 345 /* if we found a valid arg earlier, return it now */
252 if (hit) 346 if (hit)
253 return 0; 347 return 0;
254 } 348 }
255
256 /* we don't know how to handle any more args, we're done */
257 return -ENOENT;
258} 349}
259EXPORT_SYMBOL(ieee80211_radiotap_iterator_next); 350EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
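
A minimal usage sketch of the reworked iterator (not part of this patch; example_dump_radiotap() is a hypothetical helper, and the iterator is assumed to have been initialized elsewhere since the init arguments vary between parser versions):

#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>

/* hypothetical helper, not part of this patch */
static void example_dump_radiotap(struct ieee80211_radiotap_iterator *it)
{
	/* 0 means a field was returned, -ENOENT means all fields are consumed */
	while (ieee80211_radiotap_iterator_next(it) == 0) {
		switch (it->this_arg_index) {
		case IEEE80211_RADIOTAP_RATE:
			/* one-byte field, no alignment concerns */
			printk(KERN_DEBUG "rate: %u\n", *it->this_arg);
			break;
		case IEEE80211_RADIOTAP_CHANNEL:
			/* multibyte fields must use get_unaligned_*() */
			printk(KERN_DEBUG "freq: %u MHz\n",
			       get_unaligned_le16(it->this_arg));
			break;
		default:
			break;
		}
	}
}
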
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7a0754c92df4..422da20d1e5b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -33,6 +33,7 @@
33 * 33 *
34 */ 34 */
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/slab.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/random.h> 38#include <linux/random.h>
38#include <linux/nl80211.h> 39#include <linux/nl80211.h>
@@ -40,8 +41,18 @@
40#include <net/cfg80211.h> 41#include <net/cfg80211.h>
41#include "core.h" 42#include "core.h"
42#include "reg.h" 43#include "reg.h"
44#include "regdb.h"
43#include "nl80211.h" 45#include "nl80211.h"
44 46
47#ifdef CONFIG_CFG80211_REG_DEBUG
48#define REG_DBG_PRINT(format, args...) \
49 do { \
50 printk(KERN_DEBUG format , ## args); \
51 } while (0)
52#else
53#define REG_DBG_PRINT(args...)
54#endif
55
45/* Receipt of information from last regulatory request */ 56/* Receipt of information from last regulatory request */
46static struct regulatory_request *last_request; 57static struct regulatory_request *last_request;
47 58
@@ -124,82 +135,11 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
124 &world_regdom; 135 &world_regdom;
125 136
126static char *ieee80211_regdom = "00"; 137static char *ieee80211_regdom = "00";
138static char user_alpha2[2];
127 139
128module_param(ieee80211_regdom, charp, 0444); 140module_param(ieee80211_regdom, charp, 0444);
129MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 141MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
130 142
131#ifdef CONFIG_WIRELESS_OLD_REGULATORY
132/*
133 * We assume 40 MHz bandwidth for the old regulatory work.
134 * We make emphasis we are using the exact same frequencies
135 * as before
136 */
137
138static const struct ieee80211_regdomain us_regdom = {
139 .n_reg_rules = 6,
140 .alpha2 = "US",
141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
144 /* IEEE 802.11a, channel 36..48 */
145 REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
146 /* IEEE 802.11a, channels 48..64 */
147 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
148 /* IEEE 802.11a, channels 100..124 */
149 REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
150 /* IEEE 802.11a, channels 132..144 */
151 REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 149..165, outdoor */
153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
154 }
155};
156
157static const struct ieee80211_regdomain jp_regdom = {
158 .n_reg_rules = 6,
159 .alpha2 = "JP",
160 .reg_rules = {
161 /* IEEE 802.11b/g, channels 1..11 */
162 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
163 /* IEEE 802.11b/g, channels 12..13 */
164 REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
165 /* IEEE 802.11b/g, channel 14 */
166 REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
167 /* IEEE 802.11a, channels 36..48 */
168 REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
169 /* IEEE 802.11a, channels 52..64 */
170 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
171 /* IEEE 802.11a, channels 100..144 */
172 REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
173 }
174};
175
176static const struct ieee80211_regdomain *static_regdom(char *alpha2)
177{
178 if (alpha2[0] == 'U' && alpha2[1] == 'S')
179 return &us_regdom;
180 if (alpha2[0] == 'J' && alpha2[1] == 'P')
181 return &jp_regdom;
182 /* Use world roaming rules for "EU", since it was a pseudo
183 domain anyway... */
184 if (alpha2[0] == 'E' && alpha2[1] == 'U')
185 return &world_regdom;
186 /* Default, world roaming rules */
187 return &world_regdom;
188}
189
190static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
191{
192 if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
193 return true;
194 return false;
195}
196#else
197static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
198{
199 return false;
200}
201#endif
202
203static void reset_regdomains(void) 143static void reset_regdomains(void)
204{ 144{
205 /* avoid freeing static information or freeing something twice */ 145 /* avoid freeing static information or freeing something twice */
@@ -209,8 +149,6 @@ static void reset_regdomains(void)
209 cfg80211_world_regdom = NULL; 149 cfg80211_world_regdom = NULL;
210 if (cfg80211_regdomain == &world_regdom) 150 if (cfg80211_regdomain == &world_regdom)
211 cfg80211_regdomain = NULL; 151 cfg80211_regdomain = NULL;
212 if (is_old_static_regdom(cfg80211_regdomain))
213 cfg80211_regdomain = NULL;
214 152
215 kfree(cfg80211_regdomain); 153 kfree(cfg80211_regdomain);
216 kfree(cfg80211_world_regdom); 154 kfree(cfg80211_world_regdom);
@@ -316,6 +254,27 @@ static bool regdom_changes(const char *alpha2)
316 return true; 254 return true;
317} 255}
318 256
257/*
258 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached; this lets
259 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
260 * has ever been issued.
261 */
262static bool is_user_regdom_saved(void)
263{
264 if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
265 return false;
266
267 /* This would indicate a mistake in the design */
268 if (WARN((!is_world_regdom(user_alpha2) &&
269 !is_an_alpha2(user_alpha2)),
270 "Unexpected user alpha2: %c%c\n",
271 user_alpha2[0],
272 user_alpha2[1]))
273 return false;
274
275 return true;
276}
277
319/** 278/**
320 * country_ie_integrity_changes - tells us if the country IE has changed 279 * country_ie_integrity_changes - tells us if the country IE has changed
321 * @checksum: checksum of country IE of fields we are interested in 280 * @checksum: checksum of country IE of fields we are interested in
@@ -335,6 +294,96 @@ static bool country_ie_integrity_changes(u32 checksum)
335 return false; 294 return false;
336} 295}
337 296
297static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
298 const struct ieee80211_regdomain *src_regd)
299{
300 struct ieee80211_regdomain *regd;
301 int size_of_regd = 0;
302 unsigned int i;
303
304 size_of_regd = sizeof(struct ieee80211_regdomain) +
305 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
306
307 regd = kzalloc(size_of_regd, GFP_KERNEL);
308 if (!regd)
309 return -ENOMEM;
310
311 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
312
313 for (i = 0; i < src_regd->n_reg_rules; i++)
314 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
315 sizeof(struct ieee80211_reg_rule));
316
317 *dst_regd = regd;
318 return 0;
319}
320
321#ifdef CONFIG_CFG80211_INTERNAL_REGDB
322struct reg_regdb_search_request {
323 char alpha2[2];
324 struct list_head list;
325};
326
327static LIST_HEAD(reg_regdb_search_list);
328static DEFINE_MUTEX(reg_regdb_search_mutex);
329
330static void reg_regdb_search(struct work_struct *work)
331{
332 struct reg_regdb_search_request *request;
333 const struct ieee80211_regdomain *curdom, *regdom;
334 int i, r;
335
336 mutex_lock(&reg_regdb_search_mutex);
337 while (!list_empty(&reg_regdb_search_list)) {
338 request = list_first_entry(&reg_regdb_search_list,
339 struct reg_regdb_search_request,
340 list);
341 list_del(&request->list);
342
343 for (i = 0; i < reg_regdb_size; i++) {
344 curdom = reg_regdb[i];
345
346 if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
347 r = reg_copy_regd(&regdom, curdom);
348 if (r)
349 break;
350 mutex_lock(&cfg80211_mutex);
351 set_regdom(regdom);
352 mutex_unlock(&cfg80211_mutex);
353 break;
354 }
355 }
356
357 kfree(request);
358 }
359 mutex_unlock(&reg_regdb_search_mutex);
360}
361
362static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
363
364static void reg_regdb_query(const char *alpha2)
365{
366 struct reg_regdb_search_request *request;
367
368 if (!alpha2)
369 return;
370
371 request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
372 if (!request)
373 return;
374
375 memcpy(request->alpha2, alpha2, 2);
376
377 mutex_lock(&reg_regdb_search_mutex);
378 list_add_tail(&request->list, &reg_regdb_search_list);
379 mutex_unlock(&reg_regdb_search_mutex);
380
381 schedule_work(&reg_regdb_work);
382}
383#else
384static inline void reg_regdb_query(const char *alpha2) {}
385#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
386
338/* 387/*
339 * This lets us keep regulatory code which is updated on a regulatory 388 * This lets us keep regulatory code which is updated on a regulatory
340 * basis in userspace. 389 * basis in userspace.
@@ -354,6 +403,9 @@ static int call_crda(const char *alpha2)
354 printk(KERN_INFO "cfg80211: Calling CRDA to update world " 403 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
355 "regulatory domain\n"); 404 "regulatory domain\n");
356 405
406 /* query internal regulatory database (if it exists) */
407 reg_regdb_query(alpha2);
408
357 country_env[8] = alpha2[0]; 409 country_env[8] = alpha2[0];
358 country_env[9] = alpha2[1]; 410 country_env[9] = alpha2[1];
359 411
@@ -454,12 +506,212 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
454} 506}
455 507
456/* 508/*
509 * This is a workaround to sanity check the result of
510 * ieee80211_channel_to_frequency(), which can currently return a
511 * 2 GHz channel when in fact a 5 GHz channel was desired. An example would be
512 * an AP providing channel 8 in a country IE triplet when it sent this on the
513 * 5 GHz band; that channel is meant to be channel 8 on 5 GHz, not a 2 GHz
514 * channel.
515 *
516 * This can be removed once ieee80211_channel_to_frequency() takes in a band.
517 */
518static bool chan_in_band(int chan, enum ieee80211_band band)
519{
520 int center_freq = ieee80211_channel_to_frequency(chan);
521
522 switch (band) {
523 case IEEE80211_BAND_2GHZ:
524 if (center_freq <= 2484)
525 return true;
526 return false;
527 case IEEE80211_BAND_5GHZ:
528 if (center_freq >= 5005)
529 return true;
530 return false;
531 default:
532 return false;
533 }
534}
535
536/*
537 * Some APs may send a country IE triplet for each channel they
538 * support and while this is completely overkill and silly we still
539 * need to support it. We avoid making a single rule for each channel
540 * though and to help us with this we use this helper to find the
541 * actual subband end channel. These types of country IE triplet
542 * scenarios are then handled, all yielding two regulatory rules from
543 * parsing a country IE:
544 *
545 * [1]
546 * [2]
547 * [36]
548 * [40]
549 *
550 * [1]
551 * [2-4]
552 * [5-12]
553 * [36]
554 * [40-44]
555 *
556 * [1-4]
557 * [5-7]
558 * [36-44]
559 * [48-64]
560 *
561 * [36-36]
562 * [40-40]
563 * [44-44]
564 * [48-48]
565 * [52-52]
566 * [56-56]
567 * [60-60]
568 * [64-64]
569 * [100-100]
570 * [104-104]
571 * [108-108]
572 * [112-112]
573 * [116-116]
574 * [120-120]
575 * [124-124]
576 * [128-128]
577 * [132-132]
578 * [136-136]
579 * [140-140]
580 *
581 * Returns 0 if the IE is found to be invalid somewhere in the
582 * middle.
583 */
584static int max_subband_chan(enum ieee80211_band band,
585 int orig_cur_chan,
586 int orig_end_channel,
587 s8 orig_max_power,
588 u8 **country_ie,
589 u8 *country_ie_len)
590{
591 u8 *triplets_start = *country_ie;
592 u8 len_at_triplet = *country_ie_len;
593 int end_subband_chan = orig_end_channel;
594
595 /*
596 * We'll deal with padding for the caller unless
597 * it's not immediate and we don't process any channels
598 */
599 if (*country_ie_len == 1) {
600 *country_ie += 1;
601 *country_ie_len -= 1;
602 return orig_end_channel;
603 }
604
605 /* Move to the next triplet and then start search */
606 *country_ie += 3;
607 *country_ie_len -= 3;
608
609 if (!chan_in_band(orig_cur_chan, band))
610 return 0;
611
612 while (*country_ie_len >= 3) {
613 int end_channel = 0;
614 struct ieee80211_country_ie_triplet *triplet =
615 (struct ieee80211_country_ie_triplet *) *country_ie;
616 int cur_channel = 0, next_expected_chan;
617
618 /* means last triplet is completely unrelated to this one */
619 if (triplet->ext.reg_extension_id >=
620 IEEE80211_COUNTRY_EXTENSION_ID) {
621 *country_ie -= 3;
622 *country_ie_len += 3;
623 break;
624 }
625
626 if (triplet->chans.first_channel == 0) {
627 *country_ie += 1;
628 *country_ie_len -= 1;
629 if (*country_ie_len != 0)
630 return 0;
631 break;
632 }
633
634 if (triplet->chans.num_channels == 0)
635 return 0;
636
637 /* Monotonically increasing channel order */
638 if (triplet->chans.first_channel <= end_subband_chan)
639 return 0;
640
641 if (!chan_in_band(triplet->chans.first_channel, band))
642 return 0;
643
644 /* 2 GHz */
645 if (triplet->chans.first_channel <= 14) {
646 end_channel = triplet->chans.first_channel +
647 triplet->chans.num_channels - 1;
648 }
649 else {
650 end_channel = triplet->chans.first_channel +
651 (4 * (triplet->chans.num_channels - 1));
652 }
653
654 if (!chan_in_band(end_channel, band))
655 return 0;
656
657 if (orig_max_power != triplet->chans.max_power) {
658 *country_ie -= 3;
659 *country_ie_len += 3;
660 break;
661 }
662
663 cur_channel = triplet->chans.first_channel;
664
665 /* The key is finding the right next expected channel */
666 if (band == IEEE80211_BAND_2GHZ)
667 next_expected_chan = end_subband_chan + 1;
668 else
669 next_expected_chan = end_subband_chan + 4;
670
671 if (cur_channel != next_expected_chan) {
672 *country_ie -= 3;
673 *country_ie_len += 3;
674 break;
675 }
676
677 end_subband_chan = end_channel;
678
679 /* Move to the next one */
680 *country_ie += 3;
681 *country_ie_len -= 3;
682
683 /*
684 * Padding needs to be dealt with if we processed
685 * some channels.
686 */
687 if (*country_ie_len == 1) {
688 *country_ie += 1;
689 *country_ie_len -= 1;
690 break;
691 }
692
693 /* If seen, the IE is invalid */
694 if (*country_ie_len == 2)
695 return 0;
696 }
697
698 if (end_subband_chan == orig_end_channel) {
699 *country_ie = triplets_start;
700 *country_ie_len = len_at_triplet;
701 return orig_end_channel;
702 }
703
704 return end_subband_chan;
705}
706
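
As a rough sketch of the per-band end-channel arithmetic used above and in country_ie_2_rd() below (country_ie_end_channel() is a hypothetical helper, not part of this patch):

/* hypothetical helper, not part of this patch */
static int country_ie_end_channel(enum ieee80211_band band,
				  int first_channel, int num_channels)
{
	/* 2 GHz: channels are numbered consecutively, e.g. 1..14 */
	if (band == IEEE80211_BAND_2GHZ)
		return first_channel + num_channels - 1;
	/* 5 GHz: the channels of interest are spaced 4 apart, e.g. 36, 40, 44, 48 */
	return first_channel + 4 * (num_channels - 1);
}

A 5 GHz triplet of (36, 4) therefore ends at channel 48, and per-channel triplets collapse into one subband as long as each first channel matches the expected next channel (previous end + 1 on 2 GHz, previous end + 4 on 5 GHz) and the max power stays the same.
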
707/*
457 * Converts a country IE to a regulatory domain. A regulatory domain 708 * Converts a country IE to a regulatory domain. A regulatory domain
458 * structure has a lot of information which the IE doesn't yet have, 709 * structure has a lot of information which the IE doesn't yet have,
459 * so for the other values we use upper max values as we will intersect 710 * so for the other values we use upper max values as we will intersect
460 * with our userspace regulatory agent to get lower bounds. 711 * with our userspace regulatory agent to get lower bounds.
461 */ 712 */
462static struct ieee80211_regdomain *country_ie_2_rd( 713static struct ieee80211_regdomain *country_ie_2_rd(
714 enum ieee80211_band band,
463 u8 *country_ie, 715 u8 *country_ie,
464 u8 country_ie_len, 716 u8 country_ie_len,
465 u32 *checksum) 717 u32 *checksum)
@@ -521,10 +773,29 @@ static struct ieee80211_regdomain *country_ie_2_rd(
521 continue; 773 continue;
522 } 774 }
523 775
776 /*
777 * APs can add padding to make the length divisible
778 * by two, as required by the spec.
779 */
780 if (triplet->chans.first_channel == 0) {
781 country_ie++;
782 country_ie_len--;
783 /* This is expected to be at the very end only */
784 if (country_ie_len != 0)
785 return NULL;
786 break;
787 }
788
789 if (triplet->chans.num_channels == 0)
790 return NULL;
791
792 if (!chan_in_band(triplet->chans.first_channel, band))
793 return NULL;
794
524 /* 2 GHz */ 795 /* 2 GHz */
525 if (triplet->chans.first_channel <= 14) 796 if (band == IEEE80211_BAND_2GHZ)
526 end_channel = triplet->chans.first_channel + 797 end_channel = triplet->chans.first_channel +
527 triplet->chans.num_channels; 798 triplet->chans.num_channels - 1;
528 else 799 else
529 /* 800 /*
530 * 5 GHz -- For example in country IEs if the first 801 * 5 GHz -- For example in country IEs if the first
@@ -539,6 +810,24 @@ static struct ieee80211_regdomain *country_ie_2_rd(
539 (4 * (triplet->chans.num_channels - 1)); 810 (4 * (triplet->chans.num_channels - 1));
540 811
541 cur_channel = triplet->chans.first_channel; 812 cur_channel = triplet->chans.first_channel;
813
814 /*
815 * Enhancement for APs that send a triplet for every channel
816 * or for whatever reason send triplets with multiple channels
817 * separated when in fact they should be together.
818 */
819 end_channel = max_subband_chan(band,
820 cur_channel,
821 end_channel,
822 triplet->chans.max_power,
823 &country_ie,
824 &country_ie_len);
825 if (!end_channel)
826 return NULL;
827
828 if (!chan_in_band(end_channel, band))
829 return NULL;
830
542 cur_sub_max_channel = end_channel; 831 cur_sub_max_channel = end_channel;
543 832
544 /* Basic sanity check */ 833 /* Basic sanity check */
@@ -569,10 +858,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
569 858
570 last_sub_max_channel = cur_sub_max_channel; 859 last_sub_max_channel = cur_sub_max_channel;
571 860
572 country_ie += 3;
573 country_ie_len -= 3;
574 num_rules++; 861 num_rules++;
575 862
863 if (country_ie_len >= 3) {
864 country_ie += 3;
865 country_ie_len -= 3;
866 }
867
576 /* 868 /*
577 * Note: this is not a IEEE requirement but 869 * Note: this is not a IEEE requirement but
578 * simply a memory requirement 870 * simply a memory requirement
@@ -615,6 +907,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
615 continue; 907 continue;
616 } 908 }
617 909
910 if (triplet->chans.first_channel == 0) {
911 country_ie++;
912 country_ie_len--;
913 break;
914 }
915
618 reg_rule = &rd->reg_rules[i]; 916 reg_rule = &rd->reg_rules[i];
619 freq_range = &reg_rule->freq_range; 917 freq_range = &reg_rule->freq_range;
620 power_rule = &reg_rule->power_rule; 918 power_rule = &reg_rule->power_rule;
@@ -622,13 +920,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
622 reg_rule->flags = flags; 920 reg_rule->flags = flags;
623 921
624 /* 2 GHz */ 922 /* 2 GHz */
625 if (triplet->chans.first_channel <= 14) 923 if (band == IEEE80211_BAND_2GHZ)
626 end_channel = triplet->chans.first_channel + 924 end_channel = triplet->chans.first_channel +
627 triplet->chans.num_channels; 925 triplet->chans.num_channels - 1;
628 else 926 else
629 end_channel = triplet->chans.first_channel + 927 end_channel = triplet->chans.first_channel +
630 (4 * (triplet->chans.num_channels - 1)); 928 (4 * (triplet->chans.num_channels - 1));
631 929
930 end_channel = max_subband_chan(band,
931 triplet->chans.first_channel,
932 end_channel,
933 triplet->chans.max_power,
934 &country_ie,
935 &country_ie_len);
936
632 /* 937 /*
633 * The +10 is since the regulatory domain expects 938 * The +10 is since the regulatory domain expects
634 * the actual band edge, not the center of freq for 939 * the actual band edge, not the center of freq for
@@ -649,12 +954,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
649 */ 954 */
650 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); 955 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
651 power_rule->max_antenna_gain = DBI_TO_MBI(100); 956 power_rule->max_antenna_gain = DBI_TO_MBI(100);
652 power_rule->max_eirp = DBM_TO_MBM(100); 957 power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
653 958
654 country_ie += 3;
655 country_ie_len -= 3;
656 i++; 959 i++;
657 960
961 if (country_ie_len >= 3) {
962 country_ie += 3;
963 country_ie_len -= 3;
964 }
965
658 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); 966 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
659 } 967 }
660 968
@@ -950,25 +1258,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
950 if (r == -ERANGE && 1258 if (r == -ERANGE &&
951 last_request->initiator == 1259 last_request->initiator ==
952 NL80211_REGDOM_SET_BY_COUNTRY_IE) { 1260 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
953#ifdef CONFIG_CFG80211_REG_DEBUG 1261 REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
954 printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
955 "intact on %s - no rule found in band on " 1262 "intact on %s - no rule found in band on "
956 "Country IE\n", 1263 "Country IE\n",
957 chan->center_freq, wiphy_name(wiphy)); 1264 chan->center_freq, wiphy_name(wiphy));
958#endif
959 } else { 1265 } else {
960 /* 1266 /*
961 * In this case we know the country IE has at least one reg rule 1267 * In this case we know the country IE has at least one reg rule
962 * for the band so we respect its band definitions 1268 * for the band so we respect its band definitions
963 */ 1269 */
964#ifdef CONFIG_CFG80211_REG_DEBUG
965 if (last_request->initiator == 1270 if (last_request->initiator ==
966 NL80211_REGDOM_SET_BY_COUNTRY_IE) 1271 NL80211_REGDOM_SET_BY_COUNTRY_IE)
967 printk(KERN_DEBUG "cfg80211: Disabling " 1272 REG_DBG_PRINT("cfg80211: Disabling "
968 "channel %d MHz on %s due to " 1273 "channel %d MHz on %s due to "
969 "Country IE\n", 1274 "Country IE\n",
970 chan->center_freq, wiphy_name(wiphy)); 1275 chan->center_freq, wiphy_name(wiphy));
971#endif
972 flags |= IEEE80211_CHAN_DISABLED; 1276 flags |= IEEE80211_CHAN_DISABLED;
973 chan->flags = flags; 1277 chan->flags = flags;
974 } 1278 }
@@ -1342,30 +1646,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1342} 1646}
1343EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1647EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1344 1648
1345static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
1346 const struct ieee80211_regdomain *src_regd)
1347{
1348 struct ieee80211_regdomain *regd;
1349 int size_of_regd = 0;
1350 unsigned int i;
1351
1352 size_of_regd = sizeof(struct ieee80211_regdomain) +
1353 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
1354
1355 regd = kzalloc(size_of_regd, GFP_KERNEL);
1356 if (!regd)
1357 return -ENOMEM;
1358
1359 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
1360
1361 for (i = 0; i < src_regd->n_reg_rules; i++)
1362 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
1363 sizeof(struct ieee80211_reg_rule));
1364
1365 *dst_regd = regd;
1366 return 0;
1367}
1368
1369/* 1649/*
1370 * Return value which can be used by ignore_request() to indicate 1650 * Return value which can be used by ignore_request() to indicate
1371 * it has been determined we should intersect two regulatory domains 1651 * it has been determined we should intersect two regulatory domains
@@ -1387,7 +1667,7 @@ static int ignore_request(struct wiphy *wiphy,
1387 1667
1388 switch (pending_request->initiator) { 1668 switch (pending_request->initiator) {
1389 case NL80211_REGDOM_SET_BY_CORE: 1669 case NL80211_REGDOM_SET_BY_CORE:
1390 return -EINVAL; 1670 return 0;
1391 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 1671 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1392 1672
1393 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1673 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
@@ -1418,8 +1698,6 @@ static int ignore_request(struct wiphy *wiphy,
1418 return REG_INTERSECT; 1698 return REG_INTERSECT;
1419 case NL80211_REGDOM_SET_BY_DRIVER: 1699 case NL80211_REGDOM_SET_BY_DRIVER:
1420 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { 1700 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
1421 if (is_old_static_regdom(cfg80211_regdomain))
1422 return 0;
1423 if (regdom_changes(pending_request->alpha2)) 1701 if (regdom_changes(pending_request->alpha2))
1424 return 0; 1702 return 0;
1425 return -EALREADY; 1703 return -EALREADY;
@@ -1456,8 +1734,7 @@ static int ignore_request(struct wiphy *wiphy,
1456 return -EAGAIN; 1734 return -EAGAIN;
1457 } 1735 }
1458 1736
1459 if (!is_old_static_regdom(cfg80211_regdomain) && 1737 if (!regdom_changes(pending_request->alpha2))
1460 !regdom_changes(pending_request->alpha2))
1461 return -EALREADY; 1738 return -EALREADY;
1462 1739
1463 return 0; 1740 return 0;
@@ -1529,6 +1806,11 @@ new_request:
1529 1806
1530 pending_request = NULL; 1807 pending_request = NULL;
1531 1808
1809 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
1810 user_alpha2[0] = last_request->alpha2[0];
1811 user_alpha2[1] = last_request->alpha2[1];
1812 }
1813
1532 /* When r == REG_INTERSECT we do need to call CRDA */ 1814 /* When r == REG_INTERSECT we do need to call CRDA */
1533 if (r < 0) { 1815 if (r < 0) {
1534 /* 1816 /*
@@ -1648,12 +1930,16 @@ static void queue_regulatory_request(struct regulatory_request *request)
1648 schedule_work(&reg_work); 1930 schedule_work(&reg_work);
1649} 1931}
1650 1932
1651/* Core regulatory hint -- happens once during cfg80211_init() */ 1933/*
1934 * Core regulatory hint -- happens during cfg80211_init()
1935 * and when we restore regulatory settings.
1936 */
1652static int regulatory_hint_core(const char *alpha2) 1937static int regulatory_hint_core(const char *alpha2)
1653{ 1938{
1654 struct regulatory_request *request; 1939 struct regulatory_request *request;
1655 1940
1656 BUG_ON(last_request); 1941 kfree(last_request);
1942 last_request = NULL;
1657 1943
1658 request = kzalloc(sizeof(struct regulatory_request), 1944 request = kzalloc(sizeof(struct regulatory_request),
1659 GFP_KERNEL); 1945 GFP_KERNEL);
@@ -1664,14 +1950,12 @@ static int regulatory_hint_core(const char *alpha2)
1664 request->alpha2[1] = alpha2[1]; 1950 request->alpha2[1] = alpha2[1];
1665 request->initiator = NL80211_REGDOM_SET_BY_CORE; 1951 request->initiator = NL80211_REGDOM_SET_BY_CORE;
1666 1952
1667 queue_regulatory_request(request);
1668
1669 /* 1953 /*
1670 * This ensures last_request is populated once modules 1954 * This ensures last_request is populated once modules
1671 * come swinging in and calling regulatory hints and 1955 * come swinging in and calling regulatory hints and
1672 * wiphy_apply_custom_regulatory(). 1956 * wiphy_apply_custom_regulatory().
1673 */ 1957 */
1674 flush_scheduled_work(); 1958 reg_process_hint(request);
1675 1959
1676 return 0; 1960 return 0;
1677} 1961}
@@ -1758,8 +2042,9 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
1758 * therefore cannot iterate over the rdev list here. 2042 * therefore cannot iterate over the rdev list here.
1759 */ 2043 */
1760void regulatory_hint_11d(struct wiphy *wiphy, 2044void regulatory_hint_11d(struct wiphy *wiphy,
1761 u8 *country_ie, 2045 enum ieee80211_band band,
1762 u8 country_ie_len) 2046 u8 *country_ie,
2047 u8 country_ie_len)
1763{ 2048{
1764 struct ieee80211_regdomain *rd = NULL; 2049 struct ieee80211_regdomain *rd = NULL;
1765 char alpha2[2]; 2050 char alpha2[2];
@@ -1805,9 +2090,11 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1805 wiphy_idx_valid(last_request->wiphy_idx))) 2090 wiphy_idx_valid(last_request->wiphy_idx)))
1806 goto out; 2091 goto out;
1807 2092
1808 rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); 2093 rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
1809 if (!rd) 2094 if (!rd) {
2095 REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
1810 goto out; 2096 goto out;
2097 }
1811 2098
1812 /* 2099 /*
1813 * This will not happen right now but we leave it here for the 2100 * This will not happen right now but we leave it here for the
@@ -1850,6 +2137,123 @@ out:
1850 mutex_unlock(&reg_mutex); 2137 mutex_unlock(&reg_mutex);
1851} 2138}
1852 2139
2140static void restore_alpha2(char *alpha2, bool reset_user)
2141{
2142 /* indicates there is no alpha2 to consider for restoration */
2143 alpha2[0] = '9';
2144 alpha2[1] = '7';
2145
2146 /* The user setting has precedence over the module parameter */
2147 if (is_user_regdom_saved()) {
2148 /* Unless we're asked to ignore it and reset it */
2149 if (reset_user) {
2150 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2151 "including user preference\n");
2152 user_alpha2[0] = '9';
2153 user_alpha2[1] = '7';
2154
2155 /*
2156 * If we're ignoring user settings, we still need to
2157 * check the module parameter to ensure we put things
2158 * back as they were for a full restore.
2159 */
2160 if (!is_world_regdom(ieee80211_regdom)) {
2161 REG_DBG_PRINT("cfg80211: Keeping preference on "
2162 "module parameter ieee80211_regdom: %c%c\n",
2163 ieee80211_regdom[0],
2164 ieee80211_regdom[1]);
2165 alpha2[0] = ieee80211_regdom[0];
2166 alpha2[1] = ieee80211_regdom[1];
2167 }
2168 } else {
2169 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2170 "while preserving user preference for: %c%c\n",
2171 user_alpha2[0],
2172 user_alpha2[1]);
2173 alpha2[0] = user_alpha2[0];
2174 alpha2[1] = user_alpha2[1];
2175 }
2176 } else if (!is_world_regdom(ieee80211_regdom)) {
2177 REG_DBG_PRINT("cfg80211: Keeping preference on "
2178 "module parameter ieee80211_regdom: %c%c\n",
2179 ieee80211_regdom[0],
2180 ieee80211_regdom[1]);
2181 alpha2[0] = ieee80211_regdom[0];
2182 alpha2[1] = ieee80211_regdom[1];
2183 } else
2184 REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
2185}
2186
2187/*
2188 * Restoring regulatory settings involves ignoring any
2189 * possibly stale country IE information and user regulatory
2190 * settings if so desired; this includes any beacon hints
2191 * learned as we could have traveled outside to another country
2192 * after disconnection. To restore regulatory settings we do
2193 * exactly what we did at bootup:
2194 *
2195 * - send a core regulatory hint
2196 * - send a user regulatory hint if applicable
2197 *
2198 * Device drivers that send a regulatory hint for a specific country
2199 * keep their own regulatory domain on wiphy->regd so that it does
2200 * not need to be remembered.
2201 */
2202static void restore_regulatory_settings(bool reset_user)
2203{
2204 char alpha2[2];
2205 struct reg_beacon *reg_beacon, *btmp;
2206
2207 mutex_lock(&cfg80211_mutex);
2208 mutex_lock(&reg_mutex);
2209
2210 reset_regdomains();
2211 restore_alpha2(alpha2, reset_user);
2212
2213 /* Clear beacon hints */
2214 spin_lock_bh(&reg_pending_beacons_lock);
2215 if (!list_empty(&reg_pending_beacons)) {
2216 list_for_each_entry_safe(reg_beacon, btmp,
2217 &reg_pending_beacons, list) {
2218 list_del(&reg_beacon->list);
2219 kfree(reg_beacon);
2220 }
2221 }
2222 spin_unlock_bh(&reg_pending_beacons_lock);
2223
2224 if (!list_empty(&reg_beacon_list)) {
2225 list_for_each_entry_safe(reg_beacon, btmp,
2226 &reg_beacon_list, list) {
2227 list_del(&reg_beacon->list);
2228 kfree(reg_beacon);
2229 }
2230 }
2231
2232 /* First restore to the basic regulatory settings */
2233 cfg80211_regdomain = cfg80211_world_regdom;
2234
2235 mutex_unlock(&reg_mutex);
2236 mutex_unlock(&cfg80211_mutex);
2237
2238 regulatory_hint_core(cfg80211_regdomain->alpha2);
2239
2240 /*
2241 * This restores the ieee80211_regdom module parameter
2242 * preference or the last user requested regulatory
2243 * settings, user regulatory settings takes precedence.
2244 */
2245 if (is_an_alpha2(alpha2))
2246 regulatory_hint_user(user_alpha2);
2247}
2248
2249
2250void regulatory_hint_disconnect(void)
2251{
2252 REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
2253 "restore regulatory settings\n");
2254 restore_regulatory_settings(false);
2255}
2256
1853static bool freq_is_chan_12_13_14(u16 freq) 2257static bool freq_is_chan_12_13_14(u16 freq)
1854{ 2258{
1855 if (freq == ieee80211_channel_to_frequency(12) || 2259 if (freq == ieee80211_channel_to_frequency(12) ||
@@ -1875,13 +2279,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
1875 if (!reg_beacon) 2279 if (!reg_beacon)
1876 return -ENOMEM; 2280 return -ENOMEM;
1877 2281
1878#ifdef CONFIG_CFG80211_REG_DEBUG 2282 REG_DBG_PRINT("cfg80211: Found new beacon on "
1879 printk(KERN_DEBUG "cfg80211: Found new beacon on " 2283 "frequency: %d MHz (Ch %d) on %s\n",
1880 "frequency: %d MHz (Ch %d) on %s\n", 2284 beacon_chan->center_freq,
1881 beacon_chan->center_freq, 2285 ieee80211_frequency_to_channel(beacon_chan->center_freq),
1882 ieee80211_frequency_to_channel(beacon_chan->center_freq), 2286 wiphy_name(wiphy));
1883 wiphy_name(wiphy)); 2287
1884#endif
1885 memcpy(&reg_beacon->chan, beacon_chan, 2288 memcpy(&reg_beacon->chan, beacon_chan,
1886 sizeof(struct ieee80211_channel)); 2289 sizeof(struct ieee80211_channel));
1887 2290
@@ -2039,8 +2442,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2039 * If someone else asked us to change the rd lets only bother 2442 * If someone else asked us to change the rd lets only bother
2040 * checking if the alpha2 changes if CRDA was already called 2443 * checking if the alpha2 changes if CRDA was already called
2041 */ 2444 */
2042 if (!is_old_static_regdom(cfg80211_regdomain) && 2445 if (!regdom_changes(rd->alpha2))
2043 !regdom_changes(rd->alpha2))
2044 return -EINVAL; 2446 return -EINVAL;
2045 } 2447 }
2046 2448
@@ -2239,15 +2641,11 @@ int regulatory_init(void)
2239 spin_lock_init(&reg_requests_lock); 2641 spin_lock_init(&reg_requests_lock);
2240 spin_lock_init(&reg_pending_beacons_lock); 2642 spin_lock_init(&reg_pending_beacons_lock);
2241 2643
2242#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2243 cfg80211_regdomain = static_regdom(ieee80211_regdom);
2244
2245 printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
2246 print_regdomain_info(cfg80211_regdomain);
2247#else
2248 cfg80211_regdomain = cfg80211_world_regdom; 2644 cfg80211_regdomain = cfg80211_world_regdom;
2249 2645
2250#endif 2646 user_alpha2[0] = '9';
2647 user_alpha2[1] = '7';
2648
2251 /* We always try to get an update for the static regdomain */ 2649 /* We always try to get an update for the static regdomain */
2252 err = regulatory_hint_core(cfg80211_regdomain->alpha2); 2650 err = regulatory_hint_core(cfg80211_regdomain->alpha2);
2253 if (err) { 2651 if (err) {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 3362c7c069b2..b26224a9f3bc 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -41,15 +41,44 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
41 * regulatory_hint_11d - hints a country IE as a regulatory domain 41 * regulatory_hint_11d - hints a country IE as a regulatory domain
42 * @wiphy: the wireless device giving the hint (used only for reporting 42 * @wiphy: the wireless device giving the hint (used only for reporting
43 * conflicts) 43 * conflicts)
44 * @band: the band on which the country IE was received on. This determines
45 * the band we'll process the country IE channel triplets for.
44 * @country_ie: pointer to the country IE 46 * @country_ie: pointer to the country IE
45 * @country_ie_len: length of the country IE 47 * @country_ie_len: length of the country IE
46 * 48 *
47 * We will intersect the rd with what CRDA tells us should apply 49 * We will intersect the rd with what CRDA tells us should apply
48 * for the alpha2 this country IE belongs to, this prevents APs from 50 * for the alpha2 this country IE belongs to, this prevents APs from
49 * sending us incorrect or outdated information against a country. 51 * sending us incorrect or outdated information against a country.
52 *
53 * The AP is expected to provide Country IE channel triplets for the
54 * band it is on. It is technically possible for APs to send country
55 * IE channel triplets for channels outside of the band they are on,
56 * but to do so they would have to use the regulatory extension in
57 * combination with a triplet; this behaviour is currently not
58 * observed. For this reason, if a triplet is seen with channel
59 * information for a band the BSS is not present on, it will be ignored.
50 */ 60 */
51void regulatory_hint_11d(struct wiphy *wiphy, 61void regulatory_hint_11d(struct wiphy *wiphy,
62 enum ieee80211_band band,
52 u8 *country_ie, 63 u8 *country_ie,
53 u8 country_ie_len); 64 u8 country_ie_len);
54 65
66/**
67 * regulatory_hint_disconnect - informs that all devices have been disconnected
68 *
69 * Regulatory rules can be enhanced further upon scanning and upon
70 * connection to an AP. These rules become stale if we disconnect
71 * and go to another country, whether or not we suspend and resume.
72 * If we suspend, go to another country and resume we'll automatically
73 * get disconnected shortly after resuming and things will be reset as well.
74 * This routine is a helper to restore regulatory settings to how they were
75 * prior to our first connect attempt. This includes ignoring country IE and
76 * beacon regulatory hints. The ieee80211_regdom module parameter will always
77 * be respected, but if a user has set the regulatory domain, that will take
78 * precedence.
79 *
80 * Must be called from process context.
81 */
82void regulatory_hint_disconnect(void);
83
55#endif /* __NET_WIRELESS_REG_H */ 84#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 000000000000..818222c92513
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
1#ifndef __REGDB_H__
2#define __REGDB_H__
3
4extern const struct ieee80211_regdomain *reg_regdb[];
5extern int reg_regdb_size;
6
7#endif /* __REGDB_H__ */
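
When CONFIG_CFG80211_INTERNAL_REGDB is set, these symbols are expected to come from a table built into the kernel; the in-tree build normally generates that table from a regulatory database text file rather than writing it by hand. A hand-written sketch of the shape of such a table (the single rule below is illustrative only, not the real database):

#include <linux/kernel.h>
#include <net/cfg80211.h>
#include "regdb.h"

/* illustrative world ("00") entry only -- not the real regulatory database */
static const struct ieee80211_regdomain regdom_00 = {
	.alpha2 = "00",
	.n_reg_rules = 1,
	.reg_rules = {
		/* 2.4 GHz ISM band, 40 MHz max bandwidth, 20 dBm max EIRP */
		REG_RULE(2402, 2472, 40, 6, 20, 0),
	},
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_00,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);
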
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0c2cbbebca95..a026c6d56bd3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -4,6 +4,7 @@
4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/slab.h>
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/netdevice.h> 9#include <linux/netdevice.h>
9#include <linux/wireless.h> 10#include <linux/wireless.h>
@@ -100,8 +101,10 @@ static void bss_release(struct kref *ref)
100 if (bss->pub.free_priv) 101 if (bss->pub.free_priv)
101 bss->pub.free_priv(&bss->pub); 102 bss->pub.free_priv(&bss->pub);
102 103
103 if (bss->ies_allocated) 104 if (bss->beacon_ies_allocated)
104 kfree(bss->pub.information_elements); 105 kfree(bss->pub.beacon_ies);
106 if (bss->proberesp_ies_allocated)
107 kfree(bss->pub.proberesp_ies);
105 108
106 BUG_ON(atomic_read(&bss->hold)); 109 BUG_ON(atomic_read(&bss->hold));
107 110
@@ -141,9 +144,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
141 dev->bss_generation++; 144 dev->bss_generation++;
142} 145}
143 146
144static u8 *find_ie(u8 num, u8 *ies, int len) 147const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
145{ 148{
146 while (len > 2 && ies[0] != num) { 149 while (len > 2 && ies[0] != eid) {
147 len -= ies[1] + 2; 150 len -= ies[1] + 2;
148 ies += ies[1] + 2; 151 ies += ies[1] + 2;
149 } 152 }
@@ -153,11 +156,12 @@ static u8 *find_ie(u8 num, u8 *ies, int len)
153 return NULL; 156 return NULL;
154 return ies; 157 return ies;
155} 158}
159EXPORT_SYMBOL(cfg80211_find_ie);
156 160
157static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) 161static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
158{ 162{
159 const u8 *ie1 = find_ie(num, ies1, len1); 163 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
160 const u8 *ie2 = find_ie(num, ies2, len2); 164 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
161 int r; 165 int r;
162 166
163 if (!ie1 && !ie2) 167 if (!ie1 && !ie2)
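
Since cfg80211_find_ie() is now exported, callers outside scan.c can reuse it for simple element lookups. A minimal sketch (example_get_ssid() is a hypothetical helper, not part of this patch):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* hypothetical helper, not part of this patch */
static int example_get_ssid(struct cfg80211_bss *bss, u8 *buf, size_t buflen)
{
	const u8 *ie;

	ie = cfg80211_find_ie(WLAN_EID_SSID, bss->information_elements,
			      bss->len_information_elements);
	if (!ie || ie[1] > buflen)
		return -ENOENT;

	/* ie[0] is the element ID, ie[1] the payload length */
	memcpy(buf, ie + 2, ie[1]);
	return ie[1];
}
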
@@ -183,9 +187,9 @@ static bool is_bss(struct cfg80211_bss *a,
183 if (!ssid) 187 if (!ssid)
184 return true; 188 return true;
185 189
186 ssidie = find_ie(WLAN_EID_SSID, 190 ssidie = cfg80211_find_ie(WLAN_EID_SSID,
187 a->information_elements, 191 a->information_elements,
188 a->len_information_elements); 192 a->len_information_elements);
189 if (!ssidie) 193 if (!ssidie)
190 return false; 194 return false;
191 if (ssidie[1] != ssid_len) 195 if (ssidie[1] != ssid_len)
@@ -202,9 +206,9 @@ static bool is_mesh(struct cfg80211_bss *a,
202 if (!is_zero_ether_addr(a->bssid)) 206 if (!is_zero_ether_addr(a->bssid))
203 return false; 207 return false;
204 208
205 ie = find_ie(WLAN_EID_MESH_ID, 209 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
206 a->information_elements, 210 a->information_elements,
207 a->len_information_elements); 211 a->len_information_elements);
208 if (!ie) 212 if (!ie)
209 return false; 213 return false;
210 if (ie[1] != meshidlen) 214 if (ie[1] != meshidlen)
@@ -212,9 +216,9 @@ static bool is_mesh(struct cfg80211_bss *a,
212 if (memcmp(ie + 2, meshid, meshidlen)) 216 if (memcmp(ie + 2, meshid, meshidlen))
213 return false; 217 return false;
214 218
215 ie = find_ie(WLAN_EID_MESH_CONFIG, 219 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
216 a->information_elements, 220 a->information_elements,
217 a->len_information_elements); 221 a->len_information_elements);
218 if (!ie) 222 if (!ie)
219 return false; 223 return false;
220 if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) 224 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
@@ -375,8 +379,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
375 379
376static struct cfg80211_internal_bss * 380static struct cfg80211_internal_bss *
377cfg80211_bss_update(struct cfg80211_registered_device *dev, 381cfg80211_bss_update(struct cfg80211_registered_device *dev,
378 struct cfg80211_internal_bss *res, 382 struct cfg80211_internal_bss *res)
379 bool overwrite)
380{ 383{
381 struct cfg80211_internal_bss *found = NULL; 384 struct cfg80211_internal_bss *found = NULL;
382 const u8 *meshid, *meshcfg; 385 const u8 *meshid, *meshcfg;
@@ -394,11 +397,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
394 397
395 if (is_zero_ether_addr(res->pub.bssid)) { 398 if (is_zero_ether_addr(res->pub.bssid)) {
396 /* must be mesh, verify */ 399 /* must be mesh, verify */
397 meshid = find_ie(WLAN_EID_MESH_ID, res->pub.information_elements, 400 meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
398 res->pub.len_information_elements); 401 res->pub.information_elements,
399 meshcfg = find_ie(WLAN_EID_MESH_CONFIG, 402 res->pub.len_information_elements);
400 res->pub.information_elements, 403 meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
401 res->pub.len_information_elements); 404 res->pub.information_elements,
405 res->pub.len_information_elements);
402 if (!meshid || !meshcfg || 406 if (!meshid || !meshcfg ||
403 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) { 407 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
404 /* bogus mesh */ 408 /* bogus mesh */
@@ -418,28 +422,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
418 found->pub.capability = res->pub.capability; 422 found->pub.capability = res->pub.capability;
419 found->ts = res->ts; 423 found->ts = res->ts;
420 424
421 /* overwrite IEs */ 425 /* Update IEs */
422 if (overwrite) { 426 if (res->pub.proberesp_ies) {
423 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 427 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
424 size_t ielen = res->pub.len_information_elements; 428 size_t ielen = res->pub.len_proberesp_ies;
429
430 if (found->pub.proberesp_ies &&
431 !found->proberesp_ies_allocated &&
432 ksize(found) >= used + ielen) {
433 memcpy(found->pub.proberesp_ies,
434 res->pub.proberesp_ies, ielen);
435 found->pub.len_proberesp_ies = ielen;
436 } else {
437 u8 *ies = found->pub.proberesp_ies;
438
439 if (found->proberesp_ies_allocated)
440 ies = krealloc(ies, ielen, GFP_ATOMIC);
441 else
442 ies = kmalloc(ielen, GFP_ATOMIC);
443
444 if (ies) {
445 memcpy(ies, res->pub.proberesp_ies,
446 ielen);
447 found->proberesp_ies_allocated = true;
448 found->pub.proberesp_ies = ies;
449 found->pub.len_proberesp_ies = ielen;
450 }
451 }
425 452
426 if (!found->ies_allocated && ksize(found) >= used + ielen) { 453 /* Override possible earlier Beacon frame IEs */
427 memcpy(found->pub.information_elements, 454 found->pub.information_elements =
428 res->pub.information_elements, ielen); 455 found->pub.proberesp_ies;
429 found->pub.len_information_elements = ielen; 456 found->pub.len_information_elements =
457 found->pub.len_proberesp_ies;
458 }
459 if (res->pub.beacon_ies) {
460 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
461 size_t ielen = res->pub.len_beacon_ies;
462
463 if (found->pub.beacon_ies &&
464 !found->beacon_ies_allocated &&
465 ksize(found) >= used + ielen) {
466 memcpy(found->pub.beacon_ies,
467 res->pub.beacon_ies, ielen);
468 found->pub.len_beacon_ies = ielen;
430 } else { 469 } else {
431 u8 *ies = found->pub.information_elements; 470 u8 *ies = found->pub.beacon_ies;
432 471
433 if (found->ies_allocated) 472 if (found->beacon_ies_allocated)
434 ies = krealloc(ies, ielen, GFP_ATOMIC); 473 ies = krealloc(ies, ielen, GFP_ATOMIC);
435 else 474 else
436 ies = kmalloc(ielen, GFP_ATOMIC); 475 ies = kmalloc(ielen, GFP_ATOMIC);
437 476
438 if (ies) { 477 if (ies) {
439 memcpy(ies, res->pub.information_elements, ielen); 478 memcpy(ies, res->pub.beacon_ies,
440 found->ies_allocated = true; 479 ielen);
441 found->pub.information_elements = ies; 480 found->beacon_ies_allocated = true;
442 found->pub.len_information_elements = ielen; 481 found->pub.beacon_ies = ies;
482 found->pub.len_beacon_ies = ielen;
443 } 483 }
444 } 484 }
445 } 485 }
@@ -489,14 +529,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
489 res->pub.tsf = timestamp; 529 res->pub.tsf = timestamp;
490 res->pub.beacon_interval = beacon_interval; 530 res->pub.beacon_interval = beacon_interval;
491 res->pub.capability = capability; 531 res->pub.capability = capability;
492 /* point to after the private area */ 532 /*
493 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 533 * Since we do not know here whether the IEs are from a Beacon or Probe
494 memcpy(res->pub.information_elements, ie, ielen); 534 * Response frame, we need to pick one of the options and only use it
495 res->pub.len_information_elements = ielen; 535 * with the driver that does not provide the full Beacon/Probe Response
536 * frame. Use Beacon frame pointer to avoid indicating that this should
537 * override the information_elements pointer should we have received an
538 * earlier indication of Probe Response data.
539 *
540 * The initial buffer for the IEs is allocated with the BSS entry and
541 * is located after the private area.
542 */
543 res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
544 memcpy(res->pub.beacon_ies, ie, ielen);
545 res->pub.len_beacon_ies = ielen;
546 res->pub.information_elements = res->pub.beacon_ies;
547 res->pub.len_information_elements = res->pub.len_beacon_ies;
496 548
497 kref_init(&res->ref); 549 kref_init(&res->ref);
498 550
499 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0); 551 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
500 if (!res) 552 if (!res)
501 return NULL; 553 return NULL;
502 554
@@ -517,7 +569,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
517 struct cfg80211_internal_bss *res; 569 struct cfg80211_internal_bss *res;
518 size_t ielen = len - offsetof(struct ieee80211_mgmt, 570 size_t ielen = len - offsetof(struct ieee80211_mgmt,
519 u.probe_resp.variable); 571 u.probe_resp.variable);
520 bool overwrite;
521 size_t privsz = wiphy->bss_priv_size; 572 size_t privsz = wiphy->bss_priv_size;
522 573
523 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 574 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +589,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
538 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); 589 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
539 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 590 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
540 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 591 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
541 /* point to after the private area */ 592 /*
542 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 593 * The initial buffer for the IEs is allocated with the BSS entry and
543 memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen); 594 * is located after the private area.
544 res->pub.len_information_elements = ielen; 595 */
596 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
597 res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
598 memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
599 ielen);
600 res->pub.len_proberesp_ies = ielen;
601 res->pub.information_elements = res->pub.proberesp_ies;
602 res->pub.len_information_elements = res->pub.len_proberesp_ies;
603 } else {
604 res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
605 memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
606 res->pub.len_beacon_ies = ielen;
607 res->pub.information_elements = res->pub.beacon_ies;
608 res->pub.len_information_elements = res->pub.len_beacon_ies;
609 }
545 610
546 kref_init(&res->ref); 611 kref_init(&res->ref);
547 612
548 overwrite = ieee80211_is_probe_resp(mgmt->frame_control); 613 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
549
550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
551 if (!res) 614 if (!res)
552 return NULL; 615 return NULL;
553 616
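
With the IEs now split into Beacon and Probe Response buffers, a consumer that wants the richer set can prefer proberesp_ies and fall back to beacon_ies. A rough sketch (example_bss_ies() is a hypothetical helper, not part of this patch):

#include <net/cfg80211.h>

/* hypothetical helper, not part of this patch */
static const u8 *example_bss_ies(struct cfg80211_bss *bss, size_t *len)
{
	if (bss->proberesp_ies) {
		*len = bss->len_proberesp_ies;
		return bss->proberesp_ies;
	}
	*len = bss->len_beacon_ies;
	return bss->beacon_ies;
}
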
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dc0fc4989d54..f4dfd5f5f2ea 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/slab.h>
10#include <linux/workqueue.h> 11#include <linux/workqueue.h>
11#include <linux/wireless.h> 12#include <linux/wireless.h>
12#include <net/iw_handler.h> 13#include <net/iw_handler.h>
@@ -34,6 +35,44 @@ struct cfg80211_conn {
34 bool auto_auth, prev_bssid_valid; 35 bool auto_auth, prev_bssid_valid;
35}; 36};
36 37
38bool cfg80211_is_all_idle(void)
39{
40 struct cfg80211_registered_device *rdev;
41 struct wireless_dev *wdev;
42 bool is_all_idle = true;
43
44 mutex_lock(&cfg80211_mutex);
45
46 /*
47 * All devices must be idle; otherwise, if you are actively
48 * scanning, some new beacon hints could be learned and would
49 * count as new regulatory hints.
50 */
51 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
52 cfg80211_lock_rdev(rdev);
53 list_for_each_entry(wdev, &rdev->netdev_list, list) {
54 wdev_lock(wdev);
55 if (wdev->sme_state != CFG80211_SME_IDLE)
56 is_all_idle = false;
57 wdev_unlock(wdev);
58 }
59 cfg80211_unlock_rdev(rdev);
60 }
61
62 mutex_unlock(&cfg80211_mutex);
63
64 return is_all_idle;
65}
66
67static void disconnect_work(struct work_struct *work)
68{
69 if (!cfg80211_is_all_idle())
70 return;
71
72 regulatory_hint_disconnect();
73}
74
75static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
37 76
38static int cfg80211_conn_scan(struct wireless_dev *wdev) 77static int cfg80211_conn_scan(struct wireless_dev *wdev)
39{ 78{
@@ -454,6 +493,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
454 * - and country_ie[1] which is the IE length 493 * - and country_ie[1] which is the IE length
455 */ 494 */
456 regulatory_hint_11d(wdev->wiphy, 495 regulatory_hint_11d(wdev->wiphy,
496 bss->channel->band,
457 country_ie + 2, 497 country_ie + 2,
458 country_ie[1]); 498 country_ie[1]);
459} 499}
@@ -657,6 +697,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 697 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
658 wdev->wext.connect.ssid_len = 0; 698 wdev->wext.connect.ssid_len = 0;
659#endif 699#endif
700
701 schedule_work(&cfg80211_disconnect_work);
660} 702}
661 703
662void cfg80211_disconnected(struct net_device *dev, u16 reason, 704void cfg80211_disconnected(struct net_device *dev, u16 reason,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index efe3c5c92b2d..9f2cef3e0ca0 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -33,10 +33,30 @@ static ssize_t name ## _show(struct device *dev, \
33 33
34SHOW_FMT(index, "%d", wiphy_idx); 34SHOW_FMT(index, "%d", wiphy_idx);
35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); 35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
36SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
37
38static ssize_t addresses_show(struct device *dev,
39 struct device_attribute *attr,
40 char *buf)
41{
42 struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
43 char *start = buf;
44 int i;
45
46 if (!wiphy->addresses)
47 return sprintf(buf, "%pM\n", wiphy->perm_addr);
48
49 for (i = 0; i < wiphy->n_addresses; i++)
50 buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
51
52 return buf - start;
53}
36 54
37static struct device_attribute ieee80211_dev_attrs[] = { 55static struct device_attribute ieee80211_dev_attrs[] = {
38 __ATTR_RO(index), 56 __ATTR_RO(index),
39 __ATTR_RO(macaddress), 57 __ATTR_RO(macaddress),
58 __ATTR_RO(address_mask),
59 __ATTR_RO(addresses),
40 {} 60 {}
41}; 61};
42 62
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d0..d3574a4eb3ba 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/bitops.h> 6#include <linux/bitops.h>
7#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
8#include <linux/slab.h>
8#include <net/cfg80211.h> 9#include <net/cfg80211.h>
9#include <net/ip.h> 10#include <net/ip.h>
10#include "core.h" 11#include "core.h"
@@ -227,8 +228,11 @@ unsigned int ieee80211_hdrlen(__le16 fc)
227 if (ieee80211_is_data(fc)) { 228 if (ieee80211_is_data(fc)) {
228 if (ieee80211_has_a4(fc)) 229 if (ieee80211_has_a4(fc))
229 hdrlen = 30; 230 hdrlen = 30;
230 if (ieee80211_is_data_qos(fc)) 231 if (ieee80211_is_data_qos(fc)) {
231 hdrlen += IEEE80211_QOS_CTL_LEN; 232 hdrlen += IEEE80211_QOS_CTL_LEN;
233 if (ieee80211_has_order(fc))
234 hdrlen += IEEE80211_HT_CTL_LEN;
235 }
232 goto out; 236 goto out;
233 } 237 }
234 238
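
The hunk above extends the data-frame case of ieee80211_hdrlen(): a QoS data frame gains 2 bytes of QoS Control and, with this change, another 4 bytes of HT Control when the order bit is set. A rough standalone C sketch of that length arithmetic follows; the 24-byte base and 30-byte four-address case come from the surrounding kernel function, only part of which is visible in this hunk.

#include <stdbool.h>
#include <stdio.h>

static unsigned int data_hdrlen(bool has_a4, bool is_qos, bool has_order)
{
	unsigned int hdrlen = has_a4 ? 30 : 24;

	if (is_qos) {
		hdrlen += 2;		/* QoS Control (IEEE80211_QOS_CTL_LEN) */
		if (has_order)
			hdrlen += 4;	/* HT Control (IEEE80211_HT_CTL_LEN), new in this hunk */
	}
	return hdrlen;
}

int main(void)
{
	/* QoS data frame with the order bit set: 24 + 2 + 4 = 30 bytes */
	printf("%u\n", data_hdrlen(false, true, true));
	return 0;
}
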
@@ -285,7 +289,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
285 } 289 }
286} 290}
287 291
288int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, 292int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
289 enum nl80211_iftype iftype) 293 enum nl80211_iftype iftype)
290{ 294{
291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 295 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +387,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
383} 387}
384EXPORT_SYMBOL(ieee80211_data_to_8023); 388EXPORT_SYMBOL(ieee80211_data_to_8023);
385 389
386int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr, 390int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
387 enum nl80211_iftype iftype, u8 *bssid, bool qos) 391 enum nl80211_iftype iftype, u8 *bssid, bool qos)
388{ 392{
389 struct ieee80211_hdr hdr; 393 struct ieee80211_hdr hdr;
@@ -497,6 +501,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
497} 501}
498EXPORT_SYMBOL(ieee80211_data_from_8023); 502EXPORT_SYMBOL(ieee80211_data_from_8023);
499 503
504
505void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
506 const u8 *addr, enum nl80211_iftype iftype,
507 const unsigned int extra_headroom)
508{
509 struct sk_buff *frame = NULL;
510 u16 ethertype;
511 u8 *payload;
512 const struct ethhdr *eth;
513 int remaining, err;
514 u8 dst[ETH_ALEN], src[ETH_ALEN];
515
516 err = ieee80211_data_to_8023(skb, addr, iftype);
517 if (err)
518 goto out;
519
520 /* skip the wrapping header */
521 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
522 if (!eth)
523 goto out;
524
525 while (skb != frame) {
526 u8 padding;
527 __be16 len = eth->h_proto;
528 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
529
530 remaining = skb->len;
531 memcpy(dst, eth->h_dest, ETH_ALEN);
532 memcpy(src, eth->h_source, ETH_ALEN);
533
534 padding = (4 - subframe_len) & 0x3;
535 /* the last MSDU has no padding */
536 if (subframe_len > remaining)
537 goto purge;
538
539 skb_pull(skb, sizeof(struct ethhdr));
540 /* reuse skb for the last subframe */
541 if (remaining <= subframe_len + padding)
542 frame = skb;
543 else {
544 unsigned int hlen = ALIGN(extra_headroom, 4);
545 /*
546 * Allocate and reserve two bytes more for payload
547 * alignment since sizeof(struct ethhdr) is 14.
548 */
549 frame = dev_alloc_skb(hlen + subframe_len + 2);
550 if (!frame)
551 goto purge;
552
553 skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
554 memcpy(skb_put(frame, ntohs(len)), skb->data,
555 ntohs(len));
556
557 eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
558 padding);
559 if (!eth) {
560 dev_kfree_skb(frame);
561 goto purge;
562 }
563 }
564
565 skb_reset_network_header(frame);
566 frame->dev = skb->dev;
567 frame->priority = skb->priority;
568
569 payload = frame->data;
570 ethertype = (payload[6] << 8) | payload[7];
571
572 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
573 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
574 compare_ether_addr(payload,
575 bridge_tunnel_header) == 0)) {
576 /* remove RFC1042 or Bridge-Tunnel
577 * encapsulation and replace EtherType */
578 skb_pull(frame, 6);
579 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
580 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
581 } else {
582 memcpy(skb_push(frame, sizeof(__be16)), &len,
583 sizeof(__be16));
584 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
585 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
586 }
587 __skb_queue_tail(list, frame);
588 }
589
590 return;
591
592 purge:
593 __skb_queue_purge(list);
594 out:
595 dev_kfree_skb(skb);
596}
597EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
598
500/* Given a data frame determine the 802.1p/1d tag to use. */ 599/* Given a data frame determine the 802.1p/1d tag to use. */
501unsigned int cfg80211_classify8021d(struct sk_buff *skb) 600unsigned int cfg80211_classify8021d(struct sk_buff *skb)
502{ 601{
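
ieee80211_amsdu_to_8023s(), added above, walks an A-MSDU by repeatedly reading a 14-byte Ethernet-style subframe header, taking the payload length from its EtherType field, and skipping the padding that rounds each subframe (except the last) up to a 4-byte boundary. A minimal userspace sketch of just that offset/padding arithmetic, with made-up payload lengths:

#include <stdio.h>

int main(void)
{
	/* payload lengths of three hypothetical A-MSDU subframes */
	unsigned int msdu_len[] = { 60, 1, 1500 };
	unsigned int offset = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int subframe_len = 14 + msdu_len[i];	/* ethhdr + payload */
		unsigned int padding = (4 - subframe_len) & 0x3;

		printf("subframe %d at offset %u: len %u, pad %u\n",
		       i, offset, subframe_len, padding);
		/* the next subframe header starts after the padded length;
		 * the real code does not skip a pad after the final subframe */
		offset += subframe_len + padding;
	}
	return 0;
}
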
@@ -720,3 +819,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
720 819
721 return err; 820 return err;
722} 821}
822
823u16 cfg80211_calculate_bitrate(struct rate_info *rate)
824{
825 int modulation, streams, bitrate;
826
827 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
828 return rate->legacy;
829
830 /* the formula below does only work for MCS values smaller than 32 */
831 if (rate->mcs >= 32)
832 return 0;
833
834 modulation = rate->mcs & 7;
835 streams = (rate->mcs >> 3) + 1;
836
837 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
838 13500000 : 6500000;
839
840 if (modulation < 4)
841 bitrate *= (modulation + 1);
842 else if (modulation == 4)
843 bitrate *= (modulation + 2);
844 else
845 bitrate *= (modulation + 3);
846
847 bitrate *= streams;
848
849 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
850 bitrate = (bitrate / 9) * 10;
851
852 /* do NOT round down here */
853 return (bitrate + 50000) / 100000;
854}
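
cfg80211_calculate_bitrate(), added above, converts an HT MCS index (below 32) into a rate in units of 100 kbit/s: the low three bits select the modulation/coding step, the remaining bits the stream count, the base rate is 6.5 or 13.5 Mbit/s depending on channel width, and short GI scales the result by 10/9. A standalone C re-implementation of the same arithmetic, useful for checking values by hand; the plain-integer flags are a simplification of the kernel's RATE_INFO_FLAGS_* bits.

#include <stdio.h>

static unsigned int mcs_bitrate(int mcs, int wide40, int short_gi)
{
	int modulation, streams, bitrate;

	if (mcs >= 32)			/* the formula only covers MCS 0..31 */
		return 0;

	modulation = mcs & 7;
	streams = (mcs >> 3) + 1;

	/* base rate of a single spatial stream: 6.5 or 13.5 Mbit/s */
	bitrate = wide40 ? 13500000 : 6500000;

	if (modulation < 4)
		bitrate *= (modulation + 1);	/* BPSK 1/2 .. 16-QAM 1/2 */
	else if (modulation == 4)
		bitrate *= (modulation + 2);	/* 16-QAM 3/4 */
	else
		bitrate *= (modulation + 3);	/* 64-QAM 2/3 .. 5/6 */

	bitrate *= streams;

	if (short_gi)
		bitrate = (bitrate / 9) * 10;

	return (bitrate + 50000) / 100000;	/* round to 100 kbit/s units */
}

int main(void)
{
	/* MCS 15, 40 MHz, short GI -> 3000, i.e. 300.0 Mbit/s */
	printf("%u\n", mcs_bitrate(15, 1, 1));
	return 0;
}
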
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d4424..a60a2773b497 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -12,6 +12,7 @@
12#include <linux/nl80211.h> 12#include <linux/nl80211.h>
13#include <linux/if_arp.h> 13#include <linux/if_arp.h>
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/slab.h>
15#include <net/iw_handler.h> 16#include <net/iw_handler.h>
16#include <net/cfg80211.h> 17#include <net/cfg80211.h>
17#include "wext-compat.h" 18#include "wext-compat.h"
@@ -1099,8 +1100,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
1099{ 1100{
1100 struct wireless_dev *wdev = dev->ieee80211_ptr; 1101 struct wireless_dev *wdev = dev->ieee80211_ptr;
1101 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1102 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1102 bool ps = wdev->wext.ps; 1103 bool ps = wdev->ps;
1103 int timeout = wdev->wext.ps_timeout; 1104 int timeout = wdev->ps_timeout;
1104 int err; 1105 int err;
1105 1106
1106 if (wdev->iftype != NL80211_IFTYPE_STATION) 1107 if (wdev->iftype != NL80211_IFTYPE_STATION)
@@ -1133,8 +1134,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
1133 if (err) 1134 if (err)
1134 return err; 1135 return err;
1135 1136
1136 wdev->wext.ps = ps; 1137 wdev->ps = ps;
1137 wdev->wext.ps_timeout = timeout; 1138 wdev->ps_timeout = timeout;
1138 1139
1139 return 0; 1140 return 0;
1140 1141
@@ -1147,7 +1148,7 @@ int cfg80211_wext_giwpower(struct net_device *dev,
1147{ 1148{
1148 struct wireless_dev *wdev = dev->ieee80211_ptr; 1149 struct wireless_dev *wdev = dev->ieee80211_ptr;
1149 1150
1150 wrq->disabled = !wdev->wext.ps; 1151 wrq->disabled = !wdev->ps;
1151 1152
1152 return 0; 1153 return 0;
1153} 1154}
@@ -1204,21 +1205,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
1204 struct wireless_dev *wdev = dev->ieee80211_ptr; 1205 struct wireless_dev *wdev = dev->ieee80211_ptr;
1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1206 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1206 struct cfg80211_bitrate_mask mask; 1207 struct cfg80211_bitrate_mask mask;
1208 u32 fixed, maxrate;
1209 struct ieee80211_supported_band *sband;
1210 int band, ridx;
1211 bool match = false;
1207 1212
1208 if (!rdev->ops->set_bitrate_mask) 1213 if (!rdev->ops->set_bitrate_mask)
1209 return -EOPNOTSUPP; 1214 return -EOPNOTSUPP;
1210 1215
1211 mask.fixed = 0; 1216 memset(&mask, 0, sizeof(mask));
1212 mask.maxrate = 0; 1217 fixed = 0;
1218 maxrate = (u32)-1;
1213 1219
1214 if (rate->value < 0) { 1220 if (rate->value < 0) {
1215 /* nothing */ 1221 /* nothing */
1216 } else if (rate->fixed) { 1222 } else if (rate->fixed) {
1217 mask.fixed = rate->value / 1000; /* kbps */ 1223 fixed = rate->value / 100000;
1218 } else { 1224 } else {
1219 mask.maxrate = rate->value / 1000; /* kbps */ 1225 maxrate = rate->value / 100000;
1220 } 1226 }
1221 1227
1228 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1229 sband = wdev->wiphy->bands[band];
1230 if (sband == NULL)
1231 continue;
1232 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
1233 struct ieee80211_rate *srate = &sband->bitrates[ridx];
1234 if (fixed == srate->bitrate) {
1235 mask.control[band].legacy = 1 << ridx;
1236 match = true;
1237 break;
1238 }
1239 if (srate->bitrate <= maxrate) {
1240 mask.control[band].legacy |= 1 << ridx;
1241 match = true;
1242 }
1243 }
1244 }
1245
1246 if (!match)
1247 return -EINVAL;
1248
1222 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1249 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
1223} 1250}
1224EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); 1251EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
@@ -1257,10 +1284,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
1257 if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) 1284 if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
1258 return -EOPNOTSUPP; 1285 return -EOPNOTSUPP;
1259 1286
1260 rate->value = 0; 1287 rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
1261
1262 if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
1263 rate->value = 100000 * sinfo.txrate.legacy;
1264 1288
1265 return 0; 1289 return 0;
1266} 1290}
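
With the change above, cfg80211_wext_siwrate() translates the wext rate request (given in bit/s) into units of 100 kbit/s and builds a per-band legacy bitrate mask: a fixed rate selects exactly one bit, a maximum rate sets every bit at or below it. A small userspace sketch of the fixed-rate case, using an illustrative 802.11b/g-style rate table in place of sband->bitrates[]:

#include <stdio.h>

int main(void)
{
	/* illustrative rate set, in the same units of 100 kbit/s */
	const unsigned int bitrates[] = { 10, 20, 55, 110, 60, 90, 120,
					  180, 240, 360, 480, 540 };
	const int n = sizeof(bitrates) / sizeof(bitrates[0]);
	long long wext_value = 54000000;	/* rate from userspace, in bit/s */
	unsigned int fixed = wext_value / 100000;
	unsigned int legacy_mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (bitrates[i] == fixed) {
			legacy_mask = 1u << i;	/* fixed rate: exactly one bit */
			break;
		}
	}

	printf("legacy mask: 0x%04x\n", legacy_mask);	/* 0x0800 -> 54 Mbit/s */
	return 0;
}
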
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 5e1656bdf23b..4f5a47091fde 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include <linux/slab.h>
13#include <linux/wireless.h> 14#include <linux/wireless.h>
14#include <linux/uaccess.h> 15#include <linux/uaccess.h>
15#include <net/cfg80211.h> 16#include <net/cfg80211.h>
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index a3c2277de9e5..3feb28e41c53 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * (As all part of the Linux kernel, this file is GPL) 8 * (As all part of the Linux kernel, this file is GPL)
9 */ 9 */
10#include <linux/slab.h>
10#include <linux/wireless.h> 11#include <linux/wireless.h>
11#include <linux/netdevice.h> 12#include <linux/netdevice.h>
12#include <net/iw_handler.h> 13#include <net/iw_handler.h>
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 273a7f77c834..8bafa31fa9f8 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -140,7 +140,7 @@ static const struct file_operations wireless_seq_fops = {
140 .release = seq_release_net, 140 .release = seq_release_net,
141}; 141};
142 142
143int wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
@@ -149,7 +149,7 @@ int wext_proc_init(struct net *net)
149 return 0; 149 return 0;
150} 150}
151 151
152void wext_proc_exit(struct net *net) 152void __net_exit wext_proc_exit(struct net *net)
153{ 153{
154 proc_net_remove(net, "wireless"); 154 proc_net_remove(net, "wireless");
155} 155}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 5615a8802536..d5c6140f4cb8 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 9#include <linux/if_arp.h>
10#include <linux/slab.h>
10#include <net/cfg80211.h> 11#include <net/cfg80211.h>
11#include "wext-compat.h" 12#include "wext-compat.h"
12#include "nl80211.h" 13#include "nl80211.h"
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e3219e4cd044..36e84e13c6aa 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -47,6 +47,7 @@
47#include <linux/netdevice.h> 47#include <linux/netdevice.h>
48#include <linux/if_arp.h> 48#include <linux/if_arp.h>
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/slab.h>
50#include <net/sock.h> 51#include <net/sock.h>
51#include <net/tcp_states.h> 52#include <net/tcp_states.h>
52#include <asm/uaccess.h> 53#include <asm/uaccess.h>
@@ -55,6 +56,7 @@
55#include <linux/notifier.h> 56#include <linux/notifier.h>
56#include <linux/init.h> 57#include <linux/init.h>
57#include <linux/compat.h> 58#include <linux/compat.h>
59#include <linux/ctype.h>
58 60
59#include <net/x25.h> 61#include <net/x25.h>
60#include <net/compat.h> 62#include <net/compat.h>
@@ -81,6 +83,41 @@ struct compat_x25_subscrip_struct {
81}; 83};
82#endif 84#endif
83 85
86
87int x25_parse_address_block(struct sk_buff *skb,
88 struct x25_address *called_addr,
89 struct x25_address *calling_addr)
90{
91 unsigned char len;
92 int needed;
93 int rc;
94
95 if (skb->len < 1) {
96 /* packet has no address block */
97 rc = 0;
98 goto empty;
99 }
100
101 len = *skb->data;
102 needed = 1 + (len >> 4) + (len & 0x0f);
103
104 if (skb->len < needed) {
105 /* packet is too short to hold the addresses it claims
106 to hold */
107 rc = -1;
108 goto empty;
109 }
110
111 return x25_addr_ntoa(skb->data, called_addr, calling_addr);
112
113empty:
114 *called_addr->x25_addr = 0;
115 *calling_addr->x25_addr = 0;
116
117 return rc;
118}
119
120
84int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 121int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
85 struct x25_address *calling_addr) 122 struct x25_address *calling_addr)
86{ 123{
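
x25_parse_address_block(), added above, refuses to parse an address block unless the packet actually contains the digits its first byte advertises: the high nibble counts called-address digits, the low nibble calling-address digits. A hypothetical userspace sketch of that length check; the sample bytes are made up.

#include <stdio.h>

int main(void)
{
	/* hypothetical start of a call-request address block */
	unsigned char data[] = { 0x4a, 0x12, 0x34, 0x56, 0x78, 0x90 };
	unsigned int pkt_len = sizeof(data);
	unsigned char len = data[0];
	unsigned int called_digits  = len >> 4;		/* 4 */
	unsigned int calling_digits = len & 0x0f;	/* 10 */
	unsigned int needed = 1 + called_digits + calling_digits;

	if (pkt_len < needed)
		printf("too short: have %u, need %u\n", pkt_len, needed);
	else
		printf("address block fits\n");
	return 0;
}
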
@@ -365,6 +402,7 @@ static void __x25_destroy_socket(struct sock *sk)
365 /* 402 /*
366 * Queue the unaccepted socket for death 403 * Queue the unaccepted socket for death
367 */ 404 */
405 skb->sk->sk_state = TCP_LISTEN;
368 sock_set_flag(skb->sk, SOCK_DEAD); 406 sock_set_flag(skb->sk, SOCK_DEAD);
369 x25_start_heartbeat(skb->sk); 407 x25_start_heartbeat(skb->sk);
370 x25_sk(skb->sk)->state = X25_STATE_0; 408 x25_sk(skb->sk)->state = X25_STATE_0;
@@ -512,15 +550,20 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
512{ 550{
513 struct sock *sk; 551 struct sock *sk;
514 struct x25_sock *x25; 552 struct x25_sock *x25;
515 int rc = -ESOCKTNOSUPPORT; 553 int rc = -EAFNOSUPPORT;
516 554
517 if (!net_eq(net, &init_net)) 555 if (!net_eq(net, &init_net))
518 return -EAFNOSUPPORT; 556 goto out;
557
558 rc = -ESOCKTNOSUPPORT;
559 if (sock->type != SOCK_SEQPACKET)
560 goto out;
519 561
520 if (sock->type != SOCK_SEQPACKET || protocol) 562 rc = -EINVAL;
563 if (protocol)
521 goto out; 564 goto out;
522 565
523 rc = -ENOMEM; 566 rc = -ENOBUFS;
524 if ((sk = x25_alloc_socket(net)) == NULL) 567 if ((sk = x25_alloc_socket(net)) == NULL)
525 goto out; 568 goto out;
526 569
@@ -547,7 +590,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
547 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; 590 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
548 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; 591 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
549 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; 592 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
550 x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; 593 x25->facilities.throughput = 0; /* by default don't negotiate
594 throughput */
551 x25->facilities.reverse = X25_DEFAULT_REVERSE; 595 x25->facilities.reverse = X25_DEFAULT_REVERSE;
552 x25->dte_facilities.calling_len = 0; 596 x25->dte_facilities.calling_len = 0;
553 x25->dte_facilities.called_len = 0; 597 x25->dte_facilities.called_len = 0;
@@ -643,7 +687,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
643{ 687{
644 struct sock *sk = sock->sk; 688 struct sock *sk = sock->sk;
645 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 689 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
646 int rc = 0; 690 int len, i, rc = 0;
647 691
648 lock_kernel(); 692 lock_kernel();
649 if (!sock_flag(sk, SOCK_ZAPPED) || 693 if (!sock_flag(sk, SOCK_ZAPPED) ||
@@ -653,6 +697,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
653 goto out; 697 goto out;
654 } 698 }
655 699
700 len = strlen(addr->sx25_addr.x25_addr);
701 for (i = 0; i < len; i++) {
702 if (!isdigit(addr->sx25_addr.x25_addr[i])) {
703 rc = -EINVAL;
704 goto out;
705 }
706 }
707
656 x25_sk(sk)->source_addr = addr->sx25_addr; 708 x25_sk(sk)->source_addr = addr->sx25_addr;
657 x25_insert_socket(sk); 709 x25_insert_socket(sk);
658 sock_reset_flag(sk, SOCK_ZAPPED); 710 sock_reset_flag(sk, SOCK_ZAPPED);
@@ -907,16 +959,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
907 /* 959 /*
908 * Extract the X.25 addresses and convert them to ASCII strings, 960 * Extract the X.25 addresses and convert them to ASCII strings,
909 * and remove them. 961 * and remove them.
962 *
963 * Address block is mandatory in call request packets
910 */ 964 */
911 addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); 965 addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
966 if (addr_len <= 0)
967 goto out_clear_request;
912 skb_pull(skb, addr_len); 968 skb_pull(skb, addr_len);
913 969
914 /* 970 /*
915 * Get the length of the facilities, skip past them for the moment 971 * Get the length of the facilities, skip past them for the moment
916 * get the call user data because this is needed to determine 972 * get the call user data because this is needed to determine
917 * the correct listener 973 * the correct listener
974 *
975 * Facilities length is mandatory in call request packets
918 */ 976 */
977 if (skb->len < 1)
978 goto out_clear_request;
919 len = skb->data[0] + 1; 979 len = skb->data[0] + 1;
980 if (skb->len < len)
981 goto out_clear_request;
920 skb_pull(skb,len); 982 skb_pull(skb,len);
921 983
922 /* 984 /*
@@ -1400,9 +1462,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1400 if (facilities.winsize_in < 1 || 1462 if (facilities.winsize_in < 1 ||
1401 facilities.winsize_in > 127) 1463 facilities.winsize_in > 127)
1402 break; 1464 break;
1403 if (facilities.throughput < 0x03 || 1465 if (facilities.throughput) {
1404 facilities.throughput > 0xDD) 1466 int out = facilities.throughput & 0xf0;
1405 break; 1467 int in = facilities.throughput & 0x0f;
1468 if (!out)
1469 facilities.throughput |=
1470 X25_DEFAULT_THROUGHPUT << 4;
1471 else if (out < 0x30 || out > 0xD0)
1472 break;
1473 if (!in)
1474 facilities.throughput |=
1475 X25_DEFAULT_THROUGHPUT;
1476 else if (in < 0x03 || in > 0x0D)
1477 break;
1478 }
1406 if (facilities.reverse && 1479 if (facilities.reverse &&
1407 (facilities.reverse & 0x81) != 0x81) 1480 (facilities.reverse & 0x81) != 0x81)
1408 break; 1481 break;
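
The facilities-validation hunk above now treats the throughput byte as two independent nibbles: the outbound class in the high nibble (valid range 0x30..0xD0), the inbound class in the low nibble (0x03..0x0D), with an unset nibble filled in from X25_DEFAULT_THROUGHPUT. A userspace sketch of that check; the default value 0x0A is an assumption for illustration, since the macro's value is not shown in this diff.

#include <stdio.h>

#define DEFAULT_THROUGHPUT 0x0A		/* assumed value, not shown in this diff */

static int check_throughput(unsigned int *throughput)
{
	unsigned int out = *throughput & 0xf0;
	unsigned int in  = *throughput & 0x0f;

	if (!*throughput)		/* zero means "don't negotiate" */
		return 0;

	if (!out)
		*throughput |= DEFAULT_THROUGHPUT << 4;
	else if (out < 0x30 || out > 0xD0)
		return -1;

	if (!in)
		*throughput |= DEFAULT_THROUGHPUT;
	else if (in < 0x03 || in > 0x0D)
		return -1;

	return 0;
}

int main(void)
{
	unsigned int t = 0x40;		/* outbound class set, inbound left at 0 */

	if (check_throughput(&t) == 0)
		printf("accepted, normalized to 0x%02x\n", t);	/* 0x4a */
	else
		printf("rejected\n");
	return 0;
}
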
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 3e1efe534645..b9ef682230a0 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/netdevice.h> 21#include <linux/netdevice.h>
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
23#include <net/sock.h> 24#include <net/sock.h>
24#include <linux/if_arp.h> 25#include <linux/if_arp.h>
25#include <net/x25.h> 26#include <net/x25.h>
@@ -53,7 +54,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
53 if (!sock_owned_by_user(sk)) { 54 if (!sock_owned_by_user(sk)) {
54 queued = x25_process_rx_frame(sk, skb); 55 queued = x25_process_rx_frame(sk, skb);
55 } else { 56 } else {
56 sk_add_backlog(sk, skb); 57 queued = !sk_add_backlog(sk, skb);
57 } 58 }
58 bh_unlock_sock(sk); 59 bh_unlock_sock(sk);
59 sock_put(sk); 60 sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21f6646eb3a..771bab00754b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
36{ 36{
37 unsigned char *p = skb->data; 37 unsigned char *p = skb->data;
38 unsigned int len = *p++; 38 unsigned int len;
39 39
40 *vc_fac_mask = 0; 40 *vc_fac_mask = 0;
41 41
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
52 52
53 if (skb->len < 1)
54 return 0;
55
56 len = *p++;
57
58 if (len >= skb->len)
59 return -1;
60
53 while (len > 0) { 61 while (len > 0) {
54 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
55 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
247 memcpy(new, ours, sizeof(*new)); 255 memcpy(new, ours, sizeof(*new));
248 256
249 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 257 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
258 if (len < 0)
259 return len;
250 260
251 /* 261 /*
252 * They want reverse charging, we won't accept it. 262 * They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
259 new->reverse = theirs.reverse; 269 new->reverse = theirs.reverse;
260 270
261 if (theirs.throughput) { 271 if (theirs.throughput) {
262 if (theirs.throughput < ours->throughput) { 272 int theirs_in = theirs.throughput & 0x0f;
263 SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); 273 int theirs_out = theirs.throughput & 0xf0;
264 new->throughput = theirs.throughput; 274 int ours_in = ours->throughput & 0x0f;
275 int ours_out = ours->throughput & 0xf0;
276 if (!ours_in || theirs_in < ours_in) {
277 SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
278 new->throughput = (new->throughput & 0xf0) | theirs_in;
279 }
280 if (!ours_out || theirs_out < ours_out) {
281 SOCK_DEBUG(sk,
282 "X.25: outbound throughput negotiated\n");
283 new->throughput = (new->throughput & 0x0f) | theirs_out;
265 } 284 }
266 } 285 }
267 286
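
x25_negotiate_facilities() above now negotiates throughput per direction as well: each nibble is lowered to the peer's value whenever ours is higher or unset. A small illustrative sketch with made-up facility bytes:

#include <stdio.h>

int main(void)
{
	unsigned int ours = 0xA0;	/* our outbound class, inbound unset */
	unsigned int theirs = 0x4C;	/* peer's request */
	unsigned int negotiated = ours;

	unsigned int theirs_in  = theirs & 0x0f, theirs_out = theirs & 0xf0;
	unsigned int ours_in    = ours & 0x0f,   ours_out   = ours & 0xf0;

	if (!ours_in || theirs_in < ours_in)	/* accept or lower inbound */
		negotiated = (negotiated & 0xf0) | theirs_in;
	if (!ours_out || theirs_out < ours_out)	/* accept or lower outbound */
		negotiated = (negotiated & 0x0f) | theirs_out;

	printf("negotiated throughput: 0x%02x\n", negotiated);	/* 0x4c */
	return 0;
}
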
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index 056a55f3a871..25a810793968 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/slab.h>
13#include <net/x25.h> 14#include <net/x25.h>
14 15
15LIST_HEAD(x25_forward_list); 16LIST_HEAD(x25_forward_list);
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 96d922783547..372ac226e648 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -23,6 +23,7 @@
23 * i-frames. 23 * i-frames.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/string.h> 29#include <linux/string.h>
@@ -89,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
89static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) 90static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
90{ 91{
91 struct x25_address source_addr, dest_addr; 92 struct x25_address source_addr, dest_addr;
93 int len;
92 94
93 switch (frametype) { 95 switch (frametype) {
94 case X25_CALL_ACCEPTED: { 96 case X25_CALL_ACCEPTED: {
@@ -106,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
106 * Parse the data in the frame. 108 * Parse the data in the frame.
107 */ 109 */
108 skb_pull(skb, X25_STD_MIN_LEN); 110 skb_pull(skb, X25_STD_MIN_LEN);
109 skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); 111
110 skb_pull(skb, 112 len = x25_parse_address_block(skb, &source_addr,
111 x25_parse_facilities(skb, &x25->facilities, 113 &dest_addr);
114 if (len > 0)
115 skb_pull(skb, len);
116
117 len = x25_parse_facilities(skb, &x25->facilities,
112 &x25->dte_facilities, 118 &x25->dte_facilities,
113 &x25->vc_facil_mask)); 119 &x25->vc_facil_mask);
120 if (len > 0)
121 skb_pull(skb, len);
114 /* 122 /*
115 * Copy any Call User Data. 123 * Copy any Call User Data.
116 */ 124 */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index e4e1b6e49538..73e7b954ad28 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/jiffies.h> 25#include <linux/jiffies.h>
26#include <linux/timer.h> 26#include <linux/timer.h>
27#include <linux/slab.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
index 2b96b52114d6..52351a26b6fc 100644
--- a/net/x25/x25_out.c
+++ b/net/x25/x25_out.c
@@ -22,6 +22,7 @@
22 * needed cleaned seq-number fields. 22 * needed cleaned seq-number fields.
23 */ 23 */
24 24
25#include <linux/slab.h>
25#include <linux/socket.h> 26#include <linux/socket.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/string.h> 28#include <linux/string.h>
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0a04e62e0e18..7ff373792324 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -25,49 +25,17 @@
25#include <net/x25.h> 25#include <net/x25.h>
26 26
27#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
28static __inline__ struct x25_route *x25_get_route_idx(loff_t pos)
29{
30 struct list_head *route_entry;
31 struct x25_route *rt = NULL;
32
33 list_for_each(route_entry, &x25_route_list) {
34 rt = list_entry(route_entry, struct x25_route, node);
35 if (!pos--)
36 goto found;
37 }
38 rt = NULL;
39found:
40 return rt;
41}
42 28
43static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos) 29static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
44 __acquires(x25_route_list_lock) 30 __acquires(x25_route_list_lock)
45{ 31{
46 loff_t l = *pos;
47
48 read_lock_bh(&x25_route_list_lock); 32 read_lock_bh(&x25_route_list_lock);
49 return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN; 33 return seq_list_start_head(&x25_route_list, *pos);
50} 34}
51 35
52static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 36static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
53{ 37{
54 struct x25_route *rt; 38 return seq_list_next(v, &x25_route_list, pos);
55
56 ++*pos;
57 if (v == SEQ_START_TOKEN) {
58 rt = NULL;
59 if (!list_empty(&x25_route_list))
60 rt = list_entry(x25_route_list.next,
61 struct x25_route, node);
62 goto out;
63 }
64 rt = v;
65 if (rt->node.next != &x25_route_list)
66 rt = list_entry(rt->node.next, struct x25_route, node);
67 else
68 rt = NULL;
69out:
70 return rt;
71} 39}
72 40
73static void x25_seq_route_stop(struct seq_file *seq, void *v) 41static void x25_seq_route_stop(struct seq_file *seq, void *v)
@@ -78,9 +46,9 @@ static void x25_seq_route_stop(struct seq_file *seq, void *v)
78 46
79static int x25_seq_route_show(struct seq_file *seq, void *v) 47static int x25_seq_route_show(struct seq_file *seq, void *v)
80{ 48{
81 struct x25_route *rt; 49 struct x25_route *rt = list_entry(v, struct x25_route, node);
82 50
83 if (v == SEQ_START_TOKEN) { 51 if (v == &x25_route_list) {
84 seq_puts(seq, "Address Digits Device\n"); 52 seq_puts(seq, "Address Digits Device\n");
85 goto out; 53 goto out;
86 } 54 }
@@ -93,40 +61,16 @@ out:
93 return 0; 61 return 0;
94} 62}
95 63
96static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
97{
98 struct sock *s;
99 struct hlist_node *node;
100
101 sk_for_each(s, node, &x25_list)
102 if (!pos--)
103 goto found;
104 s = NULL;
105found:
106 return s;
107}
108
109static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos) 64static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
110 __acquires(x25_list_lock) 65 __acquires(x25_list_lock)
111{ 66{
112 loff_t l = *pos;
113
114 read_lock_bh(&x25_list_lock); 67 read_lock_bh(&x25_list_lock);
115 return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN; 68 return seq_hlist_start_head(&x25_list, *pos);
116} 69}
117 70
118static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 71static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
119{ 72{
120 struct sock *s; 73 return seq_hlist_next(v, &x25_list, pos);
121
122 ++*pos;
123 if (v == SEQ_START_TOKEN) {
124 s = sk_head(&x25_list);
125 goto out;
126 }
127 s = sk_next(v);
128out:
129 return s;
130} 74}
131 75
132static void x25_seq_socket_stop(struct seq_file *seq, void *v) 76static void x25_seq_socket_stop(struct seq_file *seq, void *v)
@@ -148,7 +92,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
148 goto out; 92 goto out;
149 } 93 }
150 94
151 s = v; 95 s = sk_entry(v);
152 x25 = x25_sk(s); 96 x25 = x25_sk(s);
153 97
154 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL) 98 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
@@ -170,51 +114,16 @@ out:
170 return 0; 114 return 0;
171} 115}
172 116
173static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
174{
175 struct x25_forward *f;
176 struct list_head *entry;
177
178 list_for_each(entry, &x25_forward_list) {
179 f = list_entry(entry, struct x25_forward, node);
180 if (!pos--)
181 goto found;
182 }
183
184 f = NULL;
185found:
186 return f;
187}
188
189static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos) 117static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
190 __acquires(x25_forward_list_lock) 118 __acquires(x25_forward_list_lock)
191{ 119{
192 loff_t l = *pos;
193
194 read_lock_bh(&x25_forward_list_lock); 120 read_lock_bh(&x25_forward_list_lock);
195 return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN; 121 return seq_list_start_head(&x25_forward_list, *pos);
196} 122}
197 123
198static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos) 124static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
199{ 125{
200 struct x25_forward *f; 126 return seq_list_next(v, &x25_forward_list, pos);
201
202 ++*pos;
203 if (v == SEQ_START_TOKEN) {
204 f = NULL;
205 if (!list_empty(&x25_forward_list))
206 f = list_entry(x25_forward_list.next,
207 struct x25_forward, node);
208 goto out;
209 }
210 f = v;
211 if (f->node.next != &x25_forward_list)
212 f = list_entry(f->node.next, struct x25_forward, node);
213 else
214 f = NULL;
215out:
216 return f;
217
218} 127}
219 128
220static void x25_seq_forward_stop(struct seq_file *seq, void *v) 129static void x25_seq_forward_stop(struct seq_file *seq, void *v)
@@ -225,9 +134,9 @@ static void x25_seq_forward_stop(struct seq_file *seq, void *v)
225 134
226static int x25_seq_forward_show(struct seq_file *seq, void *v) 135static int x25_seq_forward_show(struct seq_file *seq, void *v)
227{ 136{
228 struct x25_forward *f; 137 struct x25_forward *f = list_entry(v, struct x25_forward, node);
229 138
230 if (v == SEQ_START_TOKEN) { 139 if (v == &x25_forward_list) {
231 seq_printf(seq, "lci dev1 dev2\n"); 140 seq_printf(seq, "lci dev1 dev2\n");
232 goto out; 141 goto out;
233 } 142 }
@@ -236,7 +145,6 @@ static int x25_seq_forward_show(struct seq_file *seq, void *v)
236 145
237 seq_printf(seq, "%d %-10s %-10s\n", 146 seq_printf(seq, "%d %-10s %-10s\n",
238 f->lci, f->dev1->name, f->dev2->name); 147 f->lci, f->dev1->name, f->dev2->name);
239
240out: 148out:
241 return 0; 149 return 0;
242} 150}
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index b95fae9ab393..97d77c532d8c 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h>
22#include <net/x25.h> 23#include <net/x25.h>
23 24
24LIST_HEAD(x25_route_list); 25LIST_HEAD(x25_route_list);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 352b32d216fc..dc20cf12f39b 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -23,6 +23,7 @@
23 * restriction on response. 23 * restriction on response.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/string.h> 28#include <linux/string.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 743c0134a6a9..8b4d6e3246e5 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = {
125 .sadb_alg_maxbits = 256 125 .sadb_alg_maxbits = 256
126 } 126 }
127}, 127},
128{
129 .name = "rfc4543(gcm(aes))",
130
131 .uinfo = {
132 .aead = {
133 .icv_truncbits = 128,
134 }
135 },
136
137 .desc = {
138 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
139 .sadb_alg_ivlen = 8,
140 .sadb_alg_minbits = 128,
141 .sadb_alg_maxbits = 256
142 }
143},
128}; 144};
129 145
130static struct xfrm_algo_desc aalg_list[] = { 146static struct xfrm_algo_desc aalg_list[] = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e0009c17d809..45f1c98d4fce 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -152,7 +152,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
152 goto drop; 152 goto drop;
153 } 153 }
154 154
155 x = xfrm_state_lookup(net, daddr, spi, nexthdr, family); 155 x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
156 if (x == NULL) { 156 if (x == NULL) {
157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
158 xfrm_audit_state_notfound(skb, family, spi, seq); 158 xfrm_audit_state_notfound(skb, family, spi, seq);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 42cd18391f46..fc91ad7ee26e 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -17,11 +17,11 @@
17 17
18#include <linux/crypto.h> 18#include <linux/crypto.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/gfp.h>
21#include <linux/list.h> 20#include <linux/list.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/mutex.h> 22#include <linux/mutex.h>
24#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/slab.h>
25#include <linux/smp.h> 25#include <linux/smp.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <net/ip.h> 27#include <net/ip.h>
@@ -30,12 +30,12 @@
30 30
31struct ipcomp_tfms { 31struct ipcomp_tfms {
32 struct list_head list; 32 struct list_head list;
33 struct crypto_comp **tfms; 33 struct crypto_comp * __percpu *tfms;
34 int users; 34 int users;
35}; 35};
36 36
37static DEFINE_MUTEX(ipcomp_resource_mutex); 37static DEFINE_MUTEX(ipcomp_resource_mutex);
38static void **ipcomp_scratches; 38static void * __percpu *ipcomp_scratches;
39static int ipcomp_scratch_users; 39static int ipcomp_scratch_users;
40static LIST_HEAD(ipcomp_tfms_list); 40static LIST_HEAD(ipcomp_tfms_list);
41 41
@@ -200,7 +200,7 @@ EXPORT_SYMBOL_GPL(ipcomp_output);
200static void ipcomp_free_scratches(void) 200static void ipcomp_free_scratches(void)
201{ 201{
202 int i; 202 int i;
203 void **scratches; 203 void * __percpu *scratches;
204 204
205 if (--ipcomp_scratch_users) 205 if (--ipcomp_scratch_users)
206 return; 206 return;
@@ -215,10 +215,10 @@ static void ipcomp_free_scratches(void)
215 free_percpu(scratches); 215 free_percpu(scratches);
216} 216}
217 217
218static void **ipcomp_alloc_scratches(void) 218static void * __percpu *ipcomp_alloc_scratches(void)
219{ 219{
220 int i; 220 int i;
221 void **scratches; 221 void * __percpu *scratches;
222 222
223 if (ipcomp_scratch_users++) 223 if (ipcomp_scratch_users++)
224 return ipcomp_scratches; 224 return ipcomp_scratches;
@@ -239,7 +239,7 @@ static void **ipcomp_alloc_scratches(void)
239 return scratches; 239 return scratches;
240} 240}
241 241
242static void ipcomp_free_tfms(struct crypto_comp **tfms) 242static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
243{ 243{
244 struct ipcomp_tfms *pos; 244 struct ipcomp_tfms *pos;
245 int cpu; 245 int cpu;
@@ -267,10 +267,10 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
267 free_percpu(tfms); 267 free_percpu(tfms);
268} 268}
269 269
270static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 270static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
271{ 271{
272 struct ipcomp_tfms *pos; 272 struct ipcomp_tfms *pos;
273 struct crypto_comp **tfms; 273 struct crypto_comp * __percpu *tfms;
274 int cpu; 274 int cpu;
275 275
276 /* This can be any valid CPU ID so we don't need locking. */ 276 /* This can be any valid CPU ID so we don't need locking. */
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index b9fe13138c07..6a329158bdfa 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -14,6 +14,7 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/slab.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
18#include <net/dst.h> 19#include <net/dst.h>
19#include <net/xfrm.h> 20#include <net/xfrm.h>
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0ecb16a9a883..843e066649cb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -556,6 +556,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
556 struct hlist_head *chain; 556 struct hlist_head *chain;
557 struct hlist_node *entry, *newpos; 557 struct hlist_node *entry, *newpos;
558 struct dst_entry *gc_list; 558 struct dst_entry *gc_list;
559 u32 mark = policy->mark.v & policy->mark.m;
559 560
560 write_lock_bh(&xfrm_policy_lock); 561 write_lock_bh(&xfrm_policy_lock);
561 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 562 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -564,6 +565,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
564 hlist_for_each_entry(pol, entry, chain, bydst) { 565 hlist_for_each_entry(pol, entry, chain, bydst) {
565 if (pol->type == policy->type && 566 if (pol->type == policy->type &&
566 !selector_cmp(&pol->selector, &policy->selector) && 567 !selector_cmp(&pol->selector, &policy->selector) &&
568 (mark & pol->mark.m) == pol->mark.v &&
567 xfrm_sec_ctx_match(pol->security, policy->security) && 569 xfrm_sec_ctx_match(pol->security, policy->security) &&
568 !WARN_ON(delpol)) { 570 !WARN_ON(delpol)) {
569 if (excl) { 571 if (excl) {
@@ -635,8 +637,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
635} 637}
636EXPORT_SYMBOL(xfrm_policy_insert); 638EXPORT_SYMBOL(xfrm_policy_insert);
637 639
638struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir, 640struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
639 struct xfrm_selector *sel, 641 int dir, struct xfrm_selector *sel,
640 struct xfrm_sec_ctx *ctx, int delete, 642 struct xfrm_sec_ctx *ctx, int delete,
641 int *err) 643 int *err)
642{ 644{
@@ -650,6 +652,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
650 ret = NULL; 652 ret = NULL;
651 hlist_for_each_entry(pol, entry, chain, bydst) { 653 hlist_for_each_entry(pol, entry, chain, bydst) {
652 if (pol->type == type && 654 if (pol->type == type &&
655 (mark & pol->mark.m) == pol->mark.v &&
653 !selector_cmp(sel, &pol->selector) && 656 !selector_cmp(sel, &pol->selector) &&
654 xfrm_sec_ctx_match(ctx, pol->security)) { 657 xfrm_sec_ctx_match(ctx, pol->security)) {
655 xfrm_pol_hold(pol); 658 xfrm_pol_hold(pol);
@@ -676,8 +679,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
676} 679}
677EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 680EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
678 681
679struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id, 682struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
680 int delete, int *err) 683 int dir, u32 id, int delete, int *err)
681{ 684{
682 struct xfrm_policy *pol, *ret; 685 struct xfrm_policy *pol, *ret;
683 struct hlist_head *chain; 686 struct hlist_head *chain;
@@ -692,7 +695,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
692 chain = net->xfrm.policy_byidx + idx_hash(net, id); 695 chain = net->xfrm.policy_byidx + idx_hash(net, id);
693 ret = NULL; 696 ret = NULL;
694 hlist_for_each_entry(pol, entry, chain, byidx) { 697 hlist_for_each_entry(pol, entry, chain, byidx) {
695 if (pol->type == type && pol->index == id) { 698 if (pol->type == type && pol->index == id &&
699 (mark & pol->mark.m) == pol->mark.v) {
696 xfrm_pol_hold(pol); 700 xfrm_pol_hold(pol);
697 if (delete) { 701 if (delete) {
698 *err = security_xfrm_policy_delete( 702 *err = security_xfrm_policy_delete(
@@ -771,7 +775,8 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
771 775
772int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 776int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
773{ 777{
774 int dir, err = 0; 778 int dir, err = 0, cnt = 0;
779 struct xfrm_policy *dp;
775 780
776 write_lock_bh(&xfrm_policy_lock); 781 write_lock_bh(&xfrm_policy_lock);
777 782
@@ -789,8 +794,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
789 &net->xfrm.policy_inexact[dir], bydst) { 794 &net->xfrm.policy_inexact[dir], bydst) {
790 if (pol->type != type) 795 if (pol->type != type)
791 continue; 796 continue;
792 __xfrm_policy_unlink(pol, dir); 797 dp = __xfrm_policy_unlink(pol, dir);
793 write_unlock_bh(&xfrm_policy_lock); 798 write_unlock_bh(&xfrm_policy_lock);
799 if (dp)
800 cnt++;
794 801
795 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 802 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
796 audit_info->sessionid, 803 audit_info->sessionid,
@@ -809,8 +816,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
809 bydst) { 816 bydst) {
810 if (pol->type != type) 817 if (pol->type != type)
811 continue; 818 continue;
812 __xfrm_policy_unlink(pol, dir); 819 dp = __xfrm_policy_unlink(pol, dir);
813 write_unlock_bh(&xfrm_policy_lock); 820 write_unlock_bh(&xfrm_policy_lock);
821 if (dp)
822 cnt++;
814 823
815 xfrm_audit_policy_delete(pol, 1, 824 xfrm_audit_policy_delete(pol, 1,
816 audit_info->loginuid, 825 audit_info->loginuid,
@@ -824,6 +833,8 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
824 } 833 }
825 834
826 } 835 }
836 if (!cnt)
837 err = -ESRCH;
827 atomic_inc(&flow_cache_genid); 838 atomic_inc(&flow_cache_genid);
828out: 839out:
829 write_unlock_bh(&xfrm_policy_lock); 840 write_unlock_bh(&xfrm_policy_lock);
@@ -909,6 +920,7 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
909 int match, ret = -ESRCH; 920 int match, ret = -ESRCH;
910 921
911 if (pol->family != family || 922 if (pol->family != family ||
923 (fl->mark & pol->mark.m) != pol->mark.v ||
912 pol->type != type) 924 pol->type != type)
913 return ret; 925 return ret;
914 926
@@ -1033,6 +1045,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1033 int err = 0; 1045 int err = 0;
1034 1046
1035 if (match) { 1047 if (match) {
1048 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1049 pol = NULL;
1050 goto out;
1051 }
1036 err = security_xfrm_policy_lookup(pol->security, 1052 err = security_xfrm_policy_lookup(pol->security,
1037 fl->secid, 1053 fl->secid,
1038 policy_to_flow_dir(dir)); 1054 policy_to_flow_dir(dir));
@@ -1045,6 +1061,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1045 } else 1061 } else
1046 pol = NULL; 1062 pol = NULL;
1047 } 1063 }
1064out:
1048 read_unlock_bh(&xfrm_policy_lock); 1065 read_unlock_bh(&xfrm_policy_lock);
1049 return pol; 1066 return pol;
1050} 1067}
@@ -1137,6 +1154,7 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
1137 } 1154 }
1138 newp->lft = old->lft; 1155 newp->lft = old->lft;
1139 newp->curlft = old->curlft; 1156 newp->curlft = old->curlft;
1157 newp->mark = old->mark;
1140 newp->action = old->action; 1158 newp->action = old->action;
1141 newp->flags = old->flags; 1159 newp->flags = old->flags;
1142 newp->xfrm_nr = old->xfrm_nr; 1160 newp->xfrm_nr = old->xfrm_nr;
@@ -1354,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1354 return err; 1372 return err;
1355} 1373}
1356 1374
1357static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 1375static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1376 struct flowi *fl)
1358{ 1377{
1359 struct xfrm_policy_afinfo *afinfo = 1378 struct xfrm_policy_afinfo *afinfo =
1360 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1379 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1363,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
1363 if (!afinfo) 1382 if (!afinfo)
1364 return -EINVAL; 1383 return -EINVAL;
1365 1384
1366 err = afinfo->fill_dst(xdst, dev); 1385 err = afinfo->fill_dst(xdst, dev, fl);
1367 1386
1368 xfrm_policy_put_afinfo(afinfo); 1387 xfrm_policy_put_afinfo(afinfo);
1369 1388
@@ -1468,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1468 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1487 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1469 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1488 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1470 1489
1471 err = xfrm_fill_dst(xdst, dev); 1490 err = xfrm_fill_dst(xdst, dev, fl);
1472 if (err) 1491 if (err)
1473 goto free_dst; 1492 goto free_dst;
1474 1493
@@ -2045,8 +2064,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2045 int res; 2064 int res;
2046 2065
2047 if (xfrm_decode_session(skb, &fl, family) < 0) { 2066 if (xfrm_decode_session(skb, &fl, family) < 0) {
2048 /* XXX: we should have something like FWDHDRERROR here. */ 2067 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2049 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2050 return 0; 2068 return 0;
2051 } 2069 }
2052 2070
@@ -2421,19 +2439,19 @@ static int __net_init xfrm_statistics_init(struct net *net)
2421{ 2439{
2422 int rv; 2440 int rv;
2423 2441
2424 if (snmp_mib_init((void **)net->mib.xfrm_statistics, 2442 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2425 sizeof(struct linux_xfrm_mib)) < 0) 2443 sizeof(struct linux_xfrm_mib)) < 0)
2426 return -ENOMEM; 2444 return -ENOMEM;
2427 rv = xfrm_proc_init(net); 2445 rv = xfrm_proc_init(net);
2428 if (rv < 0) 2446 if (rv < 0)
2429 snmp_mib_free((void **)net->mib.xfrm_statistics); 2447 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2430 return rv; 2448 return rv;
2431} 2449}
2432 2450
2433static void xfrm_statistics_fini(struct net *net) 2451static void xfrm_statistics_fini(struct net *net)
2434{ 2452{
2435 xfrm_proc_fini(net); 2453 xfrm_proc_fini(net);
2436 snmp_mib_free((void **)net->mib.xfrm_statistics); 2454 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2437} 2455}
2438#else 2456#else
2439static int __net_init xfrm_statistics_init(struct net *net) 2457static int __net_init xfrm_statistics_init(struct net *net)
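
The xfrm changes above thread a mark through the policy and state lookups; in every lookup the test is the same masked comparison, (mark & m) == v, against the value/mask pair stored on the policy or state. A standalone illustration of that predicate; the struct and field names below are stand-ins, not the kernel's struct xfrm_mark.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's value/mask pair; names are illustrative */
struct mark_filter {
	uint32_t v;	/* value */
	uint32_t m;	/* mask */
};

static bool mark_matches(uint32_t mark, const struct mark_filter *f)
{
	return (mark & f->m) == f->v;
}

int main(void)
{
	struct mark_filter pol_mark = { .v = 0x10, .m = 0xf0 };

	printf("%d\n", mark_matches(0x17, &pol_mark));	/* 1: 0x17 & 0xf0 == 0x10 */
	printf("%d\n", mark_matches(0x27, &pol_mark));	/* 0: masked mark differs */
	return 0;
}
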
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fef8db553e8d..58d9ae005597 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -15,7 +15,7 @@
15#include <net/snmp.h> 15#include <net/snmp.h>
16#include <net/xfrm.h> 16#include <net/xfrm.h>
17 17
18static struct snmp_mib xfrm_mib_list[] = { 18static const struct snmp_mib xfrm_mib_list[] = {
19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR), 19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR), 20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR), 21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
@@ -41,6 +41,7 @@ static struct snmp_mib xfrm_mib_list[] = {
41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK), 41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD), 42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR), 43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
44 SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
44 SNMP_MIB_SENTINEL 45 SNMP_MIB_SENTINEL
45}; 46};
46 47
@@ -50,7 +51,8 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
50 int i; 51 int i;
51 for (i=0; xfrm_mib_list[i].name; i++) 52 for (i=0; xfrm_mib_list[i].name; i++)
52 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, 53 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
53 snmp_fold_field((void **)net->mib.xfrm_statistics, 54 snmp_fold_field((void __percpu **)
55 net->mib.xfrm_statistics,
54 xfrm_mib_list[i].entry)); 56 xfrm_mib_list[i].entry));
55 return 0; 57 return 0;
56} 58}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f445ea1c5f52..add77ecb8ac4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -22,6 +22,7 @@
22#include <linux/audit.h> 22#include <linux/audit.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/ktime.h> 24#include <linux/ktime.h>
25#include <linux/slab.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27 28
@@ -603,13 +604,14 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
603 604
604int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) 605int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
605{ 606{
606 int i, err = 0; 607 int i, err = 0, cnt = 0;
607 608
608 spin_lock_bh(&xfrm_state_lock); 609 spin_lock_bh(&xfrm_state_lock);
609 err = xfrm_state_flush_secctx_check(net, proto, audit_info); 610 err = xfrm_state_flush_secctx_check(net, proto, audit_info);
610 if (err) 611 if (err)
611 goto out; 612 goto out;
612 613
614 err = -ESRCH;
613 for (i = 0; i <= net->xfrm.state_hmask; i++) { 615 for (i = 0; i <= net->xfrm.state_hmask; i++) {
614 struct hlist_node *entry; 616 struct hlist_node *entry;
615 struct xfrm_state *x; 617 struct xfrm_state *x;
@@ -626,13 +628,16 @@ restart:
626 audit_info->sessionid, 628 audit_info->sessionid,
627 audit_info->secid); 629 audit_info->secid);
628 xfrm_state_put(x); 630 xfrm_state_put(x);
631 if (!err)
632 cnt++;
629 633
630 spin_lock_bh(&xfrm_state_lock); 634 spin_lock_bh(&xfrm_state_lock);
631 goto restart; 635 goto restart;
632 } 636 }
633 } 637 }
634 } 638 }
635 err = 0; 639 if (cnt)
640 err = 0;
636 641
637out: 642out:
638 spin_unlock_bh(&xfrm_state_lock); 643 spin_unlock_bh(&xfrm_state_lock);
@@ -665,7 +670,7 @@ xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
665 return 0; 670 return 0;
666} 671}
667 672
668static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) 673static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
669{ 674{
670 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); 675 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
671 struct xfrm_state *x; 676 struct xfrm_state *x;
@@ -678,6 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
678 xfrm_addr_cmp(&x->id.daddr, daddr, family)) 683 xfrm_addr_cmp(&x->id.daddr, daddr, family))
679 continue; 684 continue;
680 685
686 if ((mark & x->mark.m) != x->mark.v)
687 continue;
681 xfrm_state_hold(x); 688 xfrm_state_hold(x);
682 return x; 689 return x;
683 } 690 }
@@ -685,7 +692,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
685 return NULL; 692 return NULL;
686} 693}
687 694
688static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 695static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
689{ 696{
690 unsigned int h = xfrm_src_hash(net, daddr, saddr, family); 697 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
691 struct xfrm_state *x; 698 struct xfrm_state *x;
@@ -698,6 +705,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
698 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 705 xfrm_addr_cmp(&x->props.saddr, saddr, family))
699 continue; 706 continue;
700 707
708 if ((mark & x->mark.m) != x->mark.v)
709 continue;
701 xfrm_state_hold(x); 710 xfrm_state_hold(x);
702 return x; 711 return x;
703 } 712 }
@@ -709,12 +718,14 @@ static inline struct xfrm_state *
709__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) 718__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
710{ 719{
711 struct net *net = xs_net(x); 720 struct net *net = xs_net(x);
721 u32 mark = x->mark.v & x->mark.m;
712 722
713 if (use_spi) 723 if (use_spi)
714 return __xfrm_state_lookup(net, &x->id.daddr, x->id.spi, 724 return __xfrm_state_lookup(net, mark, &x->id.daddr,
715 x->id.proto, family); 725 x->id.spi, x->id.proto, family);
716 else 726 else
717 return __xfrm_state_lookup_byaddr(net, &x->id.daddr, 727 return __xfrm_state_lookup_byaddr(net, mark,
728 &x->id.daddr,
718 &x->props.saddr, 729 &x->props.saddr,
719 x->id.proto, family); 730 x->id.proto, family);
720} 731}
@@ -779,6 +790,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
779 int acquire_in_progress = 0; 790 int acquire_in_progress = 0;
780 int error = 0; 791 int error = 0;
781 struct xfrm_state *best = NULL; 792 struct xfrm_state *best = NULL;
793 u32 mark = pol->mark.v & pol->mark.m;
782 794
783 to_put = NULL; 795 to_put = NULL;
784 796
@@ -787,6 +799,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
787 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 799 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
788 if (x->props.family == family && 800 if (x->props.family == family &&
789 x->props.reqid == tmpl->reqid && 801 x->props.reqid == tmpl->reqid &&
802 (mark & x->mark.m) == x->mark.v &&
790 !(x->props.flags & XFRM_STATE_WILDRECV) && 803 !(x->props.flags & XFRM_STATE_WILDRECV) &&
791 xfrm_state_addr_check(x, daddr, saddr, family) && 804 xfrm_state_addr_check(x, daddr, saddr, family) &&
792 tmpl->mode == x->props.mode && 805 tmpl->mode == x->props.mode &&
@@ -802,6 +815,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
802 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 815 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
803 if (x->props.family == family && 816 if (x->props.family == family &&
804 x->props.reqid == tmpl->reqid && 817 x->props.reqid == tmpl->reqid &&
818 (mark & x->mark.m) == x->mark.v &&
805 !(x->props.flags & XFRM_STATE_WILDRECV) && 819 !(x->props.flags & XFRM_STATE_WILDRECV) &&
806 xfrm_state_addr_check(x, daddr, saddr, family) && 820 xfrm_state_addr_check(x, daddr, saddr, family) &&
807 tmpl->mode == x->props.mode && 821 tmpl->mode == x->props.mode &&
@@ -815,7 +829,7 @@ found:
815 x = best; 829 x = best;
816 if (!x && !error && !acquire_in_progress) { 830 if (!x && !error && !acquire_in_progress) {
817 if (tmpl->id.spi && 831 if (tmpl->id.spi &&
818 (x0 = __xfrm_state_lookup(net, daddr, tmpl->id.spi, 832 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
819 tmpl->id.proto, family)) != NULL) { 833 tmpl->id.proto, family)) != NULL) {
820 to_put = x0; 834 to_put = x0;
821 error = -EEXIST; 835 error = -EEXIST;
@@ -829,6 +843,7 @@ found:
829 /* Initialize temporary selector matching only 843 /* Initialize temporary selector matching only
830 * to current session. */ 844 * to current session. */
831 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 845 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
846 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
832 847
833 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 848 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
834 if (error) { 849 if (error) {
@@ -871,7 +886,7 @@ out:
871} 886}
872 887
873struct xfrm_state * 888struct xfrm_state *
874xfrm_stateonly_find(struct net *net, 889xfrm_stateonly_find(struct net *net, u32 mark,
875 xfrm_address_t *daddr, xfrm_address_t *saddr, 890 xfrm_address_t *daddr, xfrm_address_t *saddr,
876 unsigned short family, u8 mode, u8 proto, u32 reqid) 891 unsigned short family, u8 mode, u8 proto, u32 reqid)
877{ 892{
@@ -884,6 +899,7 @@ xfrm_stateonly_find(struct net *net,
884 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 899 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
885 if (x->props.family == family && 900 if (x->props.family == family &&
886 x->props.reqid == reqid && 901 x->props.reqid == reqid &&
902 (mark & x->mark.m) == x->mark.v &&
887 !(x->props.flags & XFRM_STATE_WILDRECV) && 903 !(x->props.flags & XFRM_STATE_WILDRECV) &&
888 xfrm_state_addr_check(x, daddr, saddr, family) && 904 xfrm_state_addr_check(x, daddr, saddr, family) &&
889 mode == x->props.mode && 905 mode == x->props.mode &&
@@ -946,11 +962,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
946 struct xfrm_state *x; 962 struct xfrm_state *x;
947 struct hlist_node *entry; 963 struct hlist_node *entry;
948 unsigned int h; 964 unsigned int h;
965 u32 mark = xnew->mark.v & xnew->mark.m;
949 966
950 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 967 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
951 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 968 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
952 if (x->props.family == family && 969 if (x->props.family == family &&
953 x->props.reqid == reqid && 970 x->props.reqid == reqid &&
971 (mark & x->mark.m) == x->mark.v &&
954 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 972 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
955 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 973 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
956 x->genid = xfrm_state_genid; 974 x->genid = xfrm_state_genid;
@@ -967,11 +985,12 @@ void xfrm_state_insert(struct xfrm_state *x)
967EXPORT_SYMBOL(xfrm_state_insert); 985EXPORT_SYMBOL(xfrm_state_insert);
968 986
969/* xfrm_state_lock is held */ 987/* xfrm_state_lock is held */
970static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create) 988static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
971{ 989{
972 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 990 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
973 struct hlist_node *entry; 991 struct hlist_node *entry;
974 struct xfrm_state *x; 992 struct xfrm_state *x;
993 u32 mark = m->v & m->m;
975 994
976 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 995 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
977 if (x->props.reqid != reqid || 996 if (x->props.reqid != reqid ||
@@ -980,6 +999,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
980 x->km.state != XFRM_STATE_ACQ || 999 x->km.state != XFRM_STATE_ACQ ||
981 x->id.spi != 0 || 1000 x->id.spi != 0 ||
982 x->id.proto != proto || 1001 x->id.proto != proto ||
1002 (mark & x->mark.m) != x->mark.v ||
983 xfrm_addr_cmp(&x->id.daddr, daddr, family) || 1003 xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
984 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 1004 xfrm_addr_cmp(&x->props.saddr, saddr, family))
985 continue; 1005 continue;
@@ -1022,6 +1042,8 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1022 x->props.family = family; 1042 x->props.family = family;
1023 x->props.mode = mode; 1043 x->props.mode = mode;
1024 x->props.reqid = reqid; 1044 x->props.reqid = reqid;
1045 x->mark.v = m->v;
1046 x->mark.m = m->m;
1025 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1047 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1026 xfrm_state_hold(x); 1048 xfrm_state_hold(x);
1027 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); 1049 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
@@ -1038,7 +1060,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1038 return x; 1060 return x;
1039} 1061}
1040 1062
1041static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq); 1063static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1042 1064
1043int xfrm_state_add(struct xfrm_state *x) 1065int xfrm_state_add(struct xfrm_state *x)
1044{ 1066{
@@ -1046,6 +1068,7 @@ int xfrm_state_add(struct xfrm_state *x)
1046 struct xfrm_state *x1, *to_put; 1068 struct xfrm_state *x1, *to_put;
1047 int family; 1069 int family;
1048 int err; 1070 int err;
1071 u32 mark = x->mark.v & x->mark.m;
1049 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1072 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1050 1073
1051 family = x->props.family; 1074 family = x->props.family;
@@ -1063,7 +1086,7 @@ int xfrm_state_add(struct xfrm_state *x)
1063 } 1086 }
1064 1087
1065 if (use_spi && x->km.seq) { 1088 if (use_spi && x->km.seq) {
1066 x1 = __xfrm_find_acq_byseq(net, x->km.seq); 1089 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1067 if (x1 && ((x1->id.proto != x->id.proto) || 1090 if (x1 && ((x1->id.proto != x->id.proto) ||
1068 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1091 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1069 to_put = x1; 1092 to_put = x1;
@@ -1072,8 +1095,8 @@ int xfrm_state_add(struct xfrm_state *x)
1072 } 1095 }
1073 1096
1074 if (use_spi && !x1) 1097 if (use_spi && !x1)
1075 x1 = __find_acq_core(net, family, x->props.mode, x->props.reqid, 1098 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1076 x->id.proto, 1099 x->props.reqid, x->id.proto,
1077 &x->id.daddr, &x->props.saddr, 0); 1100 &x->id.daddr, &x->props.saddr, 0);
1078 1101
1079 __xfrm_state_bump_genids(x); 1102 __xfrm_state_bump_genids(x);
@@ -1147,6 +1170,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1147 goto error; 1170 goto error;
1148 } 1171 }
1149 1172
1173 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1174
1150 err = xfrm_init_state(x); 1175 err = xfrm_init_state(x);
1151 if (err) 1176 if (err)
1152 goto error; 1177 goto error;
@@ -1338,41 +1363,41 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1338EXPORT_SYMBOL(xfrm_state_check_expire); 1363EXPORT_SYMBOL(xfrm_state_check_expire);
1339 1364
1340struct xfrm_state * 1365struct xfrm_state *
1341xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, 1366xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
1342 unsigned short family) 1367 u8 proto, unsigned short family)
1343{ 1368{
1344 struct xfrm_state *x; 1369 struct xfrm_state *x;
1345 1370
1346 spin_lock_bh(&xfrm_state_lock); 1371 spin_lock_bh(&xfrm_state_lock);
1347 x = __xfrm_state_lookup(net, daddr, spi, proto, family); 1372 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1348 spin_unlock_bh(&xfrm_state_lock); 1373 spin_unlock_bh(&xfrm_state_lock);
1349 return x; 1374 return x;
1350} 1375}
1351EXPORT_SYMBOL(xfrm_state_lookup); 1376EXPORT_SYMBOL(xfrm_state_lookup);
1352 1377
1353struct xfrm_state * 1378struct xfrm_state *
1354xfrm_state_lookup_byaddr(struct net *net, 1379xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1355 xfrm_address_t *daddr, xfrm_address_t *saddr, 1380 xfrm_address_t *daddr, xfrm_address_t *saddr,
1356 u8 proto, unsigned short family) 1381 u8 proto, unsigned short family)
1357{ 1382{
1358 struct xfrm_state *x; 1383 struct xfrm_state *x;
1359 1384
1360 spin_lock_bh(&xfrm_state_lock); 1385 spin_lock_bh(&xfrm_state_lock);
1361 x = __xfrm_state_lookup_byaddr(net, daddr, saddr, proto, family); 1386 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1362 spin_unlock_bh(&xfrm_state_lock); 1387 spin_unlock_bh(&xfrm_state_lock);
1363 return x; 1388 return x;
1364} 1389}
1365EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 1390EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1366 1391
1367struct xfrm_state * 1392struct xfrm_state *
1368xfrm_find_acq(struct net *net, u8 mode, u32 reqid, u8 proto, 1393xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
1369 xfrm_address_t *daddr, xfrm_address_t *saddr, 1394 xfrm_address_t *daddr, xfrm_address_t *saddr,
1370 int create, unsigned short family) 1395 int create, unsigned short family)
1371{ 1396{
1372 struct xfrm_state *x; 1397 struct xfrm_state *x;
1373 1398
1374 spin_lock_bh(&xfrm_state_lock); 1399 spin_lock_bh(&xfrm_state_lock);
1375 x = __find_acq_core(net, family, mode, reqid, proto, daddr, saddr, create); 1400 x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
1376 spin_unlock_bh(&xfrm_state_lock); 1401 spin_unlock_bh(&xfrm_state_lock);
1377 1402
1378 return x; 1403 return x;
@@ -1419,7 +1444,7 @@ EXPORT_SYMBOL(xfrm_state_sort);
1419 1444
1420/* Silly enough, but I'm lazy to build resolution list */ 1445/* Silly enough, but I'm lazy to build resolution list */
1421 1446
1422static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq) 1447static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1423{ 1448{
1424 int i; 1449 int i;
1425 1450
@@ -1429,6 +1454,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1429 1454
1430 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 1455 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
1431 if (x->km.seq == seq && 1456 if (x->km.seq == seq &&
1457 (mark & x->mark.m) == x->mark.v &&
1432 x->km.state == XFRM_STATE_ACQ) { 1458 x->km.state == XFRM_STATE_ACQ) {
1433 xfrm_state_hold(x); 1459 xfrm_state_hold(x);
1434 return x; 1460 return x;
@@ -1438,12 +1464,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1438 return NULL; 1464 return NULL;
1439} 1465}
1440 1466
1441struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq) 1467struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1442{ 1468{
1443 struct xfrm_state *x; 1469 struct xfrm_state *x;
1444 1470
1445 spin_lock_bh(&xfrm_state_lock); 1471 spin_lock_bh(&xfrm_state_lock);
1446 x = __xfrm_find_acq_byseq(net, seq); 1472 x = __xfrm_find_acq_byseq(net, mark, seq);
1447 spin_unlock_bh(&xfrm_state_lock); 1473 spin_unlock_bh(&xfrm_state_lock);
1448 return x; 1474 return x;
1449} 1475}
@@ -1452,12 +1478,12 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
1452u32 xfrm_get_acqseq(void) 1478u32 xfrm_get_acqseq(void)
1453{ 1479{
1454 u32 res; 1480 u32 res;
1455 static u32 acqseq; 1481 static atomic_t acqseq;
1456 static DEFINE_SPINLOCK(acqseq_lock); 1482
1483 do {
1484 res = atomic_inc_return(&acqseq);
1485 } while (!res);
1457 1486
1458 spin_lock_bh(&acqseq_lock);
1459 res = (++acqseq ? : ++acqseq);
1460 spin_unlock_bh(&acqseq_lock);
1461 return res; 1487 return res;
1462} 1488}
1463EXPORT_SYMBOL(xfrm_get_acqseq); 1489EXPORT_SYMBOL(xfrm_get_acqseq);
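
For reference, the hunk above replaces the spinlock-protected counter with an atomic one that retries on wrap so a zero sequence number is never handed out. A minimal user-space analogue of that pattern, using C11 atomics rather than the kernel's atomic_t (names and the stand-alone setting are illustrative, not from the kernel tree):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint acqseq;      /* stands in for the kernel's static atomic_t */

static uint32_t get_acqseq(void)
{
        uint32_t res;

        do {
                /* atomic_fetch_add returns the old value; +1 mirrors atomic_inc_return() */
                res = atomic_fetch_add(&acqseq, 1) + 1;
        } while (!res);         /* skip 0 on wrap-around, as in the loop above */

        return res;
}

int main(void)
{
        printf("%u\n", get_acqseq());
        printf("%u\n", get_acqseq());
        return 0;
}
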
@@ -1470,6 +1496,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1470 int err = -ENOENT; 1496 int err = -ENOENT;
1471 __be32 minspi = htonl(low); 1497 __be32 minspi = htonl(low);
1472 __be32 maxspi = htonl(high); 1498 __be32 maxspi = htonl(high);
1499 u32 mark = x->mark.v & x->mark.m;
1473 1500
1474 spin_lock_bh(&x->lock); 1501 spin_lock_bh(&x->lock);
1475 if (x->km.state == XFRM_STATE_DEAD) 1502 if (x->km.state == XFRM_STATE_DEAD)
@@ -1482,7 +1509,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1482 err = -ENOENT; 1509 err = -ENOENT;
1483 1510
1484 if (minspi == maxspi) { 1511 if (minspi == maxspi) {
1485 x0 = xfrm_state_lookup(net, &x->id.daddr, minspi, x->id.proto, x->props.family); 1512 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1486 if (x0) { 1513 if (x0) {
1487 xfrm_state_put(x0); 1514 xfrm_state_put(x0);
1488 goto unlock; 1515 goto unlock;
@@ -1492,7 +1519,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1492 u32 spi = 0; 1519 u32 spi = 0;
1493 for (h=0; h<high-low+1; h++) { 1520 for (h=0; h<high-low+1; h++) {
1494 spi = low + net_random()%(high-low+1); 1521 spi = low + net_random()%(high-low+1);
1495 x0 = xfrm_state_lookup(net, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); 1522 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1496 if (x0 == NULL) { 1523 if (x0 == NULL) {
1497 x->id.spi = htonl(spi); 1524 x->id.spi = htonl(spi);
1498 break; 1525 break;
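
Every lookup loop touched in this file gains the same predicate: a state matches only when the caller's mark, filtered through the state's mask, equals the state's stored value. A minimal stand-alone sketch of that test (the struct mirrors struct xfrm_mark; the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mark_filter {            /* mirrors struct xfrm_mark { __u32 v; __u32 m; } */
        uint32_t v;             /* value the state was configured with */
        uint32_t m;             /* mask selecting the relevant bits */
};

static bool mark_matches(uint32_t mark, const struct mark_filter *f)
{
        /* same test as the added "(mark & x->mark.m) == x->mark.v" checks */
        return (mark & f->m) == f->v;
}

int main(void)
{
        struct mark_filter f = { .v = 0x10, .m = 0xff };

        printf("%d\n", mark_matches(0x3410, &f));       /* 1: low byte matches */
        printf("%d\n", mark_matches(0x3411, &f));       /* 0: low byte differs */
        return 0;
}
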
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2e221f2cad7e..05640bc9594b 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -1,8 +1,9 @@
1#include <linux/sysctl.h> 1#include <linux/sysctl.h>
2#include <linux/slab.h>
2#include <net/net_namespace.h> 3#include <net/net_namespace.h>
3#include <net/xfrm.h> 4#include <net/xfrm.h>
4 5
5static void __xfrm_sysctl_init(struct net *net) 6static void __net_init __xfrm_sysctl_init(struct net *net)
6{ 7{
7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME; 8 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE; 9 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
@@ -64,7 +65,7 @@ out_kmemdup:
64 return -ENOMEM; 65 return -ENOMEM;
65} 66}
66 67
67void xfrm_sysctl_fini(struct net *net) 68void __net_exit xfrm_sysctl_fini(struct net *net)
68{ 69{
69 struct ctl_table *table; 70 struct ctl_table *table;
70 71
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5a712976004..6106b72826d3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,6 +446,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
446 goto error; 446 goto error;
447 } 447 }
448 448
449 xfrm_mark_get(attrs, &x->mark);
450
449 err = xfrm_init_state(x); 451 err = xfrm_init_state(x);
450 if (err) 452 if (err)
451 goto error; 453 goto error;
@@ -526,11 +528,13 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
526 int *errp) 528 int *errp)
527{ 529{
528 struct xfrm_state *x = NULL; 530 struct xfrm_state *x = NULL;
531 struct xfrm_mark m;
529 int err; 532 int err;
533 u32 mark = xfrm_mark_get(attrs, &m);
530 534
531 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 535 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
532 err = -ESRCH; 536 err = -ESRCH;
533 x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family); 537 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
534 } else { 538 } else {
535 xfrm_address_t *saddr = NULL; 539 xfrm_address_t *saddr = NULL;
536 540
@@ -541,7 +545,8 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
541 } 545 }
542 546
543 err = -ESRCH; 547 err = -ESRCH;
544 x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr, 548 x = xfrm_state_lookup_byaddr(net, mark,
549 &p->daddr, saddr,
545 p->proto, p->family); 550 p->proto, p->family);
546 } 551 }
547 552
@@ -683,6 +688,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
683 if (x->encap) 688 if (x->encap)
684 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 689 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
685 690
691 if (xfrm_mark_put(skb, &x->mark))
692 goto nla_put_failure;
693
686 if (x->security && copy_sec_ctx(x->security, skb) < 0) 694 if (x->security && copy_sec_ctx(x->security, skb) < 0)
687 goto nla_put_failure; 695 goto nla_put_failure;
688 696
@@ -947,6 +955,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
947 xfrm_address_t *daddr; 955 xfrm_address_t *daddr;
948 int family; 956 int family;
949 int err; 957 int err;
958 u32 mark;
959 struct xfrm_mark m;
950 960
951 p = nlmsg_data(nlh); 961 p = nlmsg_data(nlh);
952 err = verify_userspi_info(p); 962 err = verify_userspi_info(p);
@@ -957,8 +967,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
957 daddr = &p->info.id.daddr; 967 daddr = &p->info.id.daddr;
958 968
959 x = NULL; 969 x = NULL;
970
971 mark = xfrm_mark_get(attrs, &m);
960 if (p->info.seq) { 972 if (p->info.seq) {
961 x = xfrm_find_acq_byseq(net, p->info.seq); 973 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
962 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 974 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
963 xfrm_state_put(x); 975 xfrm_state_put(x);
964 x = NULL; 976 x = NULL;
@@ -966,7 +978,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
966 } 978 }
967 979
968 if (!x) 980 if (!x)
969 x = xfrm_find_acq(net, p->info.mode, p->info.reqid, 981 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
970 p->info.id.proto, daddr, 982 p->info.id.proto, daddr,
971 &p->info.saddr, 1, 983 &p->info.saddr, 1,
972 family); 984 family);
@@ -1220,6 +1232,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
1220 if (err) 1232 if (err)
1221 goto error; 1233 goto error;
1222 1234
1235 xfrm_mark_get(attrs, &xp->mark);
1236
1223 return xp; 1237 return xp;
1224 error: 1238 error:
1225 *errp = err; 1239 *errp = err;
@@ -1366,10 +1380,13 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1366 goto nlmsg_failure; 1380 goto nlmsg_failure;
1367 if (copy_to_user_policy_type(xp->type, skb) < 0) 1381 if (copy_to_user_policy_type(xp->type, skb) < 0)
1368 goto nlmsg_failure; 1382 goto nlmsg_failure;
1383 if (xfrm_mark_put(skb, &xp->mark))
1384 goto nla_put_failure;
1369 1385
1370 nlmsg_end(skb, nlh); 1386 nlmsg_end(skb, nlh);
1371 return 0; 1387 return 0;
1372 1388
1389nla_put_failure:
1373nlmsg_failure: 1390nlmsg_failure:
1374 nlmsg_cancel(skb, nlh); 1391 nlmsg_cancel(skb, nlh);
1375 return -EMSGSIZE; 1392 return -EMSGSIZE;
@@ -1441,6 +1458,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1441 int err; 1458 int err;
1442 struct km_event c; 1459 struct km_event c;
1443 int delete; 1460 int delete;
1461 struct xfrm_mark m;
1462 u32 mark = xfrm_mark_get(attrs, &m);
1444 1463
1445 p = nlmsg_data(nlh); 1464 p = nlmsg_data(nlh);
1446 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1465 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
@@ -1454,7 +1473,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1454 return err; 1473 return err;
1455 1474
1456 if (p->index) 1475 if (p->index)
1457 xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err); 1476 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1458 else { 1477 else {
1459 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1478 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1460 struct xfrm_sec_ctx *ctx; 1479 struct xfrm_sec_ctx *ctx;
@@ -1471,8 +1490,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1471 if (err) 1490 if (err)
1472 return err; 1491 return err;
1473 } 1492 }
1474 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 1493 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1475 delete, &err); 1494 ctx, delete, &err);
1476 security_xfrm_policy_free(ctx); 1495 security_xfrm_policy_free(ctx);
1477 } 1496 }
1478 if (xp == NULL) 1497 if (xp == NULL)
@@ -1524,8 +1543,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1524 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1543 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1525 audit_info.secid = NETLINK_CB(skb).sid; 1544 audit_info.secid = NETLINK_CB(skb).sid;
1526 err = xfrm_state_flush(net, p->proto, &audit_info); 1545 err = xfrm_state_flush(net, p->proto, &audit_info);
1527 if (err) 1546 if (err) {
1547 if (err == -ESRCH) /* empty table */
1548 return 0;
1528 return err; 1549 return err;
1550 }
1529 c.data.proto = p->proto; 1551 c.data.proto = p->proto;
1530 c.event = nlh->nlmsg_type; 1552 c.event = nlh->nlmsg_type;
1531 c.seq = nlh->nlmsg_seq; 1553 c.seq = nlh->nlmsg_seq;
@@ -1541,6 +1563,7 @@ static inline size_t xfrm_aevent_msgsize(void)
1541 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 1563 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1542 + nla_total_size(sizeof(struct xfrm_replay_state)) 1564 + nla_total_size(sizeof(struct xfrm_replay_state))
1543 + nla_total_size(sizeof(struct xfrm_lifetime_cur)) 1565 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1566 + nla_total_size(sizeof(struct xfrm_mark))
1544 + nla_total_size(4) /* XFRM_AE_RTHR */ 1567 + nla_total_size(4) /* XFRM_AE_RTHR */
1545 + nla_total_size(4); /* XFRM_AE_ETHR */ 1568 + nla_total_size(4); /* XFRM_AE_ETHR */
1546} 1569}
@@ -1573,6 +1596,9 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
1573 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1596 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1574 x->replay_maxage * 10 / HZ); 1597 x->replay_maxage * 10 / HZ);
1575 1598
1599 if (xfrm_mark_put(skb, &x->mark))
1600 goto nla_put_failure;
1601
1576 return nlmsg_end(skb, nlh); 1602 return nlmsg_end(skb, nlh);
1577 1603
1578nla_put_failure: 1604nla_put_failure:
@@ -1588,6 +1614,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1588 struct sk_buff *r_skb; 1614 struct sk_buff *r_skb;
1589 int err; 1615 int err;
1590 struct km_event c; 1616 struct km_event c;
1617 u32 mark;
1618 struct xfrm_mark m;
1591 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1619 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1592 struct xfrm_usersa_id *id = &p->sa_id; 1620 struct xfrm_usersa_id *id = &p->sa_id;
1593 1621
@@ -1595,7 +1623,9 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1595 if (r_skb == NULL) 1623 if (r_skb == NULL)
1596 return -ENOMEM; 1624 return -ENOMEM;
1597 1625
1598 x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family); 1626 mark = xfrm_mark_get(attrs, &m);
1627
1628 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1599 if (x == NULL) { 1629 if (x == NULL) {
1600 kfree_skb(r_skb); 1630 kfree_skb(r_skb);
1601 return -ESRCH; 1631 return -ESRCH;
@@ -1626,6 +1656,8 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1626 struct xfrm_state *x; 1656 struct xfrm_state *x;
1627 struct km_event c; 1657 struct km_event c;
1628 int err = - EINVAL; 1658 int err = - EINVAL;
1659 u32 mark = 0;
1660 struct xfrm_mark m;
1629 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1661 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1630 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 1662 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1631 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 1663 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
@@ -1637,7 +1669,9 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1637 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1669 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1638 return err; 1670 return err;
1639 1671
1640 x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1672 mark = xfrm_mark_get(attrs, &m);
1673
1674 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1641 if (x == NULL) 1675 if (x == NULL)
1642 return -ESRCH; 1676 return -ESRCH;
1643 1677
@@ -1676,8 +1710,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1676 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1710 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1677 audit_info.secid = NETLINK_CB(skb).sid; 1711 audit_info.secid = NETLINK_CB(skb).sid;
1678 err = xfrm_policy_flush(net, type, &audit_info); 1712 err = xfrm_policy_flush(net, type, &audit_info);
1679 if (err) 1713 if (err) {
1714 if (err == -ESRCH) /* empty table */
1715 return 0;
1680 return err; 1716 return err;
1717 }
1718
1681 c.data.type = type; 1719 c.data.type = type;
1682 c.event = nlh->nlmsg_type; 1720 c.event = nlh->nlmsg_type;
1683 c.seq = nlh->nlmsg_seq; 1721 c.seq = nlh->nlmsg_seq;
@@ -1696,13 +1734,15 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1696 struct xfrm_userpolicy_info *p = &up->pol; 1734 struct xfrm_userpolicy_info *p = &up->pol;
1697 u8 type = XFRM_POLICY_TYPE_MAIN; 1735 u8 type = XFRM_POLICY_TYPE_MAIN;
1698 int err = -ENOENT; 1736 int err = -ENOENT;
1737 struct xfrm_mark m;
1738 u32 mark = xfrm_mark_get(attrs, &m);
1699 1739
1700 err = copy_from_user_policy_type(&type, attrs); 1740 err = copy_from_user_policy_type(&type, attrs);
1701 if (err) 1741 if (err)
1702 return err; 1742 return err;
1703 1743
1704 if (p->index) 1744 if (p->index)
1705 xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err); 1745 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1706 else { 1746 else {
1707 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1747 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1708 struct xfrm_sec_ctx *ctx; 1748 struct xfrm_sec_ctx *ctx;
@@ -1719,7 +1759,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1719 if (err) 1759 if (err)
1720 return err; 1760 return err;
1721 } 1761 }
1722 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err); 1762 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1763 &p->sel, ctx, 0, &err);
1723 security_xfrm_policy_free(ctx); 1764 security_xfrm_policy_free(ctx);
1724 } 1765 }
1725 if (xp == NULL) 1766 if (xp == NULL)
@@ -1759,8 +1800,10 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1759 int err; 1800 int err;
1760 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1801 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1761 struct xfrm_usersa_info *p = &ue->state; 1802 struct xfrm_usersa_info *p = &ue->state;
1803 struct xfrm_mark m;
 1804 u32 mark = xfrm_mark_get(attrs, &m);
1762 1805
1763 x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family); 1806 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1764 1807
1765 err = -ENOENT; 1808 err = -ENOENT;
1766 if (x == NULL) 1809 if (x == NULL)
@@ -1794,6 +1837,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1794 struct xfrm_user_tmpl *ut; 1837 struct xfrm_user_tmpl *ut;
1795 int i; 1838 int i;
1796 struct nlattr *rt = attrs[XFRMA_TMPL]; 1839 struct nlattr *rt = attrs[XFRMA_TMPL];
1840 struct xfrm_mark mark;
1797 1841
1798 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 1842 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1799 struct xfrm_state *x = xfrm_state_alloc(net); 1843 struct xfrm_state *x = xfrm_state_alloc(net);
@@ -1802,6 +1846,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1802 if (!x) 1846 if (!x)
1803 goto nomem; 1847 goto nomem;
1804 1848
1849 xfrm_mark_get(attrs, &mark);
1850
1805 err = verify_newpolicy_info(&ua->policy); 1851 err = verify_newpolicy_info(&ua->policy);
1806 if (err) 1852 if (err)
1807 goto bad_policy; 1853 goto bad_policy;
@@ -1814,7 +1860,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1814 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1860 memcpy(&x->id, &ua->id, sizeof(ua->id));
1815 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1861 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1816 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 1862 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1817 1863 xp->mark.m = x->mark.m = mark.m;
1864 xp->mark.v = x->mark.v = mark.v;
1818 ut = nla_data(rt); 1865 ut = nla_data(rt);
1819 /* extract the templates and for each call km_key */ 1866 /* extract the templates and for each call km_key */
1820 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 1867 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
@@ -2054,6 +2101,10 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2054#undef XMSGSIZE 2101#undef XMSGSIZE
2055 2102
2056static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 2103static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2104 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2105 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2106 [XFRMA_LASTUSED] = { .type = NLA_U64},
2107 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2057 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, 2108 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2058 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 2109 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2059 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 2110 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
@@ -2070,6 +2121,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2070 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, 2121 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2071 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 2122 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2072 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 2123 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2124 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2073}; 2125};
2074 2126
2075static struct xfrm_link { 2127static struct xfrm_link {
@@ -2149,7 +2201,8 @@ static void xfrm_netlink_rcv(struct sk_buff *skb)
2149 2201
2150static inline size_t xfrm_expire_msgsize(void) 2202static inline size_t xfrm_expire_msgsize(void)
2151{ 2203{
2152 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)); 2204 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2205 + nla_total_size(sizeof(struct xfrm_mark));
2153} 2206}
2154 2207
2155static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 2208static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
@@ -2165,7 +2218,13 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
2165 copy_to_user_state(x, &ue->state); 2218 copy_to_user_state(x, &ue->state);
2166 ue->hard = (c->data.hard != 0) ? 1 : 0; 2219 ue->hard = (c->data.hard != 0) ? 1 : 0;
2167 2220
2221 if (xfrm_mark_put(skb, &x->mark))
2222 goto nla_put_failure;
2223
2168 return nlmsg_end(skb, nlh); 2224 return nlmsg_end(skb, nlh);
2225
2226nla_put_failure:
2227 return -EMSGSIZE;
2169} 2228}
2170 2229
2171static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) 2230static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
@@ -2177,8 +2236,10 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2177 if (skb == NULL) 2236 if (skb == NULL)
2178 return -ENOMEM; 2237 return -ENOMEM;
2179 2238
2180 if (build_expire(skb, x, c) < 0) 2239 if (build_expire(skb, x, c) < 0) {
2181 BUG(); 2240 kfree_skb(skb);
2241 return -EMSGSIZE;
2242 }
2182 2243
2183 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2244 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2184} 2245}
@@ -2266,6 +2327,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2266 if (c->event == XFRM_MSG_DELSA) { 2327 if (c->event == XFRM_MSG_DELSA) {
2267 len += nla_total_size(headlen); 2328 len += nla_total_size(headlen);
2268 headlen = sizeof(*id); 2329 headlen = sizeof(*id);
2330 len += nla_total_size(sizeof(struct xfrm_mark));
2269 } 2331 }
2270 len += NLMSG_ALIGN(headlen); 2332 len += NLMSG_ALIGN(headlen);
2271 2333
@@ -2336,6 +2398,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2336{ 2398{
2337 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 2399 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2338 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2400 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2401 + nla_total_size(sizeof(struct xfrm_mark))
2339 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 2402 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2340 + userpolicy_type_attrsize(); 2403 + userpolicy_type_attrsize();
2341} 2404}
@@ -2368,9 +2431,12 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2368 goto nlmsg_failure; 2431 goto nlmsg_failure;
2369 if (copy_to_user_policy_type(xp->type, skb) < 0) 2432 if (copy_to_user_policy_type(xp->type, skb) < 0)
2370 goto nlmsg_failure; 2433 goto nlmsg_failure;
2434 if (xfrm_mark_put(skb, &xp->mark))
2435 goto nla_put_failure;
2371 2436
2372 return nlmsg_end(skb, nlh); 2437 return nlmsg_end(skb, nlh);
2373 2438
2439nla_put_failure:
2374nlmsg_failure: 2440nlmsg_failure:
2375 nlmsg_cancel(skb, nlh); 2441 nlmsg_cancel(skb, nlh);
2376 return -EMSGSIZE; 2442 return -EMSGSIZE;
@@ -2457,6 +2523,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2457 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) 2523 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2458 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2524 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2459 + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) 2525 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2526 + nla_total_size(sizeof(struct xfrm_mark))
2460 + userpolicy_type_attrsize(); 2527 + userpolicy_type_attrsize();
2461} 2528}
2462 2529
@@ -2479,10 +2546,13 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2479 goto nlmsg_failure; 2546 goto nlmsg_failure;
2480 if (copy_to_user_policy_type(xp->type, skb) < 0) 2547 if (copy_to_user_policy_type(xp->type, skb) < 0)
2481 goto nlmsg_failure; 2548 goto nlmsg_failure;
2549 if (xfrm_mark_put(skb, &xp->mark))
2550 goto nla_put_failure;
2482 upe->hard = !!hard; 2551 upe->hard = !!hard;
2483 2552
2484 return nlmsg_end(skb, nlh); 2553 return nlmsg_end(skb, nlh);
2485 2554
2555nla_put_failure:
2486nlmsg_failure: 2556nlmsg_failure:
2487 nlmsg_cancel(skb, nlh); 2557 nlmsg_cancel(skb, nlh);
2488 return -EMSGSIZE; 2558 return -EMSGSIZE;
@@ -2519,6 +2589,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2519 headlen = sizeof(*id); 2589 headlen = sizeof(*id);
2520 } 2590 }
2521 len += userpolicy_type_attrsize(); 2591 len += userpolicy_type_attrsize();
2592 len += nla_total_size(sizeof(struct xfrm_mark));
2522 len += NLMSG_ALIGN(headlen); 2593 len += NLMSG_ALIGN(headlen);
2523 2594
2524 skb = nlmsg_new(len, GFP_ATOMIC); 2595 skb = nlmsg_new(len, GFP_ATOMIC);
@@ -2554,10 +2625,14 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2554 if (copy_to_user_policy_type(xp->type, skb) < 0) 2625 if (copy_to_user_policy_type(xp->type, skb) < 0)
2555 goto nlmsg_failure; 2626 goto nlmsg_failure;
2556 2627
2628 if (xfrm_mark_put(skb, &xp->mark))
2629 goto nla_put_failure;
2630
2557 nlmsg_end(skb, nlh); 2631 nlmsg_end(skb, nlh);
2558 2632
2559 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2633 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2560 2634
2635nla_put_failure:
2561nlmsg_failure: 2636nlmsg_failure:
2562 kfree_skb(skb); 2637 kfree_skb(skb);
2563 return -1; 2638 return -1;
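
Throughout xfrm_user.c the new XFRMA_MARK attribute is read with xfrm_mark_get() and echoed back with xfrm_mark_put(); as used above, the getter fills a struct xfrm_mark (leaving it zeroed when the attribute is absent) and returns the pre-masked value that the state and policy lookups expect. A rough stand-alone sketch of that getter's semantics, with a local struct so it builds without kernel headers (layout assumed to match linux/xfrm.h):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct mark_attr {              /* assumed layout of struct xfrm_mark: value, mask */
        uint32_t v;
        uint32_t m;
};

/* User-space model of xfrm_mark_get(): copy the attribute when present,
 * otherwise leave value and mask at zero, then hand back v & m. */
static uint32_t example_mark_get(const struct mark_attr *attr, struct mark_attr *m)
{
        memset(m, 0, sizeof(*m));
        if (attr)
                *m = *attr;
        return m->v & m->m;
}

int main(void)
{
        struct mark_attr wire = { .v = 0x10, .m = 0xff };
        struct mark_attr m;

        printf("with attr: %#x\n", example_mark_get(&wire, &m));        /* 0x10 */
        printf("without:   %#x\n", example_mark_get(NULL, &m));         /* 0 */
        return 0;
}
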