Diffstat (limited to 'net')
-rw-r--r--  net/802/Kconfig | 7
-rw-r--r--  net/802/Makefile | 2
-rw-r--r--  net/802/garp.c | 636
-rw-r--r--  net/802/stp.c | 102
-rw-r--r--  net/8021q/Kconfig | 10
-rw-r--r--  net/8021q/Makefile | 12
-rw-r--r--  net/8021q/vlan.c | 80
-rw-r--r--  net/8021q/vlan.h | 86
-rw-r--r--  net/8021q/vlan_core.c | 64
-rw-r--r--  net/8021q/vlan_dev.c | 298
-rw-r--r--  net/8021q/vlan_gvrp.c | 66
-rw-r--r--  net/8021q/vlan_netlink.c | 7
-rw-r--r--  net/8021q/vlanproc.c | 13
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 4
-rw-r--r--  net/atm/addr.c | 10
-rw-r--r--  net/atm/addr.h | 4
-rw-r--r--  net/atm/br2684.c | 14
-rw-r--r--  net/atm/common.c | 8
-rw-r--r--  net/atm/lec.c | 55
-rw-r--r--  net/atm/lec.h | 10
-rw-r--r--  net/ax25/af_ax25.c | 6
-rw-r--r--  net/ax25/ax25_std_timer.c | 8
-rw-r--r--  net/bluetooth/bnep/bnep.h | 4
-rw-r--r--  net/bluetooth/bnep/core.c | 4
-rw-r--r--  net/bluetooth/bnep/netdev.c | 4
-rw-r--r--  net/bluetooth/bnep/sock.c | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 2
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 2
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 2
-rw-r--r--  net/bridge/Kconfig | 1
-rw-r--r--  net/bridge/br.c | 18
-rw-r--r--  net/bridge/br_device.c | 14
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_forward.c | 6
-rw-r--r--  net/bridge/br_if.c | 12
-rw-r--r--  net/bridge/br_input.c | 25
-rw-r--r--  net/bridge/br_ioctl.c | 2
-rw-r--r--  net/bridge/br_notify.c | 2
-rw-r--r--  net/bridge/br_private.h | 10
-rw-r--r--  net/bridge/br_private_stp.h | 2
-rw-r--r--  net/bridge/br_stp.c | 2
-rw-r--r--  net/bridge/br_stp_bpdu.c | 14
-rw-r--r--  net/bridge/br_stp_if.c | 6
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/bridge/netfilter/Kconfig | 11
-rw-r--r--  net/bridge/netfilter/Makefile | 1
-rw-r--r--  net/bridge/netfilter/ebt_ip6.c | 144
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 66
-rw-r--r--  net/core/dev.c | 380
-rw-r--r--  net/core/dev_mcast.c | 24
-rw-r--r--  net/core/ethtool.c | 37
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/link_watch.c | 11
-rw-r--r--  net/core/neighbour.c | 8
-rw-r--r--  net/core/net-sysfs.c | 26
-rw-r--r--  net/core/netpoll.c | 24
-rw-r--r--  net/core/pktgen.c | 69
-rw-r--r--  net/core/rtnetlink.c | 26
-rw-r--r--  net/core/skbuff.c | 166
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/core/sysctl_net_core.c | 39
-rw-r--r--  net/dccp/ccids/ccid3.c | 14
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 10
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 103
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 30
-rw-r--r--  net/dccp/dccp.h | 17
-rw-r--r--  net/dccp/ipv4.c | 19
-rw-r--r--  net/dccp/ipv6.c | 15
-rw-r--r--  net/dccp/options.c | 14
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/dccp/timer.c | 4
-rw-r--r--  net/decnet/af_decnet.c | 32
-rw-r--r--  net/decnet/dn_rules.c | 2
-rw-r--r--  net/econet/af_econet.c | 4
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  net/ieee80211/ieee80211_tx.c | 86
-rw-r--r--  net/ieee80211/ieee80211_wx.c | 137
-rw-r--r--  net/ipv4/af_inet.c | 9
-rw-r--r--  net/ipv4/arp.c | 9
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 35
-rw-r--r--  net/ipv4/fib_frontend.c | 19
-rw-r--r--  net/ipv4/fib_hash.c | 8
-rw-r--r--  net/ipv4/fib_rules.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 8
-rw-r--r--  net/ipv4/icmp.c | 24
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 13
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 23
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 21
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_forward.c | 11
-rw-r--r--  net/ipv4/ip_fragment.c | 61
-rw-r--r--  net/ipv4/ip_gre.c | 32
-rw-r--r--  net/ipv4/ip_input.c | 32
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 35
-rw-r--r--  net/ipv4/ip_sockglue.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/ipip.c | 24
-rw-r--r--  net/ipv4/ipmr.c | 123
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 5
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_ah.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_esp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_tcp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_udp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 433
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wrr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c | 2
-rw-r--r--  net/ipv4/netfilter/Kconfig | 15
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 3
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 180
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 5
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/protocol.c | 2
-rw-r--r--  net/ipv4/raw.c | 12
-rw-r--r--  net/ipv4/route.c | 262
-rw-r--r--  net/ipv4/syncookies.c | 8
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp.c | 112
-rw-r--r--  net/ipv4/tcp_diag.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 180
-rw-r--r--  net/ipv4/tcp_ipv4.c | 215
-rw-r--r--  net/ipv4/tcp_minisocks.c | 12
-rw-r--r--  net/ipv4/tcp_output.c | 28
-rw-r--r--  net/ipv4/tcp_timer.c | 27
-rw-r--r--  net/ipv4/udp.c | 73
-rw-r--r--  net/ipv4/udp_impl.h | 2
-rw-r--r--  net/ipv4/udplite.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 87
-rw-r--r--  net/ipv6/addrlabel.c | 106
-rw-r--r--  net/ipv6/af_inet6.c | 17
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipv6/icmp.c | 5
-rw-r--r--  net/ipv6/inet6_hashtables.c | 13
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 15
-rw-r--r--  net/ipv6/ip6_tunnel.c | 28
-rw-r--r--  net/ipv6/ip6mr.c | 79
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 2
-rw-r--r--  net/ipv6/mcast.c | 3
-rw-r--r--  net/ipv6/netfilter/Kconfig | 12
-rw-r--r--  net/ipv6/netfilter/Makefile | 1
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 3
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 31
-rw-r--r--  net/ipv6/netfilter/ip6table_security.c | 172
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 5
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/protocol.c | 2
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 63
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/sit.c | 26
-rw-r--r--  net/ipv6/syncookies.c | 6
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 29
-rw-r--r--  net/ipv6/tcp_ipv6.c | 174
-rw-r--r--  net/ipv6/udp.c | 53
-rw-r--r--  net/ipv6/udp_impl.h | 2
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 54
-rw-r--r--  net/irda/irnet/irnet_ppp.h | 7
-rw-r--r--  net/iucv/af_iucv.c | 1
-rw-r--r--  net/iucv/iucv.c | 25
-rw-r--r--  net/key/af_key.c | 622
-rw-r--r--  net/llc/af_llc.c | 4
-rw-r--r--  net/mac80211/Kconfig | 164
-rw-r--r--  net/mac80211/Makefile | 20
-rw-r--r--  net/mac80211/aes_ccm.c | 53
-rw-r--r--  net/mac80211/aes_ccm.h | 6
-rw-r--r--  net/mac80211/cfg.c | 42
-rw-r--r--  net/mac80211/debugfs.c | 58
-rw-r--r--  net/mac80211/debugfs_key.c | 8
-rw-r--r--  net/mac80211/debugfs_netdev.c | 59
-rw-r--r--  net/mac80211/debugfs_netdev.h | 5
-rw-r--r--  net/mac80211/debugfs_sta.c | 56
-rw-r--r--  net/mac80211/ieee80211_i.h | 179
-rw-r--r--  net/mac80211/iface.c | 396
-rw-r--r--  net/mac80211/key.c | 11
-rw-r--r--  net/mac80211/key.h | 54
-rw-r--r--  net/mac80211/main.c | 609
-rw-r--r--  net/mac80211/mesh.c | 38
-rw-r--r--  net/mac80211/mesh.h | 2
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 54
-rw-r--r--  net/mac80211/mesh_plink.c | 88
-rw-r--r--  net/mac80211/michael.c | 116
-rw-r--r--  net/mac80211/michael.h | 8
-rw-r--r--  net/mac80211/mlme.c | 1035
-rw-r--r--  net/mac80211/rate.c | 12
-rw-r--r--  net/mac80211/rate.h | 37
-rw-r--r--  net/mac80211/rc80211_pid.h | 4
-rw-r--r--  net/mac80211/rc80211_pid_algo.c | 40
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c | 8
-rw-r--r--  net/mac80211/rx.c | 683
-rw-r--r--  net/mac80211/sta_info.c | 55
-rw-r--r--  net/mac80211/sta_info.h | 168
-rw-r--r--  net/mac80211/tkip.c | 282
-rw-r--r--  net/mac80211/tkip.h | 8
-rw-r--r--  net/mac80211/tx.c | 1167
-rw-r--r--  net/mac80211/util.c | 136
-rw-r--r--  net/mac80211/wep.c | 71
-rw-r--r--  net/mac80211/wep.h | 2
-rw-r--r--  net/mac80211/wext.c | 145
-rw-r--r--  net/mac80211/wme.c | 676
-rw-r--r--  net/mac80211/wme.h | 43
-rw-r--r--  net/mac80211/wpa.c | 403
-rw-r--r--  net/netfilter/Kconfig | 5
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 29
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 32
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 80
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 11
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 1
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 10
-rw-r--r--  net/netfilter/xt_SECMARK.c | 10
-rw-r--r--  net/netfilter/xt_string.c | 38
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 22
-rw-r--r--  net/packet/af_packet.c | 197
-rw-r--r--  net/rfkill/rfkill-input.c | 98
-rw-r--r--  net/rfkill/rfkill-input.h | 1
-rw-r--r--  net/rfkill/rfkill.c | 314
-rw-r--r--  net/rose/af_rose.c | 24
-rw-r--r--  net/rose/rose_route.c | 29
-rw-r--r--  net/rxrpc/ar-input.c | 5
-rw-r--r--  net/sched/cls_api.c | 20
-rw-r--r--  net/sched/cls_flow.c | 52
-rw-r--r--  net/sched/cls_route.c | 12
-rw-r--r--  net/sched/sch_api.c | 291
-rw-r--r--  net/sched/sch_atm.c | 6
-rw-r--r--  net/sched/sch_cbq.c | 136
-rw-r--r--  net/sched/sch_dsmark.c | 6
-rw-r--r--  net/sched/sch_fifo.c | 47
-rw-r--r--  net/sched/sch_generic.c | 459
-rw-r--r--  net/sched/sch_gred.c | 2
-rw-r--r--  net/sched/sch_hfsc.c | 94
-rw-r--r--  net/sched/sch_htb.c | 183
-rw-r--r--  net/sched/sch_netem.c | 38
-rw-r--r--  net/sched/sch_prio.c | 138
-rw-r--r--  net/sched/sch_red.c | 33
-rw-r--r--  net/sched/sch_sfq.c | 4
-rw-r--r--  net/sched/sch_tbf.c | 33
-rw-r--r--  net/sched/sch_teql.c | 44
-rw-r--r--  net/sctp/associola.c | 3
-rw-r--r--  net/sctp/input.c | 34
-rw-r--r--  net/sctp/output.c | 10
-rw-r--r--  net/sctp/outqueue.c | 34
-rw-r--r--  net/sctp/proc.c | 141
-rw-r--r--  net/sctp/protocol.c | 5
-rw-r--r--  net/sctp/sm_sideeffect.c | 44
-rw-r--r--  net/sctp/sm_statefuns.c | 16
-rw-r--r--  net/sctp/socket.c | 323
-rw-r--r--  net/socket.c | 10
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 2
-rw-r--r--  net/sysctl_net.c | 31
-rw-r--r--  net/tipc/bcast.c | 14
-rw-r--r--  net/tipc/bearer.c | 8
-rw-r--r--  net/tipc/cluster.c | 4
-rw-r--r--  net/tipc/config.c | 11
-rw-r--r--  net/tipc/core.c | 13
-rw-r--r--  net/tipc/core.h | 126
-rw-r--r--  net/tipc/dbg.c | 231
-rw-r--r--  net/tipc/dbg.h | 12
-rw-r--r--  net/tipc/discover.c | 14
-rw-r--r--  net/tipc/discover.h | 2
-rw-r--r--  net/tipc/eth_media.c | 6
-rw-r--r--  net/tipc/link.c | 98
-rw-r--r--  net/tipc/msg.c | 13
-rw-r--r--  net/tipc/msg.h | 42
-rw-r--r--  net/tipc/name_distr.c | 6
-rw-r--r--  net/tipc/name_table.c | 55
-rw-r--r--  net/tipc/net.c | 14
-rw-r--r--  net/tipc/net.h | 2
-rw-r--r--  net/tipc/netlink.c | 16
-rw-r--r--  net/tipc/node.c | 55
-rw-r--r--  net/tipc/port.c | 115
-rw-r--r--  net/tipc/ref.c | 14
-rw-r--r--  net/tipc/socket.c | 62
-rw-r--r--  net/tipc/subscr.c | 249
-rw-r--r--  net/tipc/subscr.h | 34
-rw-r--r--  net/tipc/user_reg.c | 14
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/wanrouter/Kconfig | 2
-rw-r--r--  net/wanrouter/wanmain.c | 6
-rw-r--r--  net/wanrouter/wanproc.c | 2
-rw-r--r--  net/wireless/Kconfig | 11
-rw-r--r--  net/wireless/core.c | 33
-rw-r--r--  net/wireless/nl80211.c | 6
-rw-r--r--  net/wireless/radiotap.c | 16
-rw-r--r--  net/wireless/wext.c | 582
-rw-r--r--  net/x25/af_x25.c | 9
315 files changed, 10570 insertions(+), 8517 deletions(-)
diff --git a/net/802/Kconfig b/net/802/Kconfig
new file mode 100644
index 000000000000..be33d27c8e69
--- /dev/null
+++ b/net/802/Kconfig
@@ -0,0 +1,7 @@
+config STP
+	tristate
+	select LLC
+
+config GARP
+	tristate
+	select STP
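Neither STP nor GARP carries a Kconfig prompt: both are hidden tristate symbols, enabled only indirectly through select — GARP pulls in STP here, just as CONFIG_VLAN_8021Q_GVRP pulls in GARP further down in this series.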
diff --git a/net/802/Makefile b/net/802/Makefile
index 68569ffddea1..7893d679910c 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_FDDI) += fddi.o
 obj-$(CONFIG_HIPPI) += hippi.o
 obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
 obj-$(CONFIG_ATALK) += p8022.o psnap.o
+obj-$(CONFIG_STP) += stp.o
+obj-$(CONFIG_GARP) += garp.o
diff --git a/net/802/garp.c b/net/802/garp.c
new file mode 100644
index 000000000000..1dcb0660c49d
--- /dev/null
+++ b/net/802/garp.c
@@ -0,0 +1,636 @@
+/*
+ *	IEEE 802.1D Generic Attribute Registration Protocol (GARP)
+ *
+ *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	version 2 as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/llc.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
+#include <net/garp.h>
+#include <asm/unaligned.h>
+
+static unsigned int garp_join_time __read_mostly = 200;
+module_param(garp_join_time, uint, 0644);
+MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
+MODULE_LICENSE("GPL");
+
+static const struct garp_state_trans {
+	u8 state;
+	u8 action;
+} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
+	[GARP_APPLICANT_VA] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_AA,
+						    .action = GARP_ACTION_S_JOIN_IN },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AA },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
+	},
+	[GARP_APPLICANT_AA] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_QA,
+						    .action = GARP_ACTION_S_JOIN_IN },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QA },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
+	},
+	[GARP_APPLICANT_QA] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QA },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_LA },
+	},
+	[GARP_APPLICANT_LA] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_VO,
+						    .action = GARP_ACTION_S_LEAVE_EMPTY },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_LA },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_LA },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_LA },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_VA },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
+	},
+	[GARP_APPLICANT_VP] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_AA,
+						    .action = GARP_ACTION_S_JOIN_IN },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AP },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_VO },
+	},
+	[GARP_APPLICANT_AP] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_QA,
+						    .action = GARP_ACTION_S_JOIN_IN },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QP },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_AO },
+	},
+	[GARP_APPLICANT_QP] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QP },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_QO },
+	},
+	[GARP_APPLICANT_VO] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_AO },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_VP },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
+	},
+	[GARP_APPLICANT_AO] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QO },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_AP },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
+	},
+	[GARP_APPLICANT_QO] = {
+		[GARP_EVENT_TRANSMIT_PDU]	= { .state = GARP_APPLICANT_INVALID },
+		[GARP_EVENT_R_JOIN_IN]		= { .state = GARP_APPLICANT_QO },
+		[GARP_EVENT_R_JOIN_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_EMPTY]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_IN]		= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_R_LEAVE_EMPTY]	= { .state = GARP_APPLICANT_VO },
+		[GARP_EVENT_REQ_JOIN]		= { .state = GARP_APPLICANT_QP },
+		[GARP_EVENT_REQ_LEAVE]		= { .state = GARP_APPLICANT_INVALID },
+	},
+};
+
+static int garp_attr_cmp(const struct garp_attr *attr,
+			 const void *data, u8 len, u8 type)
+{
+	if (attr->type != type)
+		return attr->type - type;
+	if (attr->dlen != len)
+		return attr->dlen - len;
+	return memcmp(attr->data, data, len);
+}
+
+static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
+					  const void *data, u8 len, u8 type)
+{
+	struct rb_node *parent = app->gid.rb_node;
+	struct garp_attr *attr;
+	int d;
+
+	while (parent) {
+		attr = rb_entry(parent, struct garp_attr, node);
+		d = garp_attr_cmp(attr, data, len, type);
+		if (d < 0)
+			parent = parent->rb_left;
+		else if (d > 0)
+			parent = parent->rb_right;
+		else
+			return attr;
+	}
+	return NULL;
+}
+
+static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
+{
+	struct rb_node *parent = NULL, **p = &app->gid.rb_node;
+	struct garp_attr *attr;
+	int d;
+
+	while (*p) {
+		parent = *p;
+		attr = rb_entry(parent, struct garp_attr, node);
+		d = garp_attr_cmp(attr, new->data, new->dlen, new->type);
+		if (d < 0)
+			p = &parent->rb_left;
+		else if (d > 0)
+			p = &parent->rb_right;
+	}
+	rb_link_node(&new->node, parent, p);
+	rb_insert_color(&new->node, &app->gid);
+}
+
+static struct garp_attr *garp_attr_create(struct garp_applicant *app,
+					  const void *data, u8 len, u8 type)
+{
+	struct garp_attr *attr;
+
+	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+	if (!attr)
+		return attr;
+	attr->state = GARP_APPLICANT_VO;
+	attr->type = type;
+	attr->dlen = len;
+	memcpy(attr->data, data, len);
+	garp_attr_insert(app, attr);
+	return attr;
+}
+
+static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
+{
+	rb_erase(&attr->node, &app->gid);
+	kfree(attr);
+}
+
+static int garp_pdu_init(struct garp_applicant *app)
+{
+	struct sk_buff *skb;
+	struct garp_pdu_hdr *gp;
+
+#define LLC_RESERVE	sizeof(struct llc_pdu_un)
+	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+			GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb->dev = app->dev;
+	skb->protocol = htons(ETH_P_802_2);
+	skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
+
+	gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp));
+	put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
+
+	app->pdu = skb;
+	return 0;
+}
+
+static int garp_pdu_append_end_mark(struct garp_applicant *app)
+{
+	if (skb_tailroom(app->pdu) < sizeof(u8))
+		return -1;
+	*(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK;
+	return 0;
+}
+
+static void garp_pdu_queue(struct garp_applicant *app)
+{
+	if (!app->pdu)
+		return;
+
+	garp_pdu_append_end_mark(app);
+	garp_pdu_append_end_mark(app);
+
+	llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
+			    LLC_SAP_BSPAN, LLC_PDU_CMD);
+	llc_pdu_init_as_ui_cmd(app->pdu);
+	llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
+			 app->app->proto.group_address);
+
+	skb_queue_tail(&app->queue, app->pdu);
+	app->pdu = NULL;
+}
+
+static void garp_queue_xmit(struct garp_applicant *app)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&app->queue)))
+		dev_queue_xmit(skb);
+}
+
+static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
+{
+	struct garp_msg_hdr *gm;
+
+	if (skb_tailroom(app->pdu) < sizeof(*gm))
+		return -1;
+	gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm));
+	gm->attrtype = attrtype;
+	garp_cb(app->pdu)->cur_type = attrtype;
+	return 0;
+}
+
+static int garp_pdu_append_attr(struct garp_applicant *app,
+				const struct garp_attr *attr,
+				enum garp_attr_event event)
+{
+	struct garp_attr_hdr *ga;
+	unsigned int len;
+	int err;
+again:
+	if (!app->pdu) {
+		err = garp_pdu_init(app);
+		if (err < 0)
+			return err;
+	}
+
+	if (garp_cb(app->pdu)->cur_type != attr->type) {
+		if (garp_cb(app->pdu)->cur_type &&
+		    garp_pdu_append_end_mark(app) < 0)
+			goto queue;
+		if (garp_pdu_append_msg(app, attr->type) < 0)
+			goto queue;
+	}
+
+	len = sizeof(*ga) + attr->dlen;
+	if (skb_tailroom(app->pdu) < len)
+		goto queue;
+	ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len);
+	ga->len = len;
+	ga->event = event;
+	memcpy(ga->data, attr->data, attr->dlen);
+	return 0;
+
+queue:
+	garp_pdu_queue(app);
+	goto again;
+}
+
+static void garp_attr_event(struct garp_applicant *app,
+			    struct garp_attr *attr, enum garp_event event)
+{
+	enum garp_applicant_state state;
+
+	state = garp_applicant_state_table[attr->state][event].state;
+	if (state == GARP_APPLICANT_INVALID)
+		return;
+
+	switch (garp_applicant_state_table[attr->state][event].action) {
+	case GARP_ACTION_NONE:
+		break;
+	case GARP_ACTION_S_JOIN_IN:
+		/* When appending the attribute fails, don't update state in
+		 * order to retry on next TRANSMIT_PDU event. */
+		if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0)
+			return;
+		break;
+	case GARP_ACTION_S_LEAVE_EMPTY:
+		garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
+		/* As a pure applicant, sending a leave message implies that
+		 * the attribute was unregistered and can be destroyed. */
+		garp_attr_destroy(app, attr);
+		return;
+	default:
+		WARN_ON(1);
+	}
+
+	attr->state = state;
+}
+
+int garp_request_join(const struct net_device *dev,
+		      const struct garp_application *appl,
+		      const void *data, u8 len, u8 type)
+{
+	struct garp_port *port = dev->garp_port;
+	struct garp_applicant *app = port->applicants[appl->type];
+	struct garp_attr *attr;
+
+	spin_lock_bh(&app->lock);
+	attr = garp_attr_create(app, data, len, type);
+	if (!attr) {
+		spin_unlock_bh(&app->lock);
+		return -ENOMEM;
+	}
+	garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
+	spin_unlock_bh(&app->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(garp_request_join);
+
+void garp_request_leave(const struct net_device *dev,
+			const struct garp_application *appl,
+			const void *data, u8 len, u8 type)
+{
+	struct garp_port *port = dev->garp_port;
+	struct garp_applicant *app = port->applicants[appl->type];
+	struct garp_attr *attr;
+
+	spin_lock_bh(&app->lock);
+	attr = garp_attr_lookup(app, data, len, type);
+	if (!attr) {
+		spin_unlock_bh(&app->lock);
+		return;
+	}
+	garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
+	spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(garp_request_leave);
+
+static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_event(app, attr, event);
+	}
+}
+
+static void garp_join_timer_arm(struct garp_applicant *app)
+{
+	unsigned long delay;
+
+	delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32;
+	mod_timer(&app->join_timer, jiffies + delay);
+}
+
+static void garp_join_timer(unsigned long data)
+{
+	struct garp_applicant *app = (struct garp_applicant *)data;
+
+	spin_lock(&app->lock);
+	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_pdu_queue(app);
+	spin_unlock(&app->lock);
+
+	garp_queue_xmit(app);
+	garp_join_timer_arm(app);
+}
+
+static int garp_pdu_parse_end_mark(struct sk_buff *skb)
+{
+	if (!pskb_may_pull(skb, sizeof(u8)))
+		return -1;
+	if (*skb->data == GARP_END_MARK) {
+		skb_pull(skb, sizeof(u8));
+		return -1;
+	}
+	return 0;
+}
+
+static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
+			       u8 attrtype)
+{
+	const struct garp_attr_hdr *ga;
+	struct garp_attr *attr;
+	enum garp_event event;
+	unsigned int dlen;
+
+	if (!pskb_may_pull(skb, sizeof(*ga)))
+		return -1;
+	ga = (struct garp_attr_hdr *)skb->data;
+	if (ga->len < sizeof(*ga))
+		return -1;
+
+	if (!pskb_may_pull(skb, ga->len))
+		return -1;
+	skb_pull(skb, ga->len);
+	dlen = ga->len - sizeof(*ga);
+
+	if (attrtype > app->app->maxattr)
+		return 0;
+
+	switch (ga->event) {
+	case GARP_LEAVE_ALL:
+		if (dlen != 0)
+			return -1;
+		garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY);
+		return 0;
+	case GARP_JOIN_EMPTY:
+		event = GARP_EVENT_R_JOIN_EMPTY;
+		break;
+	case GARP_JOIN_IN:
+		event = GARP_EVENT_R_JOIN_IN;
+		break;
+	case GARP_LEAVE_EMPTY:
+		event = GARP_EVENT_R_LEAVE_EMPTY;
+		break;
+	case GARP_EMPTY:
+		event = GARP_EVENT_R_EMPTY;
+		break;
+	default:
+		return 0;
+	}
+
+	if (dlen == 0)
+		return -1;
+	attr = garp_attr_lookup(app, ga->data, dlen, attrtype);
+	if (attr == NULL)
+		return 0;
+	garp_attr_event(app, attr, event);
+	return 0;
+}
+
+static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
+{
+	const struct garp_msg_hdr *gm;
+
+	if (!pskb_may_pull(skb, sizeof(*gm)))
+		return -1;
+	gm = (struct garp_msg_hdr *)skb->data;
+	if (gm->attrtype == 0)
+		return -1;
+	skb_pull(skb, sizeof(*gm));
+
+	while (skb->len > 0) {
+		if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
+			return -1;
+		if (garp_pdu_parse_end_mark(skb) < 0)
+			break;
+	}
+	return 0;
+}
+
+static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+			 struct net_device *dev)
+{
+	struct garp_application *appl = proto->data;
+	struct garp_port *port;
+	struct garp_applicant *app;
+	const struct garp_pdu_hdr *gp;
+
+	port = rcu_dereference(dev->garp_port);
+	if (!port)
+		goto err;
+	app = rcu_dereference(port->applicants[appl->type]);
+	if (!app)
+		goto err;
+
+	if (!pskb_may_pull(skb, sizeof(*gp)))
+		goto err;
+	gp = (struct garp_pdu_hdr *)skb->data;
+	if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
+		goto err;
+	skb_pull(skb, sizeof(*gp));
+
+	spin_lock(&app->lock);
+	while (skb->len > 0) {
+		if (garp_pdu_parse_msg(app, skb) < 0)
+			break;
+		if (garp_pdu_parse_end_mark(skb) < 0)
+			break;
+	}
+	spin_unlock(&app->lock);
+err:
+	kfree_skb(skb);
+}
+
+static int garp_init_port(struct net_device *dev)
+{
+	struct garp_port *port;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+	rcu_assign_pointer(dev->garp_port, port);
+	return 0;
+}
+
+static void garp_release_port(struct net_device *dev)
+{
+	struct garp_port *port = dev->garp_port;
+	unsigned int i;
+
+	for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
+		if (port->applicants[i])
+			return;
+	}
+	rcu_assign_pointer(dev->garp_port, NULL);
+	synchronize_rcu();
+	kfree(port);
+}
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
+{
+	struct garp_applicant *app;
+	int err;
+
+	ASSERT_RTNL();
+
+	if (!dev->garp_port) {
+		err = garp_init_port(dev);
+		if (err < 0)
+			goto err1;
+	}
+
+	err = -ENOMEM;
+	app = kzalloc(sizeof(*app), GFP_KERNEL);
+	if (!app)
+		goto err2;
+
+	err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
+	if (err < 0)
+		goto err3;
+
+	app->dev = dev;
+	app->app = appl;
+	app->gid = RB_ROOT;
+	spin_lock_init(&app->lock);
+	skb_queue_head_init(&app->queue);
+	rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
+	setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
+	garp_join_timer_arm(app);
+	return 0;
+
+err3:
+	kfree(app);
+err2:
+	garp_release_port(dev);
+err1:
+	return err;
+}
+EXPORT_SYMBOL_GPL(garp_init_applicant);
+
+void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
+{
+	struct garp_port *port = dev->garp_port;
+	struct garp_applicant *app = port->applicants[appl->type];
+
+	ASSERT_RTNL();
+
+	rcu_assign_pointer(port->applicants[appl->type], NULL);
+	synchronize_rcu();
+
+	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
+	 * all pending messages before the applicant is gone. */
+	del_timer_sync(&app->join_timer);
+	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_pdu_queue(app);
+	garp_queue_xmit(app);
+
+	dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
+	kfree(app);
+	garp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(garp_uninit_applicant);
+
+int garp_register_application(struct garp_application *appl)
+{
+	appl->proto.rcv = garp_pdu_rcv;
+	appl->proto.data = appl;
+	return stp_proto_register(&appl->proto);
+}
+EXPORT_SYMBOL_GPL(garp_register_application);
+
+void garp_unregister_application(struct garp_application *appl)
+{
+	stp_proto_unregister(&appl->proto);
+}
+EXPORT_SYMBOL_GPL(garp_unregister_application);
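For context, a GARP user (an "application" in 802.1D terms) fills in a struct garp_application, registers it once, enables it per device with garp_init_applicant(), and then requests joins and leaves per attribute. The sketch below is modelled on the GVRP user added elsewhere in this series (net/8021q/vlan_gvrp.c); the attribute constants here are illustrative placeholders, not part of this file:

/* Hypothetical GARP client; MY_ATTR_VID/MY_ATTR_MAX are placeholders
 * (GVRP really uses group address 01:80:c2:00:00:21 and
 * GARP_APPLICATION_GVRP). */
static struct garp_application my_app __read_mostly = {
	.proto.group_address	= { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
	.maxattr		= MY_ATTR_MAX,
	.type			= GARP_APPLICATION_GVRP,
};

static int my_join(struct net_device *dev, u16 vid)
{
	__be16 vid_be = htons(vid);

	/* Creates the attribute and fires GARP_EVENT_REQ_JOIN; the join
	 * timer later transmits the resulting JoinIn PDU. */
	return garp_request_join(dev, &my_app, &vid_be, sizeof(vid_be),
				 MY_ATTR_VID);
}

static int __init my_init(void)
{
	/* Hooks garp_pdu_rcv() into the STP SAP demux below; each device
	 * additionally needs garp_init_applicant(dev, &my_app). */
	return garp_register_application(&my_app);
}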
diff --git a/net/802/stp.c b/net/802/stp.c
new file mode 100644
index 000000000000..0b7a24452d11
--- /dev/null
+++ b/net/802/stp.c
@@ -0,0 +1,102 @@
+/*
+ *	STP SAP demux
+ *
+ *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	version 2 as published by the Free Software Foundation.
+ */
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/llc.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
+#include <net/stp.h>
+
+/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
+#define GARP_ADDR_MIN	0x20
+#define GARP_ADDR_MAX	0x2F
+#define GARP_ADDR_RANGE	(GARP_ADDR_MAX - GARP_ADDR_MIN)
+
+static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto *stp_proto __read_mostly;
+
+static struct llc_sap *sap __read_mostly;
+static unsigned int sap_registered;
+static DEFINE_MUTEX(stp_proto_mutex);
+
+/* Called under rcu_read_lock from LLC */
+static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
+		       struct packet_type *pt, struct net_device *orig_dev)
+{
+	const struct ethhdr *eh = eth_hdr(skb);
+	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+	const struct stp_proto *proto;
+
+	if (pdu->ssap != LLC_SAP_BSPAN ||
+	    pdu->dsap != LLC_SAP_BSPAN ||
+	    pdu->ctrl_1 != LLC_PDU_TYPE_U)
+		goto err;
+
+	if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
+		proto = rcu_dereference(garp_protos[eh->h_dest[5] -
+						    GARP_ADDR_MIN]);
+		if (proto &&
+		    compare_ether_addr(eh->h_dest, proto->group_address))
+			goto err;
+	} else
+		proto = rcu_dereference(stp_proto);
+
+	if (!proto)
+		goto err;
+
+	proto->rcv(proto, skb, dev);
+	return 0;
+
+err:
+	kfree_skb(skb);
+	return 0;
+}
+
+int stp_proto_register(const struct stp_proto *proto)
+{
+	int err = 0;
+
+	mutex_lock(&stp_proto_mutex);
+	if (sap_registered++ == 0) {
+		sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
+		if (!sap) {
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+	if (is_zero_ether_addr(proto->group_address))
+		rcu_assign_pointer(stp_proto, proto);
+	else
+		rcu_assign_pointer(garp_protos[proto->group_address[5] -
+					       GARP_ADDR_MIN], proto);
+out:
+	mutex_unlock(&stp_proto_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(stp_proto_register);
+
+void stp_proto_unregister(const struct stp_proto *proto)
+{
+	mutex_lock(&stp_proto_mutex);
+	if (is_zero_ether_addr(proto->group_address))
+		rcu_assign_pointer(stp_proto, NULL);
+	else
+		rcu_assign_pointer(garp_protos[proto->group_address[5] -
+					       GARP_ADDR_MIN], NULL);
+	synchronize_rcu();
+
+	if (--sap_registered == 0)
+		llc_sap_put(sap);
+	mutex_unlock(&stp_proto_mutex);
+}
+EXPORT_SYMBOL_GPL(stp_proto_unregister);
+
+MODULE_LICENSE("GPL");
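A non-GARP user registers with an all-zero group_address, which claims the default slot: any BPDU arriving on SAP 0x42 whose destination MAC falls outside the GARP block 01:80:c2:00:00:20-2f is then handed to its rcv hook (the kernel bridge's STP is the intended consumer). A minimal sketch with hypothetical names; note that rcv takes ownership of the skb:

static void my_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
		       struct net_device *dev)
{
	/* ... decode the BPDU ...; rcv must consume the skb. */
	kfree_skb(skb);
}

static const struct stp_proto my_stp_proto = {
	/* .group_address left zeroed -> plain STP slot */
	.rcv	= my_stp_rcv,
};

static int __init my_stp_init(void)
{
	return stp_proto_register(&my_stp_proto);
}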
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index c4a382e450e2..fa073a54963e 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -17,3 +17,13 @@ config VLAN_8021Q
 	  will be called 8021q.
 
 	  If unsure, say N.
+
+config VLAN_8021Q_GVRP
+	bool "GVRP (GARP VLAN Registration Protocol) support"
+	depends on VLAN_8021Q
+	select GARP
+	help
+	  Select this to enable GVRP end-system support. GVRP is used for
+	  automatic propagation of registered VLANs to switches.
+
+	  If unsure, say N.
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 10ca7f486c3a..9f4f174ead1c 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -1,12 +1,10 @@
 #
 # Makefile for the Linux VLAN layer.
 #
+obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
+obj-$(CONFIG_VLAN_8021Q) += 8021q.o
 
-obj-$(CONFIG_VLAN_8021Q) += 8021q.o
-
-8021q-objs := vlan.o vlan_dev.o vlan_netlink.o
-
-ifeq ($(CONFIG_PROC_FS),y)
-8021q-objs += vlanproc.o
-endif
+8021q-y := vlan.o vlan_dev.o vlan_netlink.o
+8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
+8021q-$(CONFIG_PROC_FS) += vlanproc.o
 
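The $(subst m,y,$(CONFIG_VLAN_8021Q)) line evaluates to obj-y even when 8021q is configured as a module, so vlan_core.o is always built into the kernel: the hardware-acceleration entry points it provides (see net/8021q/vlan_core.c below) must be callable from NIC drivers whether or not the VLAN module is loaded.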
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 08f14f6c5fd6..b661f47bf10a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -18,22 +18,20 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <asm/uaccess.h> /* for copy_from_user */
 #include <linux/capability.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
-#include <net/datalink.h>
-#include <linux/mm.h>
-#include <linux/in.h>
 #include <linux/init.h>
 #include <linux/rculist.h>
 #include <net/p8022.h>
 #include <net/arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/notifier.h>
+#include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <asm/uaccess.h>
 
 #include <linux/if_vlan.h>
 #include "vlan.h"
@@ -84,13 +82,12 @@ static struct vlan_group *__vlan_find_group(struct net_device *real_dev)
  *
  * Must be invoked with RCU read lock (no preempt)
  */
-struct net_device *__find_vlan_dev(struct net_device *real_dev,
-				   unsigned short VID)
+struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id)
 {
 	struct vlan_group *grp = __vlan_find_group(real_dev);
 
 	if (grp)
-		return vlan_group_get_device(grp, VID);
+		return vlan_group_get_device(grp, vlan_id);
 
 	return NULL;
 }
@@ -118,14 +115,14 @@ static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
 	return grp;
 }
 
-static int vlan_group_prealloc_vid(struct vlan_group *vg, int vid)
+static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
 	struct net_device **array;
 	unsigned int size;
 
 	ASSERT_RTNL();
 
-	array = vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN];
+	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
 	if (array != NULL)
 		return 0;
 
@@ -134,7 +131,7 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, int vid)
 	if (array == NULL)
 		return -ENOBUFS;
 
-	vg->vlan_devices_arrays[vid / VLAN_GROUP_ARRAY_PART_LEN] = array;
+	vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array;
 	return 0;
 }
 
@@ -148,7 +145,7 @@ void unregister_vlan_dev(struct net_device *dev)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct net_device *real_dev = vlan->real_dev;
 	struct vlan_group *grp;
-	unsigned short vlan_id = vlan->vlan_id;
+	u16 vlan_id = vlan->vlan_id;
 
 	ASSERT_RTNL();
 
@@ -166,8 +163,12 @@ void unregister_vlan_dev(struct net_device *dev)
 
 	synchronize_net();
 
+	unregister_netdevice(dev);
+
 	/* If the group is now empty, kill off the group. */
 	if (grp->nr_vlans == 0) {
+		vlan_gvrp_uninit_applicant(real_dev);
+
 		if (real_dev->features & NETIF_F_HW_VLAN_RX)
 			real_dev->vlan_rx_register(real_dev, NULL);
 
@@ -179,8 +180,6 @@ void unregister_vlan_dev(struct net_device *dev)
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
-
-	unregister_netdevice(dev);
 }
 
 static void vlan_transfer_operstate(const struct net_device *dev,
@@ -204,7 +203,7 @@ static void vlan_transfer_operstate(const struct net_device *dev,
 	}
 }
 
-int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id)
+int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 {
 	char *name = real_dev->name;
 
@@ -241,7 +240,7 @@ int register_vlan_dev(struct net_device *dev)
 {
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct net_device *real_dev = vlan->real_dev;
-	unsigned short vlan_id = vlan->vlan_id;
+	u16 vlan_id = vlan->vlan_id;
 	struct vlan_group *grp, *ngrp = NULL;
 	int err;
 
@@ -250,15 +249,18 @@ int register_vlan_dev(struct net_device *dev)
 		ngrp = grp = vlan_group_alloc(real_dev);
 		if (!grp)
 			return -ENOBUFS;
+		err = vlan_gvrp_init_applicant(real_dev);
+		if (err < 0)
+			goto out_free_group;
 	}
 
 	err = vlan_group_prealloc_vid(grp, vlan_id);
 	if (err < 0)
-		goto out_free_group;
+		goto out_uninit_applicant;
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto out_free_group;
+		goto out_uninit_applicant;
 
 	/* Account for reference in struct vlan_dev_info */
 	dev_hold(real_dev);
@@ -279,6 +281,9 @@ int register_vlan_dev(struct net_device *dev)
 
 	return 0;
 
+out_uninit_applicant:
+	if (ngrp)
+		vlan_gvrp_uninit_applicant(real_dev);
 out_free_group:
 	if (ngrp)
 		vlan_group_free(ngrp);
@@ -288,8 +293,7 @@ out_free_group:
 /* Attach a VLAN device to a mac address (ie Ethernet Card).
  * Returns 0 if the device was created or a negative error code otherwise.
  */
-static int register_vlan_device(struct net_device *real_dev,
-				unsigned short VLAN_ID)
+static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 {
 	struct net_device *new_dev;
 	struct net *net = dev_net(real_dev);
@@ -297,10 +301,10 @@ static int register_vlan_device(struct net_device *real_dev,
 	char name[IFNAMSIZ];
 	int err;
 
-	if (VLAN_ID >= VLAN_VID_MASK)
+	if (vlan_id >= VLAN_VID_MASK)
 		return -ERANGE;
 
-	err = vlan_check_real_dev(real_dev, VLAN_ID);
+	err = vlan_check_real_dev(real_dev, vlan_id);
 	if (err < 0)
 		return err;
 
@@ -308,26 +312,26 @@ static int register_vlan_device(struct net_device *real_dev,
 	switch (vn->name_type) {
 	case VLAN_NAME_TYPE_RAW_PLUS_VID:
 		/* name will look like: eth1.0005 */
-		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, VLAN_ID);
+		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
 		break;
 	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
 		/* Put our vlan.VID in the name.
 		 * Name will look like: vlan5
 		 */
-		snprintf(name, IFNAMSIZ, "vlan%i", VLAN_ID);
+		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
 		break;
 	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
 		/* Put our vlan.VID in the name.
 		 * Name will look like: eth0.5
 		 */
-		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, VLAN_ID);
+		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
 		break;
 	case VLAN_NAME_TYPE_PLUS_VID:
 		/* Put our vlan.VID in the name.
 		 * Name will look like: vlan0005
 		 */
 	default:
-		snprintf(name, IFNAMSIZ, "vlan%.4i", VLAN_ID);
+		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
 	}
 
 	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
@@ -342,7 +346,7 @@ static int register_vlan_device(struct net_device *real_dev,
 	 */
 	new_dev->mtu = real_dev->mtu;
 
-	vlan_dev_info(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */
+	vlan_dev_info(new_dev)->vlan_id = vlan_id;
 	vlan_dev_info(new_dev)->real_dev = real_dev;
 	vlan_dev_info(new_dev)->dent = NULL;
 	vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
@@ -536,7 +540,6 @@ static struct notifier_block vlan_notifier_block __read_mostly = {
 static int vlan_ioctl_handler(struct net *net, void __user *arg)
 {
 	int err;
-	unsigned short vid = 0;
 	struct vlan_ioctl_args args;
 	struct net_device *dev = NULL;
 
@@ -563,8 +566,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 			goto out;
 
 		err = -EINVAL;
-		if (args.cmd != ADD_VLAN_CMD &&
-		    !(dev->priv_flags & IFF_802_1Q_VLAN))
+		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
 			goto out;
 	}
 
@@ -592,9 +594,9 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		err = vlan_dev_set_vlan_flag(dev,
-					     args.u.flag,
-					     args.vlan_qos);
+		err = vlan_dev_change_flags(dev,
+					    args.vlan_qos ? args.u.flag : 0,
+					    args.u.flag);
 		break;
 
 	case SET_VLAN_NAME_TYPE_CMD:
@@ -638,8 +640,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 
 	case GET_VLAN_VID_CMD:
 		err = 0;
-		vlan_dev_get_vid(dev, &vid);
-		args.u.VID = vid;
+		args.u.VID = vlan_dev_vlan_id(dev);
 		if (copy_to_user(arg, &args,
 				 sizeof(struct vlan_ioctl_args)))
 			err = -EFAULT;
@@ -714,14 +715,20 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err2;
 
-	err = vlan_netlink_init();
+	err = vlan_gvrp_init();
 	if (err < 0)
 		goto err3;
 
+	err = vlan_netlink_init();
+	if (err < 0)
+		goto err4;
+
 	dev_add_pack(&vlan_packet_type);
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
+err4:
+	vlan_gvrp_uninit();
 err3:
 	unregister_netdevice_notifier(&vlan_notifier_block);
 err2:
@@ -746,8 +753,9 @@ static void __exit vlan_cleanup_module(void)
 		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
 	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
-
 	synchronize_net();
+
+	vlan_gvrp_uninit();
 }
 
 module_init(vlan_proto_init);
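The SET_VLAN_FLAG_CMD branch above translates the old (flag, value) pair into the new mask-based vlan_dev_change_flags(dev, flags, mask) interface. The function body lives in vlan_dev.c and is not among the hunks shown; its update step presumably reduces to the usual read-modify-write:

	/* only bits set in mask change, taking their value from flags */
	vlan->flags = (vlan->flags & ~mask) | (flags & mask);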
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5229a72c7ea1..a6603a4d917f 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -3,6 +3,55 @@
 
 #include <linux/if_vlan.h>
 
+
+/**
+ *	struct vlan_priority_tci_mapping - vlan egress priority mappings
+ *	@priority: skb priority
+ *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ *	@next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+	u32 priority;
+	u16 vlan_qos;
+	struct vlan_priority_tci_mapping *next;
+};
+
+/**
+ *	struct vlan_dev_info - VLAN private device data
+ *	@nr_ingress_mappings: number of ingress priority mappings
+ *	@ingress_priority_map: ingress priority mappings
+ *	@nr_egress_mappings: number of egress priority mappings
+ *	@egress_priority_map: hash of egress priority mappings
+ *	@vlan_id: VLAN identifier
+ *	@flags: device flags
+ *	@real_dev: underlying netdevice
+ *	@real_dev_addr: address of underlying netdevice
+ *	@dent: proc dir entry
+ *	@cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
+ *	@cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
+ */
+struct vlan_dev_info {
+	unsigned int nr_ingress_mappings;
+	u32 ingress_priority_map[8];
+	unsigned int nr_egress_mappings;
+	struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+	u16 vlan_id;
+	u16 flags;
+
+	struct net_device *real_dev;
+	unsigned char real_dev_addr[ETH_ALEN];
+
+	struct proc_dir_entry *dent;
+	unsigned long cnt_inc_headroom_on_tx;
+	unsigned long cnt_encap_on_xmit;
+};
+
+static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
 #define VLAN_GRP_HASH_SHIFT	5
 #define VLAN_GRP_HASH_SIZE	(1 << VLAN_GRP_HASH_SHIFT)
 #define VLAN_GRP_HASH_MASK	(VLAN_GRP_HASH_SIZE - 1)
@@ -18,26 +67,47 @@
  * Must be invoked with rcu_read_lock (ie preempt disabled)
  * or with RTNL.
  */
-struct net_device *__find_vlan_dev(struct net_device *real_dev,
-				   unsigned short VID); /* vlan.c */
+struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id);
 
 /* found in vlan_dev.c */
 int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev);
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
-				   u32 skb_prio, short vlan_prio);
+				   u32 skb_prio, u16 vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
-				 u32 skb_prio, short vlan_prio);
-int vlan_dev_set_vlan_flag(const struct net_device *dev,
-			   u32 flag, short flag_val);
+				 u32 skb_prio, u16 vlan_prio);
+int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
-void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result);
 
-int vlan_check_real_dev(struct net_device *real_dev, unsigned short vlan_id);
+int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id);
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev);
 
+static inline u32 vlan_get_ingress_priority(struct net_device *dev,
+					    u16 vlan_tci)
+{
+	struct vlan_dev_info *vip = vlan_dev_info(dev);
+
+	return vip->ingress_priority_map[(vlan_tci >> 13) & 0x7];
+}
+
+#ifdef CONFIG_VLAN_8021Q_GVRP
+extern int vlan_gvrp_request_join(const struct net_device *dev);
+extern void vlan_gvrp_request_leave(const struct net_device *dev);
+extern int vlan_gvrp_init_applicant(struct net_device *dev);
+extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_gvrp_init(void);
+extern void vlan_gvrp_uninit(void);
+#else
+static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_gvrp_init(void) { return 0; }
+static inline void vlan_gvrp_uninit(void) {}
+#endif
+
 int vlan_netlink_init(void);
 void vlan_netlink_fini(void);
 
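Worked example for the new vlan_get_ingress_priority() inline above: the 802.1p priority code point sits in the top three bits of the TCI, so a tag with TCI 0xA005 carries PCP (0xA005 >> 13) & 0x7 = 5 and VID 0xA005 & VLAN_VID_MASK = 5; the resulting skb->priority is whatever ingress_priority_map[5] was set to.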
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
new file mode 100644
index 000000000000..916061f681b6
--- /dev/null
+++ b/net/8021q/vlan_core.c
@@ -0,0 +1,64 @@
1#include <linux/skbuff.h>
2#include <linux/netdevice.h>
3#include <linux/if_vlan.h>
4#include "vlan.h"
5
6/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
7int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
8 u16 vlan_tci, int polling)
9{
10 struct net_device_stats *stats;
11
12 if (skb_bond_should_drop(skb)) {
13 dev_kfree_skb_any(skb);
14 return NET_RX_DROP;
15 }
16
17 skb->vlan_tci = vlan_tci;
18 netif_nit_deliver(skb);
19
20 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
21 if (skb->dev == NULL) {
22 dev_kfree_skb_any(skb);
23 /* Not NET_RX_DROP, this is not being dropped
24 * due to congestion. */
25 return NET_RX_SUCCESS;
26 }
27 skb->dev->last_rx = jiffies;
28 skb->vlan_tci = 0;
29
30 stats = &skb->dev->stats;
31 stats->rx_packets++;
32 stats->rx_bytes += skb->len;
33
34 skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
35 switch (skb->pkt_type) {
36 case PACKET_BROADCAST:
37 break;
38 case PACKET_MULTICAST:
39 stats->multicast++;
40 break;
41 case PACKET_OTHERHOST:
42 /* Our lower layer thinks this is not local, let's make sure.
43 * This allows the VLAN to have a different MAC than the
44 * underlying device, and still route correctly. */
45 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
46 skb->dev->dev_addr))
47 skb->pkt_type = PACKET_HOST;
48 break;
49 };
50 return (polling ? netif_receive_skb(skb) : netif_rx(skb));
51}
52EXPORT_SYMBOL(__vlan_hwaccel_rx);
53
54struct net_device *vlan_dev_real_dev(const struct net_device *dev)
55{
56 return vlan_dev_info(dev)->real_dev;
57}
58EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
59
60u16 vlan_dev_vlan_id(const struct net_device *dev)
61{
62 return vlan_dev_info(dev)->vlan_id;
63}
64EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
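Drivers normally reach __vlan_hwaccel_rx() through the vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() wrappers in <linux/if_vlan.h> (not part of this diff), passing polling = 0 or 1 respectively. A sketch of a NAPI receive path, with an invented driver structure caching the vlan_group from its vlan_rx_register() callback:

static void my_nic_rx(struct my_nic *priv, struct sk_buff *skb, u16 tci)
{
	if (priv->vlgrp)
		/* polling = 1 selects netif_receive_skb() (NAPI context) */
		__vlan_hwaccel_rx(skb, priv->vlgrp, tci, 1);
	else
		netif_receive_skb(skb);
}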
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 5d055c242ed8..f42bc2b26b85 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -21,21 +21,15 @@
  */
 
 #include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <asm/uaccess.h> /* for copy_from_user */
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <net/datalink.h>
-#include <net/p8022.h>
+#include <linux/ethtool.h>
 #include <net/arp.h>
 
 #include "vlan.h"
 #include "vlanproc.h"
 #include <linux/if_vlan.h>
-#include <net/ip.h>
 
 /*
  * Rebuild the Ethernet MAC header. This is called after an ARP
@@ -73,11 +67,8 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
 {
 	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
-		if (skb_shared(skb) || skb_cloned(skb)) {
-			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
-			kfree_skb(skb);
-			skb = nskb;
-		}
+		if (skb_cow(skb, skb_headroom(skb)) < 0)
+			skb = NULL;
 		if (skb) {
 			/* Lifted from Gleb's VLAN code... */
 			memmove(skb->data - ETH_HLEN,
@@ -149,9 +140,9 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *ptype, struct net_device *orig_dev)
 {
         struct vlan_hdr *vhdr;
-        unsigned short vid;
         struct net_device_stats *stats;
-        unsigned short vlan_TCI;
+        u16 vlan_id;
+        u16 vlan_tci;
 
         skb = skb_share_check(skb, GFP_ATOMIC);
         if (skb == NULL)
@@ -161,14 +152,14 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                 goto err_free;
 
         vhdr = (struct vlan_hdr *)skb->data;
-        vlan_TCI = ntohs(vhdr->h_vlan_TCI);
-        vid = (vlan_TCI & VLAN_VID_MASK);
+        vlan_tci = ntohs(vhdr->h_vlan_TCI);
+        vlan_id = vlan_tci & VLAN_VID_MASK;
 
         rcu_read_lock();
-        skb->dev = __find_vlan_dev(dev, vid);
+        skb->dev = __find_vlan_dev(dev, vlan_id);
         if (!skb->dev) {
                 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-                         __func__, (unsigned int)vid, dev->name);
+                         __func__, vlan_id, dev->name);
                 goto err_unlock;
         }
 
@@ -180,11 +171,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
         skb_pull_rcsum(skb, VLAN_HLEN);
 
-        skb->priority = vlan_get_ingress_priority(skb->dev,
-                                                  ntohs(vhdr->h_vlan_TCI));
+        skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
 
         pr_debug("%s: priority: %u for TCI: %hu\n",
-                 __func__, skb->priority, ntohs(vhdr->h_vlan_TCI));
+                 __func__, skb->priority, vlan_tci);
 
         switch (skb->pkt_type) {
         case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
@@ -227,7 +217,7 @@ err_free:
         return NET_RX_DROP;
 }
 
-static inline unsigned short
+static inline u16
 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
         struct vlan_priority_tci_mapping *mp;
@@ -259,103 +249,44 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                   unsigned int len)
 {
         struct vlan_hdr *vhdr;
-        unsigned short veth_TCI = 0;
-        int rc = 0;
-        int build_vlan_header = 0;
-        struct net_device *vdev = dev;
-
-        pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n",
-                 __func__, skb, type, len, vlan_dev_info(dev)->vlan_id,
-                 daddr);
-
-        /* build vlan header only if re_order_header flag is NOT set.  This
-         * fixes some programs that get confused when they see a VLAN device
-         * sending a frame that is VLAN encoded (the consensus is that the VLAN
-         * device should look completely like an Ethernet device when the
-         * REORDER_HEADER flag is set)  The drawback to this is some extra
-         * header shuffling in the hard_start_xmit.  Users can turn off this
-         * REORDER behaviour with the vconfig tool.
-         */
-        if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR))
-                build_vlan_header = 1;
+        unsigned int vhdrlen = 0;
+        u16 vlan_tci = 0;
+        int rc;
 
-        if (build_vlan_header) {
-                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
+        if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
+                return -ENOSPC;
 
-                /* build the four bytes that make this a VLAN header. */
-
-                /* Now, construct the second two bytes. This field looks
-                 * something like:
-                 * usr_priority: 3 bits  (high bits)
-                 * CFI           1 bit
-                 * VLAN ID       12 bits (low bits)
-                 *
-                 */
-                veth_TCI = vlan_dev_info(dev)->vlan_id;
-                veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
+        if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-                vhdr->h_vlan_TCI = htons(veth_TCI);
+                vlan_tci = vlan_dev_info(dev)->vlan_id;
+                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+                vhdr->h_vlan_TCI = htons(vlan_tci);
 
                 /*
                  *  Set the protocol type. For a packet of type ETH_P_802_3 we
                  *  put the length in here instead. It is up to the 802.2
                  *  layer to carry protocol information.
                  */
-
                 if (type != ETH_P_802_3)
                         vhdr->h_vlan_encapsulated_proto = htons(type);
                 else
                         vhdr->h_vlan_encapsulated_proto = htons(len);
 
                 skb->protocol = htons(ETH_P_8021Q);
-                skb_reset_network_header(skb);
+                type = ETH_P_8021Q;
+                vhdrlen = VLAN_HLEN;
         }
 
         /* Before delegating work to the lower layer, enter our MAC-address */
         if (saddr == NULL)
                 saddr = dev->dev_addr;
 
+        /* Now make the underlying real hard header */
         dev = vlan_dev_info(dev)->real_dev;
-
-        /* MPLS can send us skbuffs w/out enough space. This check will grow
-         * the skb if it doesn't have enough headroom. Not a beautiful solution,
-         * so I'll tick a counter so that users can know it's happening...
-         * If they care...
-         */
-
-        /* NOTE: This may still break if the underlying device is not the final
-         * device (and thus there are more headers to add...) It should work for
-         * good-ole-ethernet though.
-         */
-        if (skb_headroom(skb) < dev->hard_header_len) {
-                struct sk_buff *sk_tmp = skb;
-                skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
-                kfree_skb(sk_tmp);
-                if (skb == NULL) {
-                        struct net_device_stats *stats = &vdev->stats;
-                        stats->tx_dropped++;
-                        return -ENOMEM;
-                }
-                vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++;
-                pr_debug("%s: %s: had to grow skb\n", __func__, vdev->name);
-        }
-
-        if (build_vlan_header) {
-                /* Now make the underlying real hard header */
-                rc = dev_hard_header(skb, dev, ETH_P_8021Q, daddr, saddr,
-                                     len + VLAN_HLEN);
-                if (rc > 0)
-                        rc += VLAN_HLEN;
-                else if (rc < 0)
-                        rc -= VLAN_HLEN;
-        } else
-                /* If here, then we'll just make a normal looking ethernet
-                 * frame, but, the hard_start_xmit method will insert the tag
-                 * (it has to be able to do this for bridged and other skbs
-                 * that don't come down the protocol stack in an orderly manner.
-                 */
-                rc = dev_hard_header(skb, dev, type, daddr, saddr, len);
-
+        rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+        if (rc > 0)
+                rc += vhdrlen;
         return rc;
 }
 
@@ -369,78 +300,49 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
          * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
          * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
          */
-
         if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
             vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
-                int orig_headroom = skb_headroom(skb);
-                unsigned short veth_TCI;
+                unsigned int orig_headroom = skb_headroom(skb);
+                u16 vlan_tci;
 
-                /* This is not a VLAN frame...but we can fix that! */
                 vlan_dev_info(dev)->cnt_encap_on_xmit++;
 
-                pr_debug("%s: proto to encap: 0x%hx\n",
-                         __func__, ntohs(veth->h_vlan_proto));
-                /* Construct the second two bytes. This field looks something
-                 * like:
-                 * usr_priority: 3 bits  (high bits)
-                 * CFI           1 bit
-                 * VLAN ID       12 bits (low bits)
-                 */
-                veth_TCI = vlan_dev_info(dev)->vlan_id;
-                veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
-
-                skb = __vlan_put_tag(skb, veth_TCI);
+                vlan_tci = vlan_dev_info(dev)->vlan_id;
+                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+                skb = __vlan_put_tag(skb, vlan_tci);
                 if (!skb) {
                         stats->tx_dropped++;
-                        return 0;
+                        return NETDEV_TX_OK;
                 }
 
                 if (orig_headroom < VLAN_HLEN)
                         vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
         }
 
-        pr_debug("%s: about to send skb: %p to dev: %s\n",
-                 __func__, skb, skb->dev->name);
-        pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n",
-                 veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
-                 veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
-                 veth->h_source[0], veth->h_source[1], veth->h_source[2],
-                 veth->h_source[3], veth->h_source[4], veth->h_source[5],
-                 veth->h_vlan_proto, veth->h_vlan_TCI,
-                 veth->h_vlan_encapsulated_proto);
-
-        stats->tx_packets++; /* for statics only */
+        stats->tx_packets++;
         stats->tx_bytes += skb->len;
 
         skb->dev = vlan_dev_info(dev)->real_dev;
         dev_queue_xmit(skb);
-
-        return 0;
+        return NETDEV_TX_OK;
 }
 
 static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
                                             struct net_device *dev)
 {
         struct net_device_stats *stats = &dev->stats;
-        unsigned short veth_TCI;
+        u16 vlan_tci;
 
-        /* Construct the second two bytes. This field looks something
-         * like:
-         * usr_priority: 3 bits  (high bits)
-         * CFI           1 bit
-         * VLAN ID       12 bits (low bits)
-         */
-        veth_TCI = vlan_dev_info(dev)->vlan_id;
-        veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
-        skb = __vlan_hwaccel_put_tag(skb, veth_TCI);
+        vlan_tci = vlan_dev_info(dev)->vlan_id;
+        vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
+        skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 
         stats->tx_packets++;
         stats->tx_bytes += skb->len;
 
         skb->dev = vlan_dev_info(dev)->real_dev;
         dev_queue_xmit(skb);
-
-        return 0;
+        return NETDEV_TX_OK;
 }
 
446static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 348static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
@@ -457,7 +359,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
-                                   u32 skb_prio, short vlan_prio)
+                                   u32 skb_prio, u16 vlan_prio)
 {
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
 
@@ -470,7 +372,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
 }
 
 int vlan_dev_set_egress_priority(const struct net_device *dev,
-                                 u32 skb_prio, short vlan_prio)
+                                 u32 skb_prio, u16 vlan_prio)
 {
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
         struct vlan_priority_tci_mapping *mp = NULL;
@@ -507,18 +409,23 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 }
 
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
-int vlan_dev_set_vlan_flag(const struct net_device *dev,
-                           u32 flag, short flag_val)
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-        /* verify flag is supported */
-        if (flag == VLAN_FLAG_REORDER_HDR) {
-                if (flag_val)
-                        vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR;
+        struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        u32 old_flags = vlan->flags;
+
+        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+                return -EINVAL;
+
+        vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+        if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+                if (vlan->flags & VLAN_FLAG_GVRP)
+                        vlan_gvrp_request_join(dev);
                 else
-                        vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR;
-                return 0;
+                        vlan_gvrp_request_leave(dev);
         }
-        return -EINVAL;
+        return 0;
 }
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
@@ -526,11 +433,6 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
         strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
 }
 
-void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result)
-{
-        *result = vlan_dev_info(dev)->vlan_id;
-}
-
 static int vlan_dev_open(struct net_device *dev)
 {
         struct vlan_dev_info *vlan = vlan_dev_info(dev);
@@ -543,21 +445,44 @@ static int vlan_dev_open(struct net_device *dev)
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
                 err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
                 if (err < 0)
-                        return err;
+                        goto out;
+        }
+
+        if (dev->flags & IFF_ALLMULTI) {
+                err = dev_set_allmulti(real_dev, 1);
+                if (err < 0)
+                        goto del_unicast;
         }
+        if (dev->flags & IFF_PROMISC) {
+                err = dev_set_promiscuity(real_dev, 1);
+                if (err < 0)
+                        goto clear_allmulti;
+        }
+
         memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
 
-        if (dev->flags & IFF_ALLMULTI)
-                dev_set_allmulti(real_dev, 1);
-        if (dev->flags & IFF_PROMISC)
-                dev_set_promiscuity(real_dev, 1);
+        if (vlan->flags & VLAN_FLAG_GVRP)
+                vlan_gvrp_request_join(dev);
 
         return 0;
+
+clear_allmulti:
+        if (dev->flags & IFF_ALLMULTI)
+                dev_set_allmulti(real_dev, -1);
+del_unicast:
+        if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+                dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+out:
+        return err;
 }
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-        struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+        struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        struct net_device *real_dev = vlan->real_dev;
+
+        if (vlan->flags & VLAN_FLAG_GVRP)
+                vlan_gvrp_request_leave(dev);
 
         dev_mc_unsync(real_dev, dev);
         dev_unicast_unsync(real_dev, dev);
@@ -645,6 +570,20 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
 
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+                                     struct netdev_queue *txq,
+                                     void *_subclass)
+{
+        lockdep_set_class_and_subclass(&txq->_xmit_lock,
+                                       &vlan_netdev_xmit_lock_key,
+                                       *(int *)_subclass);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+{
+        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
+}
+
 static const struct header_ops vlan_header_ops = {
         .create  = vlan_dev_hard_header,
         .rebuild = vlan_dev_rebuild_header,
@@ -683,11 +622,10 @@ static int vlan_dev_init(struct net_device *dev)
                 dev->hard_start_xmit = vlan_dev_hard_start_xmit;
         }
 
-        if (real_dev->priv_flags & IFF_802_1Q_VLAN)
+        if (is_vlan_dev(real_dev))
                 subclass = 1;
 
-        lockdep_set_class_and_subclass(&dev->_xmit_lock,
-                        &vlan_netdev_xmit_lock_key, subclass);
+        vlan_dev_set_lockdep_class(dev, subclass);
         return 0;
 }
 
@@ -705,6 +643,35 @@ static void vlan_dev_uninit(struct net_device *dev)
         }
 }
 
+static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
+{
+        const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        struct net_device *real_dev = vlan->real_dev;
+
+        if (real_dev->ethtool_ops == NULL ||
+            real_dev->ethtool_ops->get_rx_csum == NULL)
+                return 0;
+        return real_dev->ethtool_ops->get_rx_csum(real_dev);
+}
+
+static u32 vlan_ethtool_get_flags(struct net_device *dev)
+{
+        const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        struct net_device *real_dev = vlan->real_dev;
+
+        if (!(real_dev->features & NETIF_F_HW_VLAN_RX) ||
+            real_dev->ethtool_ops == NULL ||
+            real_dev->ethtool_ops->get_flags == NULL)
+                return 0;
+        return real_dev->ethtool_ops->get_flags(real_dev);
+}
+
+static const struct ethtool_ops vlan_ethtool_ops = {
+        .get_link               = ethtool_op_get_link,
+        .get_rx_csum            = vlan_ethtool_get_rx_csum,
+        .get_flags              = vlan_ethtool_get_flags,
+};
+
 void vlan_setup(struct net_device *dev)
 {
         ether_setup(dev);
@@ -723,6 +690,7 @@ void vlan_setup(struct net_device *dev)
         dev->change_rx_flags = vlan_dev_change_rx_flags;
         dev->do_ioctl        = vlan_dev_ioctl;
         dev->destructor      = free_netdev;
+        dev->ethtool_ops     = &vlan_ethtool_ops;
 
         memset(dev->broadcast, 0, ETH_ALEN);
 }
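Both transmit paths above defer the actual tagging to __vlan_put_tag() or to the hardware. For readers following along, software insertion amounts to roughly the following; this is a simplified sketch, not the exact include/linux/if_vlan.h implementation:

/* Simplified sketch of software VLAN tag insertion: open four bytes
 * of headroom, slide the MAC addresses down, then fill in the 802.1Q
 * ethertype and the TCI.  Error handling is reduced to the minimum.
 */
static struct sk_buff *example_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
        struct vlan_ethhdr *veth;

        if (skb_cow_head(skb, VLAN_HLEN) < 0) {
                kfree_skb(skb);
                return NULL;
        }
        veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

        /* move h_dest/h_source to the front of the enlarged header */
        memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);

        veth->h_vlan_proto = htons(ETH_P_8021Q);
        veth->h_vlan_TCI = htons(vlan_tci);
        skb->protocol = htons(ETH_P_8021Q);
        return skb;
}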
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
new file mode 100644
index 000000000000..061ceceeef12
--- /dev/null
+++ b/net/8021q/vlan_gvrp.c
@@ -0,0 +1,66 @@
+/*
+ *      IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
+ *
+ *      Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <net/garp.h>
+#include "vlan.h"
+
+#define GARP_GVRP_ADDRESS       { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum gvrp_attributes {
+        GVRP_ATTR_INVALID,
+        GVRP_ATTR_VID,
+        __GVRP_ATTR_MAX
+};
+#define GVRP_ATTR_MAX   (__GVRP_ATTR_MAX - 1)
+
+static struct garp_application vlan_gvrp_app __read_mostly = {
+        .proto.group_address    = GARP_GVRP_ADDRESS,
+        .maxattr                = GVRP_ATTR_MAX,
+        .type                   = GARP_APPLICATION_GVRP,
+};
+
+int vlan_gvrp_request_join(const struct net_device *dev)
+{
+        const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        __be16 vlan_id = htons(vlan->vlan_id);
+
+        return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
+                                 &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
+
+void vlan_gvrp_request_leave(const struct net_device *dev)
+{
+        const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+        __be16 vlan_id = htons(vlan->vlan_id);
+
+        garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
+                           &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
+
+int vlan_gvrp_init_applicant(struct net_device *dev)
+{
+        return garp_init_applicant(dev, &vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit_applicant(struct net_device *dev)
+{
+        garp_uninit_applicant(dev, &vlan_gvrp_app);
+}
+
+int __init vlan_gvrp_init(void)
+{
+        return garp_register_application(&vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit(void)
+{
+        garp_unregister_application(&vlan_gvrp_app);
+}
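The glue that calls these helpers lives in vlan.h and the files above. Assuming the obvious call order, per-device usage looks roughly like this; the wrapper name and its error handling are illustrative:

/* Sketch: assumed applicant lifecycle.  The applicant is set up once
 * per underlying device; individual VLAN devices then join and leave
 * with their VID as the GARP attribute.
 */
static int example_enable_gvrp(struct net_device *real_dev,
                               struct net_device *vlan_dev)
{
        int err;

        err = vlan_gvrp_init_applicant(real_dev);
        if (err < 0)
                return err;

        err = vlan_gvrp_request_join(vlan_dev);
        if (err < 0)
                vlan_gvrp_uninit_applicant(real_dev);
        return err;
}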
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index c93e69ec28ed..e9c91dcecc9b 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -59,7 +59,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
         }
         if (data[IFLA_VLAN_FLAGS]) {
                 flags = nla_data(data[IFLA_VLAN_FLAGS]);
-                if ((flags->flags & flags->mask) & ~VLAN_FLAG_REORDER_HDR)
+                if ((flags->flags & flags->mask) &
+                    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
                         return -EINVAL;
         }
 
@@ -75,7 +76,6 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 static int vlan_changelink(struct net_device *dev,
                            struct nlattr *tb[], struct nlattr *data[])
 {
-        struct vlan_dev_info *vlan = vlan_dev_info(dev);
         struct ifla_vlan_flags *flags;
         struct ifla_vlan_qos_mapping *m;
         struct nlattr *attr;
@@ -83,8 +83,7 @@ static int vlan_changelink(struct net_device *dev,
 
         if (data[IFLA_VLAN_FLAGS]) {
                 flags = nla_data(data[IFLA_VLAN_FLAGS]);
-                vlan->flags = (vlan->flags & ~flags->mask) |
-                              (flags->flags & flags->mask);
+                vlan_dev_change_flags(dev, flags->flags, flags->mask);
         }
         if (data[IFLA_VLAN_INGRESS_QOS]) {
                 nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
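IFLA_VLAN_FLAGS carries a flags/mask pair so userspace can flip single bits without a read-modify-write race. The update rule that vlan_dev_change_flags() now centralizes reduces to:

/* The flags/mask update rule used above: bits inside 'mask' come from
 * 'flags', bits outside it keep their old value.
 */
static u32 example_apply_flags(u32 old_flags, u32 flags, u32 mask)
{
        return (old_flags & ~mask) | (flags & mask);
}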
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 08b54b593d56..0feefa4e1a4b 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -18,16 +18,9 @@
 *****************************************************************************/
 
 #include <linux/module.h>
-#include <linux/stddef.h>       /* offsetof(), etc. */
-#include <linux/errno.h>        /* return codes */
+#include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/slab.h>         /* kmalloc(), kfree() */
-#include <linux/mm.h>
-#include <linux/string.h>       /* inline mem*, str* functions */
-#include <linux/init.h>         /* __initfunc et al. */
-#include <asm/byteorder.h>      /* htons(), etc. */
-#include <asm/uaccess.h>        /* copy_to_user */
-#include <asm/io.h>
+#include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/fs.h>
@@ -290,7 +283,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
         static const char fmt[] = "%30s %12lu\n";
         int i;
 
-        if (!(vlandev->priv_flags & IFF_802_1Q_VLAN))
+        if (!is_vlan_dev(vlandev))
                 return 0;
 
         seq_printf(seq,
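is_vlan_dev() replaces the open-coded priv_flags test here and in vlan_dev.c above; presumably it is just a thin wrapper of this shape (the real helper lives in include/linux/if_vlan.h):

/* Sketch of the helper the open-coded tests are replaced with. */
static inline int example_is_vlan_dev(struct net_device *dev)
{
        return dev->priv_flags & IFF_802_1Q_VLAN;
}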
diff --git a/net/Kconfig b/net/Kconfig
index acbf7c60e89b..b98668751749 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -181,6 +181,7 @@ source "net/dccp/Kconfig"
 source "net/sctp/Kconfig"
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
+source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/8021q/Kconfig"
 source "net/decnet/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index b7a13643b549..4f43e7f874f3 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,7 +42,9 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
 obj-$(CONFIG_ATM)               += atm/
 obj-$(CONFIG_DECNET)            += decnet/
 obj-$(CONFIG_ECONET)            += econet/
-obj-$(CONFIG_VLAN_8021Q)        += 8021q/
+ifneq ($(CONFIG_VLAN_8021Q),)
+obj-y                           += 8021q/
+endif
 obj-$(CONFIG_IP_DCCP)           += dccp/
 obj-$(CONFIG_IP_SCTP)           += sctp/
 obj-y                           += wireless/
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 6afa77d63bb5..82e85abc303d 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -9,7 +9,7 @@
 #include "signaling.h"
 #include "addr.h"
 
-static int check_addr(struct sockaddr_atmsvc *addr)
+static int check_addr(const struct sockaddr_atmsvc *addr)
 {
         int i;
 
@@ -23,7 +23,7 @@ static int check_addr(struct sockaddr_atmsvc *addr)
         return -EINVAL;
 }
 
-static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b)
+static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b)
 {
         if (*a->sas_addr.prv)
                 if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN))
@@ -35,7 +35,7 @@ static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b)
         return !strcmp(a->sas_addr.pub, b->sas_addr.pub);
 }
 
-static void notify_sigd(struct atm_dev *dev)
+static void notify_sigd(const struct atm_dev *dev)
 {
         struct sockaddr_atmpvc pvc;
 
@@ -63,7 +63,7 @@ void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype)
         notify_sigd(dev);
 }
 
-int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
                  enum atm_addr_type_t atype)
 {
         unsigned long flags;
@@ -98,7 +98,7 @@ int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
         return 0;
 }
 
-int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
                  enum atm_addr_type_t atype)
 {
         unsigned long flags;
diff --git a/net/atm/addr.h b/net/atm/addr.h
index f39433ad45da..6837e9e7eb13 100644
--- a/net/atm/addr.h
+++ b/net/atm/addr.h
@@ -10,9 +10,9 @@
 #include <linux/atmdev.h>
 
 void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type);
-int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
                  enum atm_addr_type_t type);
-int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
                  enum atm_addr_type_t type);
 int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf,
                  size_t size, enum atm_addr_type_t type);
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 05fafdc2eea3..8d9a6f158880 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -52,12 +52,12 @@ static void skb_debug(const struct sk_buff *skb)
 #define ETHERTYPE_IPV6  0x86, 0xdd
 #define PAD_BRIDGED     0x00, 0x00
 
-static unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
-static unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
-static unsigned char llc_oui_pid_pad[] =
+static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
+static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
+static const unsigned char llc_oui_pid_pad[] =
     { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
-static unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
-static unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
+static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
+static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
 
 enum br2684_encaps {
         e_vc = BR2684_ENCAPS_VC,
@@ -217,8 +217,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
         return 1;
 }
 
-static inline struct br2684_vcc *pick_outgoing_vcc(struct sk_buff *skb,
-                                                   struct br2684_dev *brdev)
+static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
+                                                   const struct br2684_dev *brdev)
 {
         return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next);       /* 1 vcc/dev right now */
 }
diff --git a/net/atm/common.c b/net/atm/common.c
index c865517ba449..d34edbe754c8 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -262,7 +262,7 @@ static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
 }
 
 
-static int check_ci(struct atm_vcc *vcc, short vpi, int vci)
+static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
 {
         struct hlist_head *head = &vcc_hash[vci &
                                         (VCC_HTABLE_SIZE - 1)];
@@ -290,7 +290,7 @@ static int check_ci(struct atm_vcc *vcc, short vpi, int vci)
 }
 
 
-static int find_ci(struct atm_vcc *vcc, short *vpi, int *vci)
+static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
 {
         static short p;        /* poor man's per-device cache */
         static int c;
@@ -646,7 +646,7 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
 }
 
 
-static int check_tp(struct atm_trafprm *tp)
+static int check_tp(const struct atm_trafprm *tp)
 {
         /* @@@ Should be merged with adjust_tp */
         if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0;
@@ -663,7 +663,7 @@ static int check_tp(struct atm_trafprm *tp)
 }
 
 
-static int check_qos(struct atm_qos *qos)
+static int check_qos(const struct atm_qos *qos)
 {
         int error;
 
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 653aca3573ac..5799fb52365a 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -65,36 +65,36 @@ static int lec_close(struct net_device *dev);
 static struct net_device_stats *lec_get_stats(struct net_device *dev);
 static void lec_init(struct net_device *dev);
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
-                                          unsigned char *mac_addr);
+                                          const unsigned char *mac_addr);
 static int lec_arp_remove(struct lec_priv *priv,
                           struct lec_arp_table *to_remove);
 /* LANE2 functions */
-static void lane2_associate_ind(struct net_device *dev, u8 *mac_address,
-                                u8 *tlvs, u32 sizeoftlvs);
-static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
+static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address,
+                                const u8 *tlvs, u32 sizeoftlvs);
+static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
                          u8 **tlvs, u32 *sizeoftlvs);
-static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
-                               u8 *tlvs, u32 sizeoftlvs);
+static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
+                               const u8 *tlvs, u32 sizeoftlvs);
 
-static int lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
+static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
                            unsigned long permanent);
 static void lec_arp_check_empties(struct lec_priv *priv,
                                   struct atm_vcc *vcc, struct sk_buff *skb);
 static void lec_arp_destroy(struct lec_priv *priv);
 static void lec_arp_init(struct lec_priv *priv);
 static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
-                                       unsigned char *mac_to_find,
+                                       const unsigned char *mac_to_find,
                                        int is_rdesc,
                                        struct lec_arp_table **ret_entry);
-static void lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr,
-                           unsigned char *atm_addr, unsigned long remoteflag,
+static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
+                           const unsigned char *atm_addr, unsigned long remoteflag,
                            unsigned int targetless_le_arp);
 static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
 static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
 static void lec_set_flush_tran_id(struct lec_priv *priv,
-                                  unsigned char *atm_addr,
+                                  const unsigned char *atm_addr,
                                   unsigned long tran_id);
-static void lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
+static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                           struct atm_vcc *vcc,
                           void (*old_push) (struct atm_vcc *vcc,
                                             struct sk_buff *skb));
@@ -634,7 +634,7 @@ static struct atm_dev lecatm_dev = {
  */
 static int
 send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
-             unsigned char *mac_addr, unsigned char *atm_addr,
+             const unsigned char *mac_addr, const unsigned char *atm_addr,
              struct sk_buff *data)
 {
         struct sock *sk;
@@ -705,10 +705,9 @@ static void lec_init(struct net_device *dev)
         dev->set_multicast_list = lec_set_multicast_list;
         dev->do_ioctl = NULL;
         printk("%s: Initialized!\n", dev->name);
-        return;
 }
 
-static unsigned char lec_ctrl_magic[] = {
+static const unsigned char lec_ctrl_magic[] = {
         0xff,
         0x00,
         0x01,
@@ -1276,7 +1275,7 @@ module_exit(lane_module_cleanup);
  * lec will be used.
  * If dst_mac == NULL, targetless LE_ARP will be sent
  */
-static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
+static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
                          u8 **tlvs, u32 *sizeoftlvs)
 {
         unsigned long flags;
@@ -1322,8 +1321,8 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
  * Returns 1 for success, 0 for failure (out of memory)
  *
  */
-static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
-                               u8 *tlvs, u32 sizeoftlvs)
+static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
+                               const u8 *tlvs, u32 sizeoftlvs)
 {
         int retval;
         struct sk_buff *skb;
@@ -1358,8 +1357,8 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
  * LANE2: 3.1.5, LE_ASSOCIATE.indication
  *
  */
-static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
-                                u8 *tlvs, u32 sizeoftlvs)
+static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
+                                const u8 *tlvs, u32 sizeoftlvs)
 {
 #if 0
         int i = 0;
@@ -1744,7 +1743,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
  * Find entry by mac_address
  */
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
-                                          unsigned char *mac_addr)
+                                          const unsigned char *mac_addr)
 {
         struct hlist_node *node;
         struct hlist_head *head;
@@ -1764,7 +1763,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
 }
 
 static struct lec_arp_table *make_entry(struct lec_priv *priv,
-                                        unsigned char *mac_addr)
+                                        const unsigned char *mac_addr)
 {
         struct lec_arp_table *to_return;
 
@@ -1921,7 +1920,7 @@ restart:
  *
  */
 static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
-                                       unsigned char *mac_to_find, int is_rdesc,
+                                       const unsigned char *mac_to_find, int is_rdesc,
                                        struct lec_arp_table **ret_entry)
 {
         unsigned long flags;
@@ -2017,7 +2016,7 @@ out:
 }
 
 static int
-lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
+lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
                 unsigned long permanent)
 {
         unsigned long flags;
@@ -2047,8 +2046,8 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
  * Notifies:  Response to arp_request (atm_addr != NULL)
  */
 static void
-lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr,
-               unsigned char *atm_addr, unsigned long remoteflag,
+lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
+               const unsigned char *atm_addr, unsigned long remoteflag,
                unsigned int targetless_le_arp)
 {
         unsigned long flags;
@@ -2148,7 +2147,7 @@ out:
  * Notifies: Vcc setup ready
  */
 static void
-lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
+lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
               struct atm_vcc *vcc,
               void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
 {
@@ -2336,7 +2335,7 @@ restart:
 
 static void
 lec_set_flush_tran_id(struct lec_priv *priv,
-                      unsigned char *atm_addr, unsigned long tran_id)
+                      const unsigned char *atm_addr, unsigned long tran_id)
 {
         unsigned long flags;
         struct hlist_node *node;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index b41cda7ea1e1..0d376682c1a3 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -42,12 +42,12 @@ struct lecdatahdr_8025 {
  *
  */
 struct lane2_ops {
-        int (*resolve) (struct net_device *dev, u8 *dst_mac, int force,
+        int (*resolve) (struct net_device *dev, const u8 *dst_mac, int force,
                         u8 **tlvs, u32 *sizeoftlvs);
-        int (*associate_req) (struct net_device *dev, u8 *lan_dst,
-                              u8 *tlvs, u32 sizeoftlvs);
-        void (*associate_indicator) (struct net_device *dev, u8 *mac_addr,
-                                     u8 *tlvs, u32 sizeoftlvs);
+        int (*associate_req) (struct net_device *dev, const u8 *lan_dst,
+                              const u8 *tlvs, u32 sizeoftlvs);
+        void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
+                                     const u8 *tlvs, u32 sizeoftlvs);
 };
 
 /*
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2712544cf0ca..97eaa23ad9ea 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -893,13 +893,11 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 
         sk->sk_destruct = ax25_free_sock;
         sk->sk_type     = osk->sk_type;
-        sk->sk_socket   = osk->sk_socket;
         sk->sk_priority = osk->sk_priority;
         sk->sk_protocol = osk->sk_protocol;
         sk->sk_rcvbuf   = osk->sk_rcvbuf;
         sk->sk_sndbuf   = osk->sk_sndbuf;
         sk->sk_state    = TCP_ESTABLISHED;
-        sk->sk_sleep    = osk->sk_sleep;
         sock_copy_flags(sk, osk);
 
         oax25 = ax25_sk(osk);
@@ -1361,13 +1359,11 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
                 goto out;
 
         newsk = skb->sk;
-        newsk->sk_socket = newsock;
-        newsk->sk_sleep = &newsock->wait;
+        sock_graft(newsk, newsock);
 
         /* Now attach up the new socket */
         kfree_skb(skb);
         sk->sk_ack_backlog--;
-        newsock->sk = newsk;
         newsock->state = SS_CONNECTED;
 
 out:
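sock_graft() consolidates the manual wiring of an accepted sock to its struct socket that the removed lines used to do by hand; from memory, the helper in include/net/sock.h looks roughly like this (locking and security-hook details may differ in the exact tree this patch applies to):

/* Rough shape of sock_graft(); a sketch, not the authoritative body. */
static inline void example_sock_graft(struct sock *sk, struct socket *parent)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_sleep = &parent->wait;
        parent->sk = sk;
        sk->sk_socket = parent;
        security_sock_graft(sk, parent);
        write_unlock_bh(&sk->sk_callback_lock);
}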
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 96e4b9273250..cdc7e751ef36 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -39,11 +39,9 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
 
         switch (ax25->state) {
         case AX25_STATE_0:
-                /* Magic here: If we listen() and a new link dies before it
-                   is accepted() it isn't 'dead' so doesn't get removed. */
-                if (!sk || sock_flag(sk, SOCK_DESTROY) ||
-                    (sk->sk_state == TCP_LISTEN &&
-                     sock_flag(sk, SOCK_DEAD))) {
+                if (!sk ||
+                    sock_flag(sk, SOCK_DESTROY) ||
+                    sock_flag(sk, SOCK_DEAD)) {
                         if (sk) {
                                 sock_hold(sk);
                                 ax25_destroy_socket(ax25);
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e69244dd8de8..b69bf4e7c48b 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -16,10 +16,6 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
-/*
- * $Id: bnep.h,v 1.5 2002/08/04 21:23:58 maxk Exp $
- */
-
 #ifndef _BNEP_H
 #define _BNEP_H
 
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f85d94643aaf..1d98a1b80da7 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -25,10 +25,6 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-/*
- * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $
- */
-
 #include <linux/module.h>
 
 #include <linux/kernel.h>
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 95e3837e4312..d9fa0ab2c87f 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,10 +25,6 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-/*
- * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $
- */
-
 #include <linux/module.h>
 
 #include <linux/socket.h>
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 201e5b1ce473..8ffb57f2303a 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,10 +24,6 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-/*
- * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $
- */
-
 #include <linux/module.h>
 
 #include <linux/types.h>
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 0c2c93735e93..b4fb84e398e5 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -23,8 +23,6 @@
 
 /*
  * Bluetooth RFCOMM core.
- *
- * $Id: core.c,v 1.42 2002/10/01 23:26:25 maxk Exp $
  */
 
 #include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5083adcbfae5..c9054487670a 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -23,8 +23,6 @@
 
 /*
  * RFCOMM sockets.
- *
- * $Id: sock.c,v 1.24 2002/10/03 01:00:34 maxk Exp $
  */
 
 #include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c9191871c1e0..be84f4fc1477 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -23,8 +23,6 @@
 
 /*
  * RFCOMM TTY.
- *
- * $Id: tty.c,v 1.24 2002/10/03 01:54:38 holtmann Exp $
  */
 
 #include <linux/module.h>
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 12265aff7099..e143ca678881 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -5,6 +5,7 @@
 config BRIDGE
         tristate "802.1d Ethernet Bridging"
         select LLC
+        select STP
         ---help---
           If you say Y here, then your Linux box will be able to act as an
           Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 8f3c58e5f7a5..573acdf6f9ff 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -5,8 +5,6 @@
  *      Authors:
  *      Lennert Buytenhek               <buytenh@gnu.org>
  *
- *      $Id: br.c,v 1.47 2001/12/24 00:56:41 davem Exp $
- *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
@@ -20,21 +18,24 @@
 #include <linux/init.h>
 #include <linux/llc.h>
 #include <net/llc.h>
+#include <net/stp.h>
 
 #include "br_private.h"
 
 int (*br_should_route_hook)(struct sk_buff *skb);
 
-static struct llc_sap *br_stp_sap;
+static const struct stp_proto br_stp_proto = {
+        .rcv    = br_stp_rcv,
+};
 
 static int __init br_init(void)
 {
         int err;
 
-        br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv);
-        if (!br_stp_sap) {
+        err = stp_proto_register(&br_stp_proto);
+        if (err < 0) {
                 printk(KERN_ERR "bridge: can't register sap for STP\n");
-                return -EADDRINUSE;
+                return err;
         }
 
         err = br_fdb_init();
@@ -67,13 +68,13 @@ err_out2:
 err_out1:
         br_fdb_fini();
 err_out:
-        llc_sap_put(br_stp_sap);
+        stp_proto_unregister(&br_stp_proto);
         return err;
 }
 
 static void __exit br_deinit(void)
 {
-        rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
+        stp_proto_unregister(&br_stp_proto);
 
         br_netlink_fini();
         unregister_netdevice_notifier(&br_device_notifier);
@@ -84,7 +85,6 @@ static void __exit br_deinit(void)
         synchronize_net();
 
         br_netfilter_fini();
-        llc_sap_put(br_stp_sap);
         br_fdb_get_hook = NULL;
         br_fdb_put_hook = NULL;
 
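With the SAP handling moved into the new net/802/stp.c layer, additional BPDU consumers register the same way the bridge now does, keyed by group address. A sketch of a second user (callback body and chosen address are illustrative; the struct layout is assumed from the usage above):

/* Sketch: registering another BPDU consumer with the shared
 * net/802/stp.c demultiplexer.
 */
static void example_bpdu_rcv(const struct stp_proto *proto,
                             struct sk_buff *skb, struct net_device *dev)
{
        /* inspect the BPDU, then consume it */
        kfree_skb(skb);
}

static const struct stp_proto example_stp_proto = {
        .group_address  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
        .rcv            = example_bpdu_rcv,
};

static int __init example_stp_init(void)
{
        return stp_proto_register(&example_stp_proto);
}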
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index bf7787395fe0..d9449df7cad5 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -5,8 +5,6 @@
  *      Authors:
  *      Lennert Buytenhek               <buytenh@gnu.org>
  *
- *      $Id: br_device.c,v 1.6 2001/12/24 00:59:55 davem Exp $
- *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
@@ -21,12 +19,6 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
-{
-        struct net_bridge *br = netdev_priv(dev);
-        return &br->statistics;
-}
-
 /* net device transmit always called with no BH (preempt_disabled) */
 int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -34,8 +26,8 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         const unsigned char *dest = skb->data;
         struct net_bridge_fdb_entry *dst;
 
-        br->statistics.tx_packets++;
-        br->statistics.tx_bytes += skb->len;
+        dev->stats.tx_packets++;
+        dev->stats.tx_bytes += skb->len;
 
         skb_reset_mac_header(skb);
         skb_pull(skb, ETH_HLEN);
@@ -95,6 +87,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
         spin_lock_bh(&br->lock);
         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
         br_stp_change_bridge_id(br, addr->sa_data);
+        br->flags |= BR_SET_MAC_ADDR;
         spin_unlock_bh(&br->lock);
 
         return 0;
@@ -161,7 +154,6 @@ void br_dev_setup(struct net_device *dev)
         ether_setup(dev);
 
         dev->do_ioctl = br_dev_ioctl;
-        dev->get_stats = br_dev_get_stats;
         dev->hard_start_xmit = br_dev_xmit;
         dev->open = br_dev_open;
         dev->set_multicast_list = br_dev_set_multicast_list;
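Dropping br_dev_get_stats() is safe because the netdev core falls back to the device's embedded counters when no get_stats hook is installed; from memory, the fallback in net/core/dev.c is simply:

/* Rough shape of the core fallback (from memory, not quoted from this
 * tree): with no get_stats hook, the core hands back the embedded
 * dev->stats that the bridge now updates directly.
 */
static struct net_device_stats *example_internal_stats(struct net_device *dev)
{
        return &dev->stats;
}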
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 142060f02054..a48f5efdb6bf 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -5,8 +5,6 @@
  *      Authors:
  *      Lennert Buytenhek               <buytenh@gnu.org>
  *
- *      $Id: br_fdb.c,v 1.6 2002/01/17 00:57:07 davem Exp $
- *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd7c35c3c7b..bdd9ccea17ce 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -5,8 +5,6 @@
  *      Authors:
  *      Lennert Buytenhek               <buytenh@gnu.org>
  *
- *      $Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
- *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
@@ -91,7 +89,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-        if (should_deliver(to, skb)) {
+        if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
                 __br_forward(to, skb);
                 return;
         }
@@ -115,7 +113,7 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                 struct sk_buff *skb2;
 
                 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-                        br->statistics.tx_dropped++;
+                        br->dev->stats.tx_dropped++;
                         kfree_skb(skb);
                         return;
                 }
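skb_warn_if_lro() guards the forwarding path against LRO-merged skbs, which must not be forwarded because they cannot be resegmented for the egress device. Its test is roughly the following (from memory; a sketch, not the authoritative body):

/* Rough shape of the LRO test: LRO-aggregated skbs carry a gso_size
 * but no gso_type, unlike genuine GSO packets.
 */
static inline int example_skb_is_lro(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size != 0 &&
               skb_shinfo(skb)->gso_type == 0;
}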
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f38cc5317b88..a072ea5ca6f5 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -5,8 +5,6 @@
  *      Authors:
  *      Lennert Buytenhek               <buytenh@gnu.org>
  *
- *      $Id: br_if.c,v 1.7 2001/12/24 00:59:55 davem Exp $
- *
  *      This program is free software; you can redistribute it and/or
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
@@ -375,6 +373,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
         if (IS_ERR(p))
                 return PTR_ERR(p);
 
+        err = dev_set_promiscuity(dev, 1);
+        if (err)
+                goto put_back;
+
         err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
                                    SYSFS_BRIDGE_PORT_ATTR);
         if (err)
@@ -389,7 +391,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
                 goto err2;
 
         rcu_assign_pointer(dev->br_port, p);
-        dev_set_promiscuity(dev, 1);
+        dev_disable_lro(dev);
 
         list_add_rcu(&p->list, &br->port_list);
 
@@ -413,12 +415,12 @@ err2:
413 br_fdb_delete_by_port(br, p, 1); 415 br_fdb_delete_by_port(br, p, 1);
414err1: 416err1:
415 kobject_del(&p->kobj); 417 kobject_del(&p->kobj);
416 goto put_back;
417err0: 418err0:
418 kobject_put(&p->kobj); 419 kobject_put(&p->kobj);
419 420 dev_set_promiscuity(dev, -1);
420put_back: 421put_back:
421 dev_put(dev); 422 dev_put(dev);
423 kfree(p);
422 return err; 424 return err;
423} 425}
424 426
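[editor's note] The br_if.c hunks move dev_set_promiscuity() ahead of the sysfs kobject setup and teach the failure paths to undo it, the usual acquire-in-order / unwind-in-reverse goto idiom. Below is a minimal, runnable userspace sketch of that idiom only; the resource names are invented for illustration and are not kernel symbols.

#include <stdio.h>

/* Toy resources; each grab returns 0 on success, negative on failure. */
static int grab_promisc(void)  { puts("promisc on");  return 0; }
static void drop_promisc(void) { puts("promisc off"); }
static int grab_sysfs(void)    { puts("adding sysfs entry"); return -1; }

static int add_port(void)
{
	int err;

	err = grab_promisc();		/* acquired first ...           */
	if (err)
		goto out;

	err = grab_sysfs();		/* simulated failure            */
	if (err)
		goto err_promisc;	/* ... so it is released last   */

	return 0;

err_promisc:
	drop_promisc();
out:
	return err;
}

int main(void)
{
	return add_port() ? 1 : 0;
}
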
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 255c00f60ce7..30b88777c3df 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_input.c,v 1.10 2001/12/24 04:50:20 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -24,13 +22,13 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
24 22
25static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
26{ 24{
27 struct net_device *indev; 25 struct net_device *indev, *brdev = br->dev;
28 26
29 br->statistics.rx_packets++; 27 brdev->stats.rx_packets++;
30 br->statistics.rx_bytes += skb->len; 28 brdev->stats.rx_bytes += skb->len;
31 29
32 indev = skb->dev; 30 indev = skb->dev;
33 skb->dev = br->dev; 31 skb->dev = brdev;
34 32
35 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
36 netif_receive_skb); 34 netif_receive_skb);
@@ -64,7 +62,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
64 dst = NULL; 62 dst = NULL;
65 63
66 if (is_multicast_ether_addr(dest)) { 64 if (is_multicast_ether_addr(dest)) {
67 br->statistics.multicast++; 65 br->dev->stats.multicast++;
68 skb2 = skb; 66 skb2 = skb;
69 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
70 skb2 = skb; 68 skb2 = skb;
@@ -136,14 +134,11 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
136 if (skb->protocol == htons(ETH_P_PAUSE)) 134 if (skb->protocol == htons(ETH_P_PAUSE))
137 goto drop; 135 goto drop;
138 136
139 /* Process STP BPDU's through normal netif_receive_skb() path */ 137 if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
140 if (p->br->stp_enabled != BR_NO_STP) { 138 NULL, br_handle_local_finish))
141 if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, 139 return NULL; /* frame consumed by filter */
142 NULL, br_handle_local_finish)) 140 else
143 return NULL; 141 return skb; /* continue processing */
144 else
145 return skb;
146 }
147 } 142 }
148 143
149 switch (p->state) { 144 switch (p->state) {
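[editor's note] br_handle_frame() now hands every link-local group frame to the NF_BR_LOCAL_IN hook unconditionally, and the hook's verdict decides whether the caller sees the skb again: nonzero means the filter consumed the frame (return NULL upstream), zero means processing continues. A toy model of that consumed-versus-continue contract, with invented names:

#include <stdio.h>

struct frame { const char *data; };

/* Returns nonzero when the hook consumes (frees) the frame. */
static int local_in_hook(struct frame *f)
{
	return f->data[0] == 'S';	/* pretend: swallow STP-ish frames */
}

/* Returns NULL when consumed, the frame itself to continue. */
static struct frame *handle_frame(struct frame *f)
{
	if (local_in_hook(f))
		return NULL;		/* frame consumed by filter */
	return f;			/* continue processing */
}

int main(void)
{
	struct frame stp = { "STP bpdu" }, ip = { "IP payload" };

	printf("%s\n", handle_frame(&stp) ? "forwarded" : "consumed");
	printf("%s\n", handle_frame(&ip)  ? "forwarded" : "consumed");
	return 0;
}
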
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 0655a5f07f58..eeee218eed80 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_ioctl.c,v 1.4 2000/11/08 05:16:40 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 00644a544e3c..88d8ec7b3142 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_notify.c,v 1.2 2000/02/21 15:51:34 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c11b554fd109..815ed38925b2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private.h,v 1.7 2001/12/24 00:59:55 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
@@ -90,11 +88,12 @@ struct net_bridge
90 spinlock_t lock; 88 spinlock_t lock;
91 struct list_head port_list; 89 struct list_head port_list;
92 struct net_device *dev; 90 struct net_device *dev;
93 struct net_device_stats statistics;
94 spinlock_t hash_lock; 91 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 92 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list; 93 struct list_head age_list;
97 unsigned long feature_mask; 94 unsigned long feature_mask;
95 unsigned long flags;
96#define BR_SET_MAC_ADDR 0x00000001
98 97
99 /* STP */ 98 /* STP */
100 bridge_id designated_root; 99 bridge_id designated_root;
@@ -227,8 +226,9 @@ extern void br_stp_set_path_cost(struct net_bridge_port *p,
227extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 226extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
228 227
229/* br_stp_bpdu.c */ 228/* br_stp_bpdu.c */
230extern int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, 229struct stp_proto;
231 struct packet_type *pt, struct net_device *orig_dev); 230extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
231 struct net_device *dev);
232 232
233/* br_stp_timer.c */ 233/* br_stp_timer.c */
234extern void br_stp_timer_init(struct net_bridge *br); 234extern void br_stp_timer_init(struct net_bridge *br);
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index e29f01ac1adf..8b650f7fbfa0 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private_stp.h,v 1.3 2001/02/05 06:03:47 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 9e96ffcd29a3..921bbe5cb94a 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp.c,v 1.4 2000/06/19 10:13:35 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index ddeb6e5d45d6..996476174517 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_bpdu.c,v 1.3 2001/11/10 02:35:25 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -20,6 +18,7 @@
20#include <net/net_namespace.h> 18#include <net/net_namespace.h>
21#include <net/llc.h> 19#include <net/llc.h>
22#include <net/llc_pdu.h> 20#include <net/llc_pdu.h>
21#include <net/stp.h>
23#include <asm/unaligned.h> 22#include <asm/unaligned.h>
24 23
25#include "br_private.h" 24#include "br_private.h"
@@ -133,10 +132,9 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
133 * 132 *
134 * NO locks, but rcu_read_lock (preempt_disabled) 133 * NO locks, but rcu_read_lock (preempt_disabled)
135 */ 134 */
136int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, 135void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
137 struct packet_type *pt, struct net_device *orig_dev) 136 struct net_device *dev)
138{ 137{
139 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
140 const unsigned char *dest = eth_hdr(skb)->h_dest; 138 const unsigned char *dest = eth_hdr(skb)->h_dest;
141 struct net_bridge_port *p = rcu_dereference(dev->br_port); 139 struct net_bridge_port *p = rcu_dereference(dev->br_port);
142 struct net_bridge *br; 140 struct net_bridge *br;
@@ -148,11 +146,6 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
148 if (!p) 146 if (!p)
149 goto err; 147 goto err;
150 148
151 if (pdu->ssap != LLC_SAP_BSPAN
152 || pdu->dsap != LLC_SAP_BSPAN
153 || pdu->ctrl_1 != LLC_PDU_TYPE_U)
154 goto err;
155
156 if (!pskb_may_pull(skb, 4)) 149 if (!pskb_may_pull(skb, 4))
157 goto err; 150 goto err;
158 151
@@ -226,5 +219,4 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
226 spin_unlock(&br->lock); 219 spin_unlock(&br->lock);
227 err: 220 err:
228 kfree_skb(skb); 221 kfree_skb(skb);
229 return 0;
230} 222}
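[editor's note] br_stp_rcv() loses its LLC SAP checks and its packet_type signature: per the new "struct stp_proto" forward declaration in br_private.h and the added #include <net/stp.h>, a generic STP layer now validates the LLC header and dispatches to registered handlers by group address. A runnable userspace model of that dispatch-by-group-address pattern follows; the struct layout and lookup are simplified assumptions, not the kernel API.

#include <stdio.h>
#include <string.h>

struct stp_proto {
	unsigned char group_address[6];
	void (*rcv)(const struct stp_proto *proto, const char *frame);
};

static void bridge_stp_rcv(const struct stp_proto *proto, const char *frame)
{
	printf("bridge STP got: %s\n", frame);
}

static const struct stp_proto *registered[4];

static void demux(const unsigned char *dest, const char *frame)
{
	for (int i = 0; i < 4; i++) {
		const struct stp_proto *p = registered[i];
		if (p && !memcmp(p->group_address, dest, 6)) {
			p->rcv(p, frame);	/* handler owns the frame now */
			return;
		}
	}
	/* no registered handler: drop */
}

int main(void)
{
	static const struct stp_proto br = {
		{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }, bridge_stp_rcv
	};
	const unsigned char dest[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

	registered[0] = &br;
	demux(dest, "config BPDU");
	return 0;
}
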
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 1a430eccec9b..9a52ac5b4525 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_if.c,v 1.4 2001/04/14 21:14:39 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -216,6 +214,10 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br)
216 const unsigned char *addr = br_mac_zero; 214 const unsigned char *addr = br_mac_zero;
217 struct net_bridge_port *p; 215 struct net_bridge_port *p;
218 216
217 /* user has chosen a value so keep it */
218 if (br->flags & BR_SET_MAC_ADDR)
219 return;
220
219 list_for_each_entry(p, &br->port_list, list) { 221 list_for_each_entry(p, &br->port_list, list) {
220 if (addr == br_mac_zero || 222 if (addr == br_mac_zero ||
221 memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) 223 memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
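[editor's note] br_stp_recalculate_bridge_id() normally derives the bridge ID from the numerically smallest port MAC; the new BR_SET_MAC_ADDR flag makes a user-assigned address sticky across port changes. A small runnable sketch of the selection loop with that guard (toy types, made-up addresses):

#include <stdio.h>
#include <string.h>

#define BR_SET_MAC_ADDR 0x00000001

struct bridge {
	unsigned long flags;
	unsigned char addr[6];
};

static void recalc(struct bridge *br,
		   const unsigned char ports[][6], int nports)
{
	if (br->flags & BR_SET_MAC_ADDR)
		return;				/* user chose it: keep it */

	memcpy(br->addr, ports[0], 6);
	for (int i = 1; i < nports; i++)
		if (memcmp(ports[i], br->addr, 6) < 0)
			memcpy(br->addr, ports[i], 6);
}

int main(void)
{
	const unsigned char ports[2][6] = {
		{ 0x00, 0x50, 0, 0, 0, 2 }, { 0x00, 0x10, 0, 0, 0, 9 },
	};
	struct bridge br = { 0, { 0 } };

	recalc(&br, ports, 2);
	printf("bridge id ends in %02x\n", br.addr[5]);	/* smaller MAC wins */
	return 0;
}
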
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 77f5255e6915..772a140bfdf0 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_timer.c,v 1.3 2000/05/05 02:17:17 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7beeefa0f9c0..909479794999 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -83,6 +83,15 @@ config BRIDGE_EBT_IP
83 83
84 To compile it as a module, choose M here. If unsure, say N. 84 To compile it as a module, choose M here. If unsure, say N.
85 85
86config BRIDGE_EBT_IP6
87 tristate "ebt: IP6 filter support"
88 depends on BRIDGE_NF_EBTABLES && IPV6
89 help
90 This option adds the IP6 match, which allows basic IPV6 header field
91 filtering.
92
93 To compile it as a module, choose M here. If unsure, say N.
94
86config BRIDGE_EBT_LIMIT 95config BRIDGE_EBT_LIMIT
87 tristate "ebt: limit match support" 96 tristate "ebt: limit match support"
88 depends on BRIDGE_NF_EBTABLES 97 depends on BRIDGE_NF_EBTABLES
@@ -221,7 +230,7 @@ config BRIDGE_EBT_NFLOG
221 either the old LOG target, the old ULOG target or nfnetlink_log 230 either the old LOG target, the old ULOG target or nfnetlink_log
222 as backend. 231 as backend.
223 232
224 This option adds the ulog watcher, that you can use in any rule 233 This option adds the nflog watcher, that you can use in any rule
225 in any ebtables table. 234 in any ebtables table.
226 235
227 To compile it as a module, choose M here. If unsure, say N. 236 To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 83715d73a503..0718699540b0 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o
14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o 14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o
15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o 15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o
16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o 16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o
17obj-$(CONFIG_BRIDGE_EBT_IP6) += ebt_ip6.o
17obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o 18obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o
18obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o 19obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o
19obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o 20obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
new file mode 100644
index 000000000000..36efb3a75249
--- /dev/null
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -0,0 +1,144 @@
1/*
2 * ebt_ip6
3 *
4 * Authors:
5 * Manohar Castelino <manohar.r.castelino@intel.com>
6 * Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
7 * Jan Engelhardt <jengelh@computergmbh.de>
8 *
9 * Summary:
10 * This is just a modification of the IPv4 code written by
11 * Bart De Schuymer <bdschuym@pandora.be>
12 * with the changes required to support IPv6
13 *
14 * Jan, 2008
15 */
16
17#include <linux/netfilter_bridge/ebtables.h>
18#include <linux/netfilter_bridge/ebt_ip6.h>
19#include <linux/ipv6.h>
20#include <net/ipv6.h>
21#include <linux/in.h>
22#include <linux/module.h>
23#include <net/dsfield.h>
24
25struct tcpudphdr {
26 __be16 src;
27 __be16 dst;
28};
29
30static int ebt_filter_ip6(const struct sk_buff *skb,
31 const struct net_device *in,
32 const struct net_device *out, const void *data,
33 unsigned int datalen)
34{
35 const struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
36 const struct ipv6hdr *ih6;
37 struct ipv6hdr _ip6h;
38 const struct tcpudphdr *pptr;
39 struct tcpudphdr _ports;
40 struct in6_addr tmp_addr;
41 int i;
42
43 ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
44 if (ih6 == NULL)
45 return EBT_NOMATCH;
46 if (info->bitmask & EBT_IP6_TCLASS &&
47 FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
48 return EBT_NOMATCH;
49 for (i = 0; i < 4; i++)
50 tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
51 info->smsk.in6_u.u6_addr32[i];
52 if (info->bitmask & EBT_IP6_SOURCE &&
53 FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
54 EBT_IP6_SOURCE))
55 return EBT_NOMATCH;
56 for (i = 0; i < 4; i++)
57 tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
58 info->dmsk.in6_u.u6_addr32[i];
59 if (info->bitmask & EBT_IP6_DEST &&
60 FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
61 return EBT_NOMATCH;
62 if (info->bitmask & EBT_IP6_PROTO) {
63 uint8_t nexthdr = ih6->nexthdr;
64 int offset_ph;
65
66 offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
67 if (offset_ph == -1)
68 return EBT_NOMATCH;
69 if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
70 return EBT_NOMATCH;
71 if (!(info->bitmask & EBT_IP6_DPORT) &&
72 !(info->bitmask & EBT_IP6_SPORT))
73 return EBT_MATCH;
74 pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
75 &_ports);
76 if (pptr == NULL)
77 return EBT_NOMATCH;
78 if (info->bitmask & EBT_IP6_DPORT) {
79 u32 dst = ntohs(pptr->dst);
80 if (FWINV(dst < info->dport[0] ||
81 dst > info->dport[1], EBT_IP6_DPORT))
82 return EBT_NOMATCH;
83 }
84 if (info->bitmask & EBT_IP6_SPORT) {
85 u32 src = ntohs(pptr->src);
86 if (FWINV(src < info->sport[0] ||
87 src > info->sport[1], EBT_IP6_SPORT))
88 return EBT_NOMATCH;
89 }
90 return EBT_MATCH;
91 }
92 return EBT_MATCH;
93}
94
95static int ebt_ip6_check(const char *tablename, unsigned int hookmask,
96 const struct ebt_entry *e, void *data, unsigned int datalen)
97{
98 struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
99
100 if (datalen != EBT_ALIGN(sizeof(struct ebt_ip6_info)))
101 return -EINVAL;
102 if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
103 return -EINVAL;
104 if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
105 return -EINVAL;
106 if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
107 if (info->invflags & EBT_IP6_PROTO)
108 return -EINVAL;
109 if (info->protocol != IPPROTO_TCP &&
110 info->protocol != IPPROTO_UDP &&
111 info->protocol != IPPROTO_UDPLITE &&
112 info->protocol != IPPROTO_SCTP &&
113 info->protocol != IPPROTO_DCCP)
114 return -EINVAL;
115 }
116 if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
117 return -EINVAL;
118 if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
119 return -EINVAL;
120 return 0;
121}
122
123static struct ebt_match filter_ip6 =
124{
125 .name = EBT_IP6_MATCH,
126 .match = ebt_filter_ip6,
127 .check = ebt_ip6_check,
128 .me = THIS_MODULE,
129};
130
131static int __init ebt_ip6_init(void)
132{
133 return ebt_register_match(&filter_ip6);
134}
135
136static void __exit ebt_ip6_fini(void)
137{
138 ebt_unregister_match(&filter_ip6);
139}
140
141module_init(ebt_ip6_init);
142module_exit(ebt_ip6_fini);
143MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
144MODULE_LICENSE("GPL");
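[editor's note] The match logic above leans on two idioms: masking each 32-bit word of an address before comparing, and FWINV(), which XORs the comparison result with an "invert this test" flag so one expression covers both a rule and its '!' negation. A self-contained demonstration of the inversion idiom; the macro body is reconstructed from the usual ebtables shape (there it reads info->invflags), so treat it as an assumption.

#include <stdio.h>

#define EBT_IP6_SOURCE 0x01

static unsigned invflags;

/* Flip the test when the corresponding invert flag is set. */
#define FWINV(bool_, flag) ((bool_) ^ !!(invflags & (flag)))

static int src_matches(unsigned saddr, unsigned want, unsigned mask)
{
	/* mismatch test, optionally inverted: nonzero means NOMATCH */
	return FWINV((saddr & mask) != want, EBT_IP6_SOURCE);
}

int main(void)
{
	invflags = 0;
	printf("plain:    %s\n", src_matches(0x2001aa01, 0x2001aa00, ~0xffu)
	       ? "NOMATCH" : "MATCH");

	invflags = EBT_IP6_SOURCE;	/* rule written with '!' */
	printf("inverted: %s\n", src_matches(0x2001aa01, 0x2001aa00, ~0xffu)
	       ? "NOMATCH" : "MATCH");
	return 0;
}
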
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 0b209e4aad0a..2f430d4ae911 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -18,6 +18,9 @@
18#include <linux/if_arp.h> 18#include <linux/if_arp.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <net/netfilter/nf_log.h> 20#include <net/netfilter/nf_log.h>
21#include <linux/ipv6.h>
22#include <net/ipv6.h>
23#include <linux/in6.h>
21 24
22static DEFINE_SPINLOCK(ebt_log_lock); 25static DEFINE_SPINLOCK(ebt_log_lock);
23 26
@@ -58,6 +61,27 @@ static void print_MAC(const unsigned char *p)
58 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); 61 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':');
59} 62}
60 63
64static void
65print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
66{
67 if (protocol == IPPROTO_TCP ||
68 protocol == IPPROTO_UDP ||
69 protocol == IPPROTO_UDPLITE ||
70 protocol == IPPROTO_SCTP ||
71 protocol == IPPROTO_DCCP) {
72 const struct tcpudphdr *pptr;
73 struct tcpudphdr _ports;
74
75 pptr = skb_header_pointer(skb, offset,
76 sizeof(_ports), &_ports);
77 if (pptr == NULL) {
78 printk(" INCOMPLETE TCP/UDP header");
79 return;
80 }
81 printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
82 }
83}
84
61#define myNIPQUAD(a) a[0], a[1], a[2], a[3] 85#define myNIPQUAD(a) a[0], a[1], a[2], a[3]
62static void 86static void
63ebt_log_packet(unsigned int pf, unsigned int hooknum, 87ebt_log_packet(unsigned int pf, unsigned int hooknum,
@@ -95,25 +119,35 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
95 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP " 119 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP "
96 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), 120 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
97 NIPQUAD(ih->daddr), ih->tos, ih->protocol); 121 NIPQUAD(ih->daddr), ih->tos, ih->protocol);
98 if (ih->protocol == IPPROTO_TCP || 122 print_ports(skb, ih->protocol, ih->ihl*4);
99 ih->protocol == IPPROTO_UDP || 123 goto out;
100 ih->protocol == IPPROTO_UDPLITE || 124 }
101 ih->protocol == IPPROTO_SCTP || 125
102 ih->protocol == IPPROTO_DCCP) { 126#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
103 const struct tcpudphdr *pptr; 127 if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
104 struct tcpudphdr _ports; 128 htons(ETH_P_IPV6)) {
105 129 const struct ipv6hdr *ih;
106 pptr = skb_header_pointer(skb, ih->ihl*4, 130 struct ipv6hdr _iph;
107 sizeof(_ports), &_ports); 131 uint8_t nexthdr;
108 if (pptr == NULL) { 132 int offset_ph;
109 printk(" INCOMPLETE TCP/UDP header"); 133
110 goto out; 134 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
111 } 135 if (ih == NULL) {
112 printk(" SPT=%u DPT=%u", ntohs(pptr->src), 136 printk(" INCOMPLETE IPv6 header");
113 ntohs(pptr->dst)); 137 goto out;
114 } 138 }
139 printk(" IPv6 SRC=%x:%x:%x:%x:%x:%x:%x:%x "
140 "IPv6 DST=%x:%x:%x:%x:%x:%x:%x:%x, IPv6 "
141 "priority=0x%01X, Next Header=%d", NIP6(ih->saddr),
142 NIP6(ih->daddr), ih->priority, ih->nexthdr);
143 nexthdr = ih->nexthdr;
144 offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
145 if (offset_ph == -1)
146 goto out;
147 print_ports(skb, nexthdr, offset_ph);
115 goto out; 148 goto out;
116 } 149 }
150#endif
117 151
118 if ((bitmask & EBT_LOG_ARP) && 152 if ((bitmask & EBT_LOG_ARP) &&
119 ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) || 153 ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
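[editor's note] print_ports() factors the TCP/UDP-style port dump out so the IPv4 branch and the new IPv6 branch share it, and it trusts only skb_header_pointer(), which copies the bytes out if the header is not linear and returns NULL when the packet is too short. A runnable userspace model of that bounded-read pattern (names invented; the toy skips the ntohs byte swap):

#include <stdio.h>
#include <string.h>

struct tcpudphdr { unsigned short src, dst; };

/* Model of skb_header_pointer(): NULL if [off, off+len) is out of
 * range, otherwise copy into the caller's buffer and return it. */
static void *header_pointer(const unsigned char *pkt, size_t pktlen,
			    size_t off, size_t len, void *buf)
{
	if (off + len > pktlen)
		return NULL;
	memcpy(buf, pkt + off, len);
	return buf;
}

static void print_ports(const unsigned char *pkt, size_t pktlen, size_t off)
{
	struct tcpudphdr _ports, *pptr;

	pptr = header_pointer(pkt, pktlen, off, sizeof(_ports), &_ports);
	if (!pptr) {
		puts(" INCOMPLETE TCP/UDP header");
		return;
	}
	printf(" SPT=%u DPT=%u\n", pptr->src, pptr->dst);
}

int main(void)
{
	unsigned char pkt[64] = { 0 };

	print_ports(pkt, sizeof(pkt), 40);	/* fits: ports printed */
	print_ports(pkt, sizeof(pkt), 62);	/* truncated: error path */
	return 0;
}
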
diff --git a/net/core/dev.c b/net/core/dev.c
index 821cb1628e5e..e54acde839da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -90,6 +90,7 @@
90#include <linux/if_ether.h> 90#include <linux/if_ether.h>
91#include <linux/netdevice.h> 91#include <linux/netdevice.h>
92#include <linux/etherdevice.h> 92#include <linux/etherdevice.h>
93#include <linux/ethtool.h>
93#include <linux/notifier.h> 94#include <linux/notifier.h>
94#include <linux/skbuff.h> 95#include <linux/skbuff.h>
95#include <net/net_namespace.h> 96#include <net/net_namespace.h>
@@ -120,6 +121,9 @@
120#include <linux/ctype.h> 121#include <linux/ctype.h>
121#include <linux/if_arp.h> 122#include <linux/if_arp.h>
122#include <linux/if_vlan.h> 123#include <linux/if_vlan.h>
124#include <linux/ip.h>
125#include <linux/ipv6.h>
126#include <linux/in.h>
123 127
124#include "net-sysfs.h" 128#include "net-sysfs.h"
125 129
@@ -257,7 +261,7 @@ DEFINE_PER_CPU(struct softnet_data, softnet_data);
257 261
258#ifdef CONFIG_DEBUG_LOCK_ALLOC 262#ifdef CONFIG_DEBUG_LOCK_ALLOC
259/* 263/*
260 * register_netdevice() inits dev->_xmit_lock and sets lockdep class 264 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
261 * according to dev->type 265 * according to dev->type
262 */ 266 */
263static const unsigned short netdev_lock_type[] = 267static const unsigned short netdev_lock_type[] =
@@ -961,6 +965,12 @@ void netdev_state_change(struct net_device *dev)
961 } 965 }
962} 966}
963 967
968void netdev_bonding_change(struct net_device *dev)
969{
970 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
971}
972EXPORT_SYMBOL(netdev_bonding_change);
973
964/** 974/**
965 * dev_load - load a network module 975 * dev_load - load a network module
966 * @net: the applicable net namespace 976 * @net: the applicable net namespace
@@ -1117,6 +1127,29 @@ int dev_close(struct net_device *dev)
1117} 1127}
1118 1128
1119 1129
1130/**
1131 * dev_disable_lro - disable Large Receive Offload on a device
1132 * @dev: device
1133 *
1134 * Disable Large Receive Offload (LRO) on a net device. Must be
1135 * called under RTNL. This is needed if received packets may be
1136 * forwarded to another interface.
1137 */
1138void dev_disable_lro(struct net_device *dev)
1139{
1140 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1141 dev->ethtool_ops->set_flags) {
1142 u32 flags = dev->ethtool_ops->get_flags(dev);
1143 if (flags & ETH_FLAG_LRO) {
1144 flags &= ~ETH_FLAG_LRO;
1145 dev->ethtool_ops->set_flags(dev, flags);
1146 }
1147 }
1148 WARN_ON(dev->features & NETIF_F_LRO);
1149}
1150EXPORT_SYMBOL(dev_disable_lro);
1151
1152
1120static int dev_boot_phase = 1; 1153static int dev_boot_phase = 1;
1121 1154
1122/* 1155/*
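[editor's note] dev_disable_lro() probes the driver's ethtool get_flags/set_flags hooks and clears only the LRO bit, which a bridge port needs because LRO-merged super-frames must not be forwarded. A minimal runnable model of that read-modify-write on a feature-flags word; the ops layout here is a stand-in, not the kernel's struct.

#include <stdio.h>

#define ETH_FLAG_LRO (1u << 15)

struct toy_dev {
	unsigned int flags;
	unsigned int (*get_flags)(struct toy_dev *);
	void (*set_flags)(struct toy_dev *, unsigned int);
};

static unsigned int get_flags(struct toy_dev *d) { return d->flags; }
static void set_flags(struct toy_dev *d, unsigned int f) { d->flags = f; }

static void disable_lro(struct toy_dev *d)
{
	if (d->get_flags && d->set_flags) {	/* driver may lack the hooks */
		unsigned int flags = d->get_flags(d);
		if (flags & ETH_FLAG_LRO)
			d->set_flags(d, flags & ~ETH_FLAG_LRO);
	}
}

int main(void)
{
	struct toy_dev d = { ETH_FLAG_LRO | 0x3, get_flags, set_flags };

	disable_lro(&d);
	printf("flags now 0x%x, LRO %s\n", d.flags,
	       (d.flags & ETH_FLAG_LRO) ? "on" : "off");
	return 0;
}
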
@@ -1290,16 +1323,18 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1290} 1323}
1291 1324
1292 1325
1293void __netif_schedule(struct net_device *dev) 1326void __netif_schedule(struct Qdisc *q)
1294{ 1327{
1295 if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { 1328 BUG_ON(q == &noop_qdisc);
1296 unsigned long flags; 1329
1330 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
1297 struct softnet_data *sd; 1331 struct softnet_data *sd;
1332 unsigned long flags;
1298 1333
1299 local_irq_save(flags); 1334 local_irq_save(flags);
1300 sd = &__get_cpu_var(softnet_data); 1335 sd = &__get_cpu_var(softnet_data);
1301 dev->next_sched = sd->output_queue; 1336 q->next_sched = sd->output_queue;
1302 sd->output_queue = dev; 1337 sd->output_queue = q;
1303 raise_softirq_irqoff(NET_TX_SOFTIRQ); 1338 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1304 local_irq_restore(flags); 1339 local_irq_restore(flags);
1305 } 1340 }
@@ -1566,7 +1601,8 @@ static int dev_gso_segment(struct sk_buff *skb)
1566 return 0; 1601 return 0;
1567} 1602}
1568 1603
1569int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 1604int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1605 struct netdev_queue *txq)
1570{ 1606{
1571 if (likely(!skb->next)) { 1607 if (likely(!skb->next)) {
1572 if (!list_empty(&ptype_all)) 1608 if (!list_empty(&ptype_all))
@@ -1595,9 +1631,7 @@ gso:
1595 skb->next = nskb; 1631 skb->next = nskb;
1596 return rc; 1632 return rc;
1597 } 1633 }
1598 if (unlikely((netif_queue_stopped(dev) || 1634 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1599 netif_subqueue_stopped(dev, skb)) &&
1600 skb->next))
1601 return NETDEV_TX_BUSY; 1635 return NETDEV_TX_BUSY;
1602 } while (skb->next); 1636 } while (skb->next);
1603 1637
@@ -1634,9 +1668,71 @@ out_kfree_skb:
1634 * --BLG 1668 * --BLG
1635 */ 1669 */
1636 1670
1671static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1672{
1673 u32 *addr, *ports, hash, ihl;
1674 u8 ip_proto;
1675 int alen;
1676
1677 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol;
1680 addr = &ip_hdr(skb)->saddr;
1681 ihl = ip_hdr(skb)->ihl;
1682 alen = 2;
1683 break;
1684 case __constant_htons(ETH_P_IPV6):
1685 ip_proto = ipv6_hdr(skb)->nexthdr;
1686 addr = &ipv6_hdr(skb)->saddr.s6_addr32[0];
1687 ihl = (40 >> 2);
1688 alen = 8;
1689 break;
1690 default:
1691 return 0;
1692 }
1693
1694 ports = (u32 *) (skb_network_header(skb) + (ihl * 4));
1695
1696 hash = 0;
1697 while (alen--)
1698 hash ^= *addr++;
1699
1700 switch (ip_proto) {
1701 case IPPROTO_TCP:
1702 case IPPROTO_UDP:
1703 case IPPROTO_DCCP:
1704 case IPPROTO_ESP:
1705 case IPPROTO_AH:
1706 case IPPROTO_SCTP:
1707 case IPPROTO_UDPLITE:
1708 hash ^= *ports;
1709 break;
1710
1711 default:
1712 break;
1713 }
1714
1715 return hash % dev->real_num_tx_queues;
1716}
1717
1718static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1719 struct sk_buff *skb)
1720{
1721 u16 queue_index = 0;
1722
1723 if (dev->select_queue)
1724 queue_index = dev->select_queue(dev, skb);
1725 else if (dev->real_num_tx_queues > 1)
1726 queue_index = simple_tx_hash(dev, skb);
1727
1728 skb_set_queue_mapping(skb, queue_index);
1729 return netdev_get_tx_queue(dev, queue_index);
1730}
1731
1637int dev_queue_xmit(struct sk_buff *skb) 1732int dev_queue_xmit(struct sk_buff *skb)
1638{ 1733{
1639 struct net_device *dev = skb->dev; 1734 struct net_device *dev = skb->dev;
1735 struct netdev_queue *txq;
1640 struct Qdisc *q; 1736 struct Qdisc *q;
1641 int rc = -ENOMEM; 1737 int rc = -ENOMEM;
1642 1738
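[editor's note] simple_tx_hash() spreads flows across the new per-device TX queues by XOR-folding the IP addresses and, for port-bearing protocols, the 32-bit src/dst port pair, then reducing modulo real_num_tx_queues. A standalone rendition over plain integers shows why every packet of one flow lands on the same queue; the field values are made up.

#include <stdio.h>

/* XOR-fold addresses and ports, reduce modulo the queue count. */
static unsigned short tx_hash(const unsigned int *addrs, int alen,
			      unsigned int ports, unsigned int nqueues)
{
	unsigned int hash = 0;

	while (alen--)
		hash ^= *addrs++;
	hash ^= ports;			/* only for TCP/UDP/SCTP/... */
	return hash % nqueues;
}

int main(void)
{
	/* IPv4: saddr, daddr; ports packed as src<<16 | dst (toy values) */
	unsigned int flow_a[2] = { 0xc0a80001, 0xc0a80002 };
	unsigned int flow_b[2] = { 0xc0a80001, 0xc0a80003 };

	printf("flow a -> queue %u\n", tx_hash(flow_a, 2, 0x1f905000, 8));
	printf("flow a -> queue %u (stable)\n",
	       tx_hash(flow_a, 2, 0x1f905000, 8));
	printf("flow b -> queue %u\n", tx_hash(flow_b, 2, 0x1f905000, 8));
	return 0;
}
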
@@ -1669,44 +1765,29 @@ int dev_queue_xmit(struct sk_buff *skb)
1669 } 1765 }
1670 1766
1671gso: 1767gso:
1672 spin_lock_prefetch(&dev->queue_lock);
1673
1674 /* Disable soft irqs for various locks below. Also 1768 /* Disable soft irqs for various locks below. Also
1675 * stops preemption for RCU. 1769 * stops preemption for RCU.
1676 */ 1770 */
1677 rcu_read_lock_bh(); 1771 rcu_read_lock_bh();
1678 1772
1679 /* Updates of qdisc are serialized by queue_lock. 1773 txq = dev_pick_tx(dev, skb);
1680 * The struct Qdisc which is pointed to by qdisc is now a 1774 q = rcu_dereference(txq->qdisc);
1681 * rcu structure - it may be accessed without acquiring
1682 * a lock (but the structure may be stale.) The freeing of the
1683 * qdisc will be deferred until it's known that there are no
1684 * more references to it.
1685 *
1686 * If the qdisc has an enqueue function, we still need to
1687 * hold the queue_lock before calling it, since queue_lock
1688 * also serializes access to the device queue.
1689 */
1690 1775
1691 q = rcu_dereference(dev->qdisc);
1692#ifdef CONFIG_NET_CLS_ACT 1776#ifdef CONFIG_NET_CLS_ACT
1693 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); 1777 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1694#endif 1778#endif
1695 if (q->enqueue) { 1779 if (q->enqueue) {
1696 /* Grab device queue */ 1780 spinlock_t *root_lock = qdisc_root_lock(q);
1697 spin_lock(&dev->queue_lock); 1781
1698 q = dev->qdisc; 1782 spin_lock(root_lock);
1699 if (q->enqueue) { 1783
1700 /* reset queue_mapping to zero */ 1784 rc = q->enqueue(skb, q);
1701 skb_set_queue_mapping(skb, 0); 1785 qdisc_run(q);
1702 rc = q->enqueue(skb, q); 1786
1703 qdisc_run(dev); 1787 spin_unlock(root_lock);
1704 spin_unlock(&dev->queue_lock); 1788
1705 1789 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1706 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; 1790 goto out;
1707 goto out;
1708 }
1709 spin_unlock(&dev->queue_lock);
1710 } 1791 }
1711 1792
1712 /* The device has no queue. Common case for software devices: 1793 /* The device has no queue. Common case for software devices:
@@ -1724,19 +1805,18 @@ gso:
1724 if (dev->flags & IFF_UP) { 1805 if (dev->flags & IFF_UP) {
1725 int cpu = smp_processor_id(); /* ok because BHs are off */ 1806 int cpu = smp_processor_id(); /* ok because BHs are off */
1726 1807
1727 if (dev->xmit_lock_owner != cpu) { 1808 if (txq->xmit_lock_owner != cpu) {
1728 1809
1729 HARD_TX_LOCK(dev, cpu); 1810 HARD_TX_LOCK(dev, txq, cpu);
1730 1811
1731 if (!netif_queue_stopped(dev) && 1812 if (!netif_tx_queue_stopped(txq)) {
1732 !netif_subqueue_stopped(dev, skb)) {
1733 rc = 0; 1813 rc = 0;
1734 if (!dev_hard_start_xmit(skb, dev)) { 1814 if (!dev_hard_start_xmit(skb, dev, txq)) {
1735 HARD_TX_UNLOCK(dev); 1815 HARD_TX_UNLOCK(dev, txq);
1736 goto out; 1816 goto out;
1737 } 1817 }
1738 } 1818 }
1739 HARD_TX_UNLOCK(dev); 1819 HARD_TX_UNLOCK(dev, txq);
1740 if (net_ratelimit()) 1820 if (net_ratelimit())
1741 printk(KERN_CRIT "Virtual device %s asks to " 1821 printk(KERN_CRIT "Virtual device %s asks to "
1742 "queue packet!\n", dev->name); 1822 "queue packet!\n", dev->name);
@@ -1880,7 +1960,7 @@ static void net_tx_action(struct softirq_action *h)
1880 } 1960 }
1881 1961
1882 if (sd->output_queue) { 1962 if (sd->output_queue) {
1883 struct net_device *head; 1963 struct Qdisc *head;
1884 1964
1885 local_irq_disable(); 1965 local_irq_disable();
1886 head = sd->output_queue; 1966 head = sd->output_queue;
@@ -1888,17 +1968,20 @@ static void net_tx_action(struct softirq_action *h)
1888 local_irq_enable(); 1968 local_irq_enable();
1889 1969
1890 while (head) { 1970 while (head) {
1891 struct net_device *dev = head; 1971 struct Qdisc *q = head;
1972 spinlock_t *root_lock;
1973
1892 head = head->next_sched; 1974 head = head->next_sched;
1893 1975
1894 smp_mb__before_clear_bit(); 1976 smp_mb__before_clear_bit();
1895 clear_bit(__LINK_STATE_SCHED, &dev->state); 1977 clear_bit(__QDISC_STATE_SCHED, &q->state);
1896 1978
1897 if (spin_trylock(&dev->queue_lock)) { 1979 root_lock = qdisc_root_lock(q);
1898 qdisc_run(dev); 1980 if (spin_trylock(root_lock)) {
1899 spin_unlock(&dev->queue_lock); 1981 qdisc_run(q);
1982 spin_unlock(root_lock);
1900 } else { 1983 } else {
1901 netif_schedule(dev); 1984 __netif_schedule(q);
1902 } 1985 }
1903 } 1986 }
1904 } 1987 }
@@ -1979,10 +2062,11 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1979 */ 2062 */
1980static int ing_filter(struct sk_buff *skb) 2063static int ing_filter(struct sk_buff *skb)
1981{ 2064{
1982 struct Qdisc *q;
1983 struct net_device *dev = skb->dev; 2065 struct net_device *dev = skb->dev;
1984 int result = TC_ACT_OK;
1985 u32 ttl = G_TC_RTTL(skb->tc_verd); 2066 u32 ttl = G_TC_RTTL(skb->tc_verd);
2067 struct netdev_queue *rxq;
2068 int result = TC_ACT_OK;
2069 struct Qdisc *q;
1986 2070
1987 if (MAX_RED_LOOP < ttl++) { 2071 if (MAX_RED_LOOP < ttl++) {
1988 printk(KERN_WARNING 2072 printk(KERN_WARNING
@@ -1994,10 +2078,14 @@ static int ing_filter(struct sk_buff *skb)
1994 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); 2078 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
1995 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); 2079 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1996 2080
1997 spin_lock(&dev->ingress_lock); 2081 rxq = &dev->rx_queue;
1998 if ((q = dev->qdisc_ingress) != NULL) 2082
2083 q = rxq->qdisc;
2084 if (q) {
2085 spin_lock(qdisc_lock(q));
1999 result = q->enqueue(skb, q); 2086 result = q->enqueue(skb, q);
2000 spin_unlock(&dev->ingress_lock); 2087 spin_unlock(qdisc_lock(q));
2088 }
2001 2089
2002 return result; 2090 return result;
2003} 2091}
@@ -2006,7 +2094,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2006 struct packet_type **pt_prev, 2094 struct packet_type **pt_prev,
2007 int *ret, struct net_device *orig_dev) 2095 int *ret, struct net_device *orig_dev)
2008{ 2096{
2009 if (!skb->dev->qdisc_ingress) 2097 if (!skb->dev->rx_queue.qdisc)
2010 goto out; 2098 goto out;
2011 2099
2012 if (*pt_prev) { 2100 if (*pt_prev) {
@@ -2030,6 +2118,33 @@ out:
2030} 2118}
2031#endif 2119#endif
2032 2120
2121/*
2122 * netif_nit_deliver - deliver received packets to network taps
2123 * @skb: buffer
2124 *
2125 * This function is used to deliver incoming packets to network
2126 * taps. It should be used when the normal netif_receive_skb path
2127 * is bypassed, for example because of VLAN acceleration.
2128 */
2129void netif_nit_deliver(struct sk_buff *skb)
2130{
2131 struct packet_type *ptype;
2132
2133 if (list_empty(&ptype_all))
2134 return;
2135
2136 skb_reset_network_header(skb);
2137 skb_reset_transport_header(skb);
2138 skb->mac_len = skb->network_header - skb->mac_header;
2139
2140 rcu_read_lock();
2141 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2142 if (!ptype->dev || ptype->dev == skb->dev)
2143 deliver_skb(skb, ptype, skb->dev);
2144 }
2145 rcu_read_unlock();
2146}
2147
2033/** 2148/**
2034 * netif_receive_skb - process receive buffer from network 2149 * netif_receive_skb - process receive buffer from network
2035 * @skb: buffer to process 2150 * @skb: buffer to process
@@ -2769,16 +2884,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2769 return 0; 2884 return 0;
2770} 2885}
2771 2886
2772static void __dev_set_promiscuity(struct net_device *dev, int inc) 2887static int __dev_set_promiscuity(struct net_device *dev, int inc)
2773{ 2888{
2774 unsigned short old_flags = dev->flags; 2889 unsigned short old_flags = dev->flags;
2775 2890
2776 ASSERT_RTNL(); 2891 ASSERT_RTNL();
2777 2892
2778 if ((dev->promiscuity += inc) == 0) 2893 dev->flags |= IFF_PROMISC;
2779 dev->flags &= ~IFF_PROMISC; 2894 dev->promiscuity += inc;
2780 else 2895 if (dev->promiscuity == 0) {
2781 dev->flags |= IFF_PROMISC; 2896 /*
2897 * Avoid overflow.
2898 * If inc causes overflow, untouch promisc and return error.
2899 */
2900 if (inc < 0)
2901 dev->flags &= ~IFF_PROMISC;
2902 else {
2903 dev->promiscuity -= inc;
2904 printk(KERN_WARNING "%s: promiscuity touches roof, "
2905 "set promiscuity failed, promiscuity feature "
2906 "of device might be broken.\n", dev->name);
2907 return -EOVERFLOW;
2908 }
2909 }
2782 if (dev->flags != old_flags) { 2910 if (dev->flags != old_flags) {
2783 printk(KERN_INFO "device %s %s promiscuous mode\n", 2911 printk(KERN_INFO "device %s %s promiscuous mode\n",
2784 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 2912 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
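[editor's note] The rewritten __dev_set_promiscuity() treats the unsigned promiscuity counter reaching zero after a positive increment as wrap-around: it backs the increment out and fails with -EOVERFLOW instead of silently dropping promiscuous mode. A compact runnable demonstration of just the counter guard (the toy omits the IFF_PROMISC flag bookkeeping):

#include <stdio.h>

#define EOVERFLOW 75

static unsigned int promiscuity;

static int set_promiscuity(int inc)
{
	promiscuity += inc;
	if (promiscuity == 0) {
		if (inc < 0)
			return 0;	/* legitimate final decrement */
		promiscuity -= inc;	/* wrapped: undo and report */
		return -EOVERFLOW;
	}
	return 0;
}

int main(void)
{
	promiscuity = ~0u;		/* counter already saturated */
	printf("inc at max -> %d\n", set_promiscuity(1));
	printf("counter still %u\n", promiscuity);
	return 0;
}
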
@@ -2796,6 +2924,7 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
2796 if (dev->change_rx_flags) 2924 if (dev->change_rx_flags)
2797 dev->change_rx_flags(dev, IFF_PROMISC); 2925 dev->change_rx_flags(dev, IFF_PROMISC);
2798 } 2926 }
2927 return 0;
2799} 2928}
2800 2929
2801/** 2930/**
@@ -2807,14 +2936,19 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
2807 * remains above zero the interface remains promiscuous. Once it hits zero 2936 * remains above zero the interface remains promiscuous. Once it hits zero
2808 * the device reverts back to normal filtering operation. A negative inc 2937 * the device reverts back to normal filtering operation. A negative inc
2809 * value is used to drop promiscuity on the device. 2938 * value is used to drop promiscuity on the device.
2939 * Return 0 if successful or a negative errno code on error.
2810 */ 2940 */
2811void dev_set_promiscuity(struct net_device *dev, int inc) 2941int dev_set_promiscuity(struct net_device *dev, int inc)
2812{ 2942{
2813 unsigned short old_flags = dev->flags; 2943 unsigned short old_flags = dev->flags;
2944 int err;
2814 2945
2815 __dev_set_promiscuity(dev, inc); 2946 err = __dev_set_promiscuity(dev, inc);
2947 if (err < 0)
2948 return err;
2816 if (dev->flags != old_flags) 2949 if (dev->flags != old_flags)
2817 dev_set_rx_mode(dev); 2950 dev_set_rx_mode(dev);
2951 return err;
2818} 2952}
2819 2953
2820/** 2954/**
@@ -2827,22 +2961,38 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2827 * to all interfaces. Once it hits zero the device reverts back to normal 2961 * to all interfaces. Once it hits zero the device reverts back to normal
2828 * filtering operation. A negative @inc value is used to drop the counter 2962 * filtering operation. A negative @inc value is used to drop the counter
2829 * when releasing a resource needing all multicasts. 2963 * when releasing a resource needing all multicasts.
2964 * Return 0 if successful or a negative errno code on error.
2830 */ 2965 */
2831 2966
2832void dev_set_allmulti(struct net_device *dev, int inc) 2967int dev_set_allmulti(struct net_device *dev, int inc)
2833{ 2968{
2834 unsigned short old_flags = dev->flags; 2969 unsigned short old_flags = dev->flags;
2835 2970
2836 ASSERT_RTNL(); 2971 ASSERT_RTNL();
2837 2972
2838 dev->flags |= IFF_ALLMULTI; 2973 dev->flags |= IFF_ALLMULTI;
2839 if ((dev->allmulti += inc) == 0) 2974 dev->allmulti += inc;
2840 dev->flags &= ~IFF_ALLMULTI; 2975 if (dev->allmulti == 0) {
2976 /*
2977 * Avoid overflow.
2978 * If inc causes overflow, untouch allmulti and return error.
2979 */
2980 if (inc < 0)
2981 dev->flags &= ~IFF_ALLMULTI;
2982 else {
2983 dev->allmulti -= inc;
2984 printk(KERN_WARNING "%s: allmulti touches roof, "
2985 "set allmulti failed, allmulti feature of "
2986 "device might be broken.\n", dev->name);
2987 return -EOVERFLOW;
2988 }
2989 }
2841 if (dev->flags ^ old_flags) { 2990 if (dev->flags ^ old_flags) {
2842 if (dev->change_rx_flags) 2991 if (dev->change_rx_flags)
2843 dev->change_rx_flags(dev, IFF_ALLMULTI); 2992 dev->change_rx_flags(dev, IFF_ALLMULTI);
2844 dev_set_rx_mode(dev); 2993 dev_set_rx_mode(dev);
2845 } 2994 }
2995 return 0;
2846} 2996}
2847 2997
2848/* 2998/*
@@ -2881,9 +3031,9 @@ void __dev_set_rx_mode(struct net_device *dev)
2881 3031
2882void dev_set_rx_mode(struct net_device *dev) 3032void dev_set_rx_mode(struct net_device *dev)
2883{ 3033{
2884 netif_tx_lock_bh(dev); 3034 netif_addr_lock_bh(dev);
2885 __dev_set_rx_mode(dev); 3035 __dev_set_rx_mode(dev);
2886 netif_tx_unlock_bh(dev); 3036 netif_addr_unlock_bh(dev);
2887} 3037}
2888 3038
2889int __dev_addr_delete(struct dev_addr_list **list, int *count, 3039int __dev_addr_delete(struct dev_addr_list **list, int *count,
@@ -2961,11 +3111,11 @@ int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2961 3111
2962 ASSERT_RTNL(); 3112 ASSERT_RTNL();
2963 3113
2964 netif_tx_lock_bh(dev); 3114 netif_addr_lock_bh(dev);
2965 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); 3115 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2966 if (!err) 3116 if (!err)
2967 __dev_set_rx_mode(dev); 3117 __dev_set_rx_mode(dev);
2968 netif_tx_unlock_bh(dev); 3118 netif_addr_unlock_bh(dev);
2969 return err; 3119 return err;
2970} 3120}
2971EXPORT_SYMBOL(dev_unicast_delete); 3121EXPORT_SYMBOL(dev_unicast_delete);
@@ -2987,11 +3137,11 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2987 3137
2988 ASSERT_RTNL(); 3138 ASSERT_RTNL();
2989 3139
2990 netif_tx_lock_bh(dev); 3140 netif_addr_lock_bh(dev);
2991 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); 3141 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2992 if (!err) 3142 if (!err)
2993 __dev_set_rx_mode(dev); 3143 __dev_set_rx_mode(dev);
2994 netif_tx_unlock_bh(dev); 3144 netif_addr_unlock_bh(dev);
2995 return err; 3145 return err;
2996} 3146}
2997EXPORT_SYMBOL(dev_unicast_add); 3147EXPORT_SYMBOL(dev_unicast_add);
@@ -3058,12 +3208,12 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
3058{ 3208{
3059 int err = 0; 3209 int err = 0;
3060 3210
3061 netif_tx_lock_bh(to); 3211 netif_addr_lock_bh(to);
3062 err = __dev_addr_sync(&to->uc_list, &to->uc_count, 3212 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3063 &from->uc_list, &from->uc_count); 3213 &from->uc_list, &from->uc_count);
3064 if (!err) 3214 if (!err)
3065 __dev_set_rx_mode(to); 3215 __dev_set_rx_mode(to);
3066 netif_tx_unlock_bh(to); 3216 netif_addr_unlock_bh(to);
3067 return err; 3217 return err;
3068} 3218}
3069EXPORT_SYMBOL(dev_unicast_sync); 3219EXPORT_SYMBOL(dev_unicast_sync);
@@ -3079,15 +3229,15 @@ EXPORT_SYMBOL(dev_unicast_sync);
3079 */ 3229 */
3080void dev_unicast_unsync(struct net_device *to, struct net_device *from) 3230void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3081{ 3231{
3082 netif_tx_lock_bh(from); 3232 netif_addr_lock_bh(from);
3083 netif_tx_lock_bh(to); 3233 netif_addr_lock(to);
3084 3234
3085 __dev_addr_unsync(&to->uc_list, &to->uc_count, 3235 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3086 &from->uc_list, &from->uc_count); 3236 &from->uc_list, &from->uc_count);
3087 __dev_set_rx_mode(to); 3237 __dev_set_rx_mode(to);
3088 3238
3089 netif_tx_unlock_bh(to); 3239 netif_addr_unlock(to);
3090 netif_tx_unlock_bh(from); 3240 netif_addr_unlock_bh(from);
3091} 3241}
3092EXPORT_SYMBOL(dev_unicast_unsync); 3242EXPORT_SYMBOL(dev_unicast_unsync);
3093 3243
@@ -3107,7 +3257,7 @@ static void __dev_addr_discard(struct dev_addr_list **list)
3107 3257
3108static void dev_addr_discard(struct net_device *dev) 3258static void dev_addr_discard(struct net_device *dev)
3109{ 3259{
3110 netif_tx_lock_bh(dev); 3260 netif_addr_lock_bh(dev);
3111 3261
3112 __dev_addr_discard(&dev->uc_list); 3262 __dev_addr_discard(&dev->uc_list);
3113 dev->uc_count = 0; 3263 dev->uc_count = 0;
@@ -3115,7 +3265,7 @@ static void dev_addr_discard(struct net_device *dev)
3115 __dev_addr_discard(&dev->mc_list); 3265 __dev_addr_discard(&dev->mc_list);
3116 dev->mc_count = 0; 3266 dev->mc_count = 0;
3117 3267
3118 netif_tx_unlock_bh(dev); 3268 netif_addr_unlock_bh(dev);
3119} 3269}
3120 3270
3121unsigned dev_get_flags(const struct net_device *dev) 3271unsigned dev_get_flags(const struct net_device *dev)
@@ -3688,6 +3838,21 @@ static void rollback_registered(struct net_device *dev)
3688 dev_put(dev); 3838 dev_put(dev);
3689} 3839}
3690 3840
3841static void __netdev_init_queue_locks_one(struct net_device *dev,
3842 struct netdev_queue *dev_queue,
3843 void *_unused)
3844{
3845 spin_lock_init(&dev_queue->_xmit_lock);
3846 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3847 dev_queue->xmit_lock_owner = -1;
3848}
3849
3850static void netdev_init_queue_locks(struct net_device *dev)
3851{
3852 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3853 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3854}
3855
3691/** 3856/**
3692 * register_netdevice - register a network device 3857 * register_netdevice - register a network device
3693 * @dev: device to register 3858 * @dev: device to register
@@ -3722,11 +3887,10 @@ int register_netdevice(struct net_device *dev)
3722 BUG_ON(!dev_net(dev)); 3887 BUG_ON(!dev_net(dev));
3723 net = dev_net(dev); 3888 net = dev_net(dev);
3724 3889
3725 spin_lock_init(&dev->queue_lock); 3890 spin_lock_init(&dev->addr_list_lock);
3726 spin_lock_init(&dev->_xmit_lock); 3891 spin_lock_init(&dev->qdisc_list_lock);
3727 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type); 3892 INIT_LIST_HEAD(&dev->qdisc_list);
3728 dev->xmit_lock_owner = -1; 3893 netdev_init_queue_locks(dev);
3729 spin_lock_init(&dev->ingress_lock);
3730 3894
3731 dev->iflink = -1; 3895 dev->iflink = -1;
3732 3896
@@ -4007,6 +4171,19 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
4007 return &dev->stats; 4171 return &dev->stats;
4008} 4172}
4009 4173
4174static void netdev_init_one_queue(struct net_device *dev,
4175 struct netdev_queue *queue,
4176 void *_unused)
4177{
4178 queue->dev = dev;
4179}
4180
4181static void netdev_init_queues(struct net_device *dev)
4182{
4183 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4184 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4185}
4186
4010/** 4187/**
4011 * alloc_netdev_mq - allocate network device 4188 * alloc_netdev_mq - allocate network device
4012 * @sizeof_priv: size of private data to allocate space for 4189 * @sizeof_priv: size of private data to allocate space for
@@ -4021,14 +4198,14 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
4021struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 4198struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4022 void (*setup)(struct net_device *), unsigned int queue_count) 4199 void (*setup)(struct net_device *), unsigned int queue_count)
4023{ 4200{
4024 void *p; 4201 struct netdev_queue *tx;
4025 struct net_device *dev; 4202 struct net_device *dev;
4026 int alloc_size; 4203 int alloc_size;
4204 void *p;
4027 4205
4028 BUG_ON(strlen(name) >= sizeof(dev->name)); 4206 BUG_ON(strlen(name) >= sizeof(dev->name));
4029 4207
4030 alloc_size = sizeof(struct net_device) + 4208 alloc_size = sizeof(struct net_device);
4031 sizeof(struct net_device_subqueue) * (queue_count - 1);
4032 if (sizeof_priv) { 4209 if (sizeof_priv) {
4033 /* ensure 32-byte alignment of private area */ 4210 /* ensure 32-byte alignment of private area */
4034 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 4211 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
@@ -4043,22 +4220,33 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4043 return NULL; 4220 return NULL;
4044 } 4221 }
4045 4222
4223 tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
4224 if (!tx) {
4225 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4226 "tx qdiscs.\n");
4227 kfree(p);
4228 return NULL;
4229 }
4230
4046 dev = (struct net_device *) 4231 dev = (struct net_device *)
4047 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 4232 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4048 dev->padded = (char *)dev - (char *)p; 4233 dev->padded = (char *)dev - (char *)p;
4049 dev_net_set(dev, &init_net); 4234 dev_net_set(dev, &init_net);
4050 4235
4236 dev->_tx = tx;
4237 dev->num_tx_queues = queue_count;
4238 dev->real_num_tx_queues = queue_count;
4239
4051 if (sizeof_priv) { 4240 if (sizeof_priv) {
4052 dev->priv = ((char *)dev + 4241 dev->priv = ((char *)dev +
4053 ((sizeof(struct net_device) + 4242 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4054 (sizeof(struct net_device_subqueue) *
4055 (queue_count - 1)) + NETDEV_ALIGN_CONST)
4056 & ~NETDEV_ALIGN_CONST)); 4243 & ~NETDEV_ALIGN_CONST));
4057 } 4244 }
4058 4245
4059 dev->egress_subqueue_count = queue_count;
4060 dev->gso_max_size = GSO_MAX_SIZE; 4246 dev->gso_max_size = GSO_MAX_SIZE;
4061 4247
4248 netdev_init_queues(dev);
4249
4062 dev->get_stats = internal_stats; 4250 dev->get_stats = internal_stats;
4063 netpoll_netdev_init(dev); 4251 netpoll_netdev_init(dev);
4064 setup(dev); 4252 setup(dev);
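[editor's note] alloc_netdev_mq() now allocates the netdev_queue array separately from the device itself, so a failed second allocation must free the first, and free_netdev() must kfree dev->_tx (as the next hunk adds). The two-allocation pattern with paired unwinding, as a runnable toy with invented types:

#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };
struct device { struct queue *tx; unsigned int num_tx; };

static struct device *alloc_device(unsigned int queue_count)
{
	struct device *dev = calloc(1, sizeof(*dev));
	if (!dev)
		return NULL;

	dev->tx = calloc(queue_count, sizeof(struct queue));
	if (!dev->tx) {		/* second allocation failed: unwind first */
		free(dev);
		return NULL;
	}
	dev->num_tx = queue_count;
	return dev;
}

static void free_device(struct device *dev)
{
	free(dev->tx);		/* mirror of the kfree(dev->_tx) hunk */
	free(dev);
}

int main(void)
{
	struct device *dev = alloc_device(8);

	if (!dev)
		return 1;
	printf("allocated %u tx queues\n", dev->num_tx);
	free_device(dev);
	return 0;
}
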
@@ -4079,6 +4267,8 @@ void free_netdev(struct net_device *dev)
4079{ 4267{
4080 release_net(dev_net(dev)); 4268 release_net(dev_net(dev));
4081 4269
4270 kfree(dev->_tx);
4271
4082 /* Compatibility with error handling in drivers */ 4272 /* Compatibility with error handling in drivers */
4083 if (dev->reg_state == NETREG_UNINITIALIZED) { 4273 if (dev->reg_state == NETREG_UNINITIALIZED) {
4084 kfree((char *)dev - dev->padded); 4274 kfree((char *)dev - dev->padded);
@@ -4260,7 +4450,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
4260 void *ocpu) 4450 void *ocpu)
4261{ 4451{
4262 struct sk_buff **list_skb; 4452 struct sk_buff **list_skb;
4263 struct net_device **list_net; 4453 struct Qdisc **list_net;
4264 struct sk_buff *skb; 4454 struct sk_buff *skb;
4265 unsigned int cpu, oldcpu = (unsigned long)ocpu; 4455 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4266 struct softnet_data *sd, *oldsd; 4456 struct softnet_data *sd, *oldsd;
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index f8a3455f4493..5402b3b38e0d 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -72,7 +72,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
72{ 72{
73 int err; 73 int err;
74 74
75 netif_tx_lock_bh(dev); 75 netif_addr_lock_bh(dev);
76 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, 76 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
77 addr, alen, glbl); 77 addr, alen, glbl);
78 if (!err) { 78 if (!err) {
@@ -83,7 +83,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
83 83
84 __dev_set_rx_mode(dev); 84 __dev_set_rx_mode(dev);
85 } 85 }
86 netif_tx_unlock_bh(dev); 86 netif_addr_unlock_bh(dev);
87 return err; 87 return err;
88} 88}
89 89
@@ -95,11 +95,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
95{ 95{
96 int err; 96 int err;
97 97
98 netif_tx_lock_bh(dev); 98 netif_addr_lock_bh(dev);
99 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); 99 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
100 if (!err) 100 if (!err)
101 __dev_set_rx_mode(dev); 101 __dev_set_rx_mode(dev);
102 netif_tx_unlock_bh(dev); 102 netif_addr_unlock_bh(dev);
103 return err; 103 return err;
104} 104}
105 105
@@ -119,12 +119,12 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
119{ 119{
120 int err = 0; 120 int err = 0;
121 121
122 netif_tx_lock_bh(to); 122 netif_addr_lock_bh(to);
123 err = __dev_addr_sync(&to->mc_list, &to->mc_count, 123 err = __dev_addr_sync(&to->mc_list, &to->mc_count,
124 &from->mc_list, &from->mc_count); 124 &from->mc_list, &from->mc_count);
125 if (!err) 125 if (!err)
126 __dev_set_rx_mode(to); 126 __dev_set_rx_mode(to);
127 netif_tx_unlock_bh(to); 127 netif_addr_unlock_bh(to);
128 128
129 return err; 129 return err;
130} 130}
@@ -143,15 +143,15 @@ EXPORT_SYMBOL(dev_mc_sync);
143 */ 143 */
144void dev_mc_unsync(struct net_device *to, struct net_device *from) 144void dev_mc_unsync(struct net_device *to, struct net_device *from)
145{ 145{
146 netif_tx_lock_bh(from); 146 netif_addr_lock_bh(from);
147 netif_tx_lock_bh(to); 147 netif_addr_lock(to);
148 148
149 __dev_addr_unsync(&to->mc_list, &to->mc_count, 149 __dev_addr_unsync(&to->mc_list, &to->mc_count,
150 &from->mc_list, &from->mc_count); 150 &from->mc_list, &from->mc_count);
151 __dev_set_rx_mode(to); 151 __dev_set_rx_mode(to);
152 152
153 netif_tx_unlock_bh(to); 153 netif_addr_unlock(to);
154 netif_tx_unlock_bh(from); 154 netif_addr_unlock_bh(from);
155} 155}
156EXPORT_SYMBOL(dev_mc_unsync); 156EXPORT_SYMBOL(dev_mc_unsync);
157 157
@@ -164,7 +164,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
164 if (v == SEQ_START_TOKEN) 164 if (v == SEQ_START_TOKEN)
165 return 0; 165 return 0;
166 166
167 netif_tx_lock_bh(dev); 167 netif_addr_lock_bh(dev);
168 for (m = dev->mc_list; m; m = m->next) { 168 for (m = dev->mc_list; m; m = m->next) {
169 int i; 169 int i;
170 170
@@ -176,7 +176,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
176 176
177 seq_putc(seq, '\n'); 177 seq_putc(seq, '\n');
178 } 178 }
179 netif_tx_unlock_bh(dev); 179 netif_addr_unlock_bh(dev);
180 return 0; 180 return 0;
181} 181}
182 182
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0133b5ebd545..14ada537f895 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -209,6 +209,36 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
209 return 0; 209 return 0;
210} 210}
211 211
212static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr)
213{
214 struct ethtool_rxnfc cmd;
215
216 if (!dev->ethtool_ops->set_rxhash)
217 return -EOPNOTSUPP;
218
219 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
220 return -EFAULT;
221
222 return dev->ethtool_ops->set_rxhash(dev, &cmd);
223}
224
225static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr)
226{
227 struct ethtool_rxnfc info;
228
229 if (!dev->ethtool_ops->get_rxhash)
230 return -EOPNOTSUPP;
231
232 if (copy_from_user(&info, useraddr, sizeof(info)))
233 return -EFAULT;
234
235 dev->ethtool_ops->get_rxhash(dev, &info);
236
237 if (copy_to_user(useraddr, &info, sizeof(info)))
238 return -EFAULT;
239 return 0;
240}
241
212static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 242static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
213{ 243{
214 struct ethtool_regs regs; 244 struct ethtool_regs regs;
@@ -826,6 +856,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
826 case ETHTOOL_GGSO: 856 case ETHTOOL_GGSO:
827 case ETHTOOL_GFLAGS: 857 case ETHTOOL_GFLAGS:
828 case ETHTOOL_GPFLAGS: 858 case ETHTOOL_GPFLAGS:
859 case ETHTOOL_GRXFH:
829 break; 860 break;
830 default: 861 default:
831 if (!capable(CAP_NET_ADMIN)) 862 if (!capable(CAP_NET_ADMIN))
@@ -977,6 +1008,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
977 rc = ethtool_set_value(dev, useraddr, 1008 rc = ethtool_set_value(dev, useraddr,
978 dev->ethtool_ops->set_priv_flags); 1009 dev->ethtool_ops->set_priv_flags);
979 break; 1010 break;
1011 case ETHTOOL_GRXFH:
1012 rc = ethtool_get_rxhash(dev, useraddr);
1013 break;
1014 case ETHTOOL_SRXFH:
1015 rc = ethtool_set_rxhash(dev, useraddr);
1016 break;
980 default: 1017 default:
981 rc = -EOPNOTSUPP; 1018 rc = -EOPNOTSUPP;
982 } 1019 }
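[editor's note] The new ETHTOOL_GRXFH/SRXFH commands follow the standard ethtool ioctl shape: copy a fixed-size command struct in from user space, call the driver hook, and for a "get" copy the filled struct back out. A userspace model of that in/out marshalling; memcpy stands in for copy_from_user/copy_to_user, and the command value is a placeholder.

#include <stdio.h>
#include <string.h>

struct rxnfc { unsigned int cmd; unsigned long long data; };

/* Driver hook: report which header fields feed the RX hash. */
static void get_rxhash(struct rxnfc *info)
{
	info->data = 0x3f;	/* toy: all fields hashed */
}

/* memcpy models copy_from_user/copy_to_user; real code checks
 * their return values and fails with -EFAULT. */
static int ethtool_get_rxhash(void *useraddr)
{
	struct rxnfc info;

	memcpy(&info, useraddr, sizeof(info));	/* copy_from_user */
	get_rxhash(&info);
	memcpy(useraddr, &info, sizeof(info));	/* copy_to_user */
	return 0;
}

int main(void)
{
	struct rxnfc user = { 0x29 /* placeholder cmd */, 0 };

	ethtool_get_rxhash(&user);
	printf("hash fields: 0x%llx\n", user.data);
	return 0;
}
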
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 277a2302eb3a..79de3b14a8d1 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -69,7 +69,7 @@ static void rules_ops_put(struct fib_rules_ops *ops)
69static void flush_route_cache(struct fib_rules_ops *ops) 69static void flush_route_cache(struct fib_rules_ops *ops)
70{ 70{
71 if (ops->flush_cache) 71 if (ops->flush_cache)
72 ops->flush_cache(); 72 ops->flush_cache(ops);
73} 73}
74 74
75int fib_rules_register(struct fib_rules_ops *ops) 75int fib_rules_register(struct fib_rules_ops *ops)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index a5e372b9ec4d..bf8f7af699d7 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -77,10 +77,10 @@ static void rfc2863_policy(struct net_device *dev)
77} 77}
78 78
79 79
80static int linkwatch_urgent_event(struct net_device *dev) 80static bool linkwatch_urgent_event(struct net_device *dev)
81{ 81{
82 return netif_running(dev) && netif_carrier_ok(dev) && 82 return netif_running(dev) && netif_carrier_ok(dev) &&
83 dev->qdisc != dev->qdisc_sleeping; 83 qdisc_tx_changing(dev);
84} 84}
85 85
86 86
@@ -180,10 +180,9 @@ static void __linkwatch_run_queue(int urgent_only)
180 180
181 rfc2863_policy(dev); 181 rfc2863_policy(dev);
182 if (dev->flags & IFF_UP) { 182 if (dev->flags & IFF_UP) {
183 if (netif_carrier_ok(dev)) { 183 if (netif_carrier_ok(dev))
184 WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
185 dev_activate(dev); 184 dev_activate(dev);
186 } else 185 else
187 dev_deactivate(dev); 186 dev_deactivate(dev);
188 187
189 netdev_state_change(dev); 188 netdev_state_change(dev);
@@ -214,7 +213,7 @@ static void linkwatch_event(struct work_struct *dummy)
214 213
215void linkwatch_fire_event(struct net_device *dev) 214void linkwatch_fire_event(struct net_device *dev)
216{ 215{
217 int urgent = linkwatch_urgent_event(dev); 216 bool urgent = linkwatch_urgent_event(dev);
218 217
219 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 218 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
220 dev_hold(dev); 219 dev_hold(dev);
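
An urgent linkwatch event is now any running, carrier-up device whose qdisc is mid-transition. The helper it calls is not shown in this hunk; a hedged sketch of what qdisc_tx_changing() is assumed to check, using the per-TX-queue qdisc/qdisc_sleeping pair this patch series introduces:

	/* Assumed shape of qdisc_tx_changing(): true while any TX queue's
	 * active qdisc differs from the one it will settle on. */
	static bool qdisc_tx_changing_sketch(struct net_device *dev)
	{
		unsigned int i;

		for (i = 0; i < dev->num_tx_queues; i++) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			if (txq->qdisc != txq->qdisc_sleeping)
				return true;
		}
		return false;
	}
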
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 65f01f71b3f3..f62c8af85d38 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -930,6 +930,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
930 buff = neigh->arp_queue.next; 930 buff = neigh->arp_queue.next;
931 __skb_unlink(buff, &neigh->arp_queue); 931 __skb_unlink(buff, &neigh->arp_queue);
932 kfree_skb(buff); 932 kfree_skb(buff);
933 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
933 } 934 }
934 __skb_queue_tail(&neigh->arp_queue, skb); 935 __skb_queue_tail(&neigh->arp_queue, skb);
935 } 936 }
@@ -2462,12 +2463,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2462 struct neigh_statistics *st = v; 2463 struct neigh_statistics *st = v;
2463 2464
2464 if (v == SEQ_START_TOKEN) { 2465 if (v == SEQ_START_TOKEN) {
2465 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n"); 2466 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2466 return 0; 2467 return 0;
2467 } 2468 }
2468 2469
2469 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 2470 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2470 "%08lx %08lx %08lx %08lx\n", 2471 "%08lx %08lx %08lx %08lx %08lx\n",
2471 atomic_read(&tbl->entries), 2472 atomic_read(&tbl->entries),
2472 2473
2473 st->allocs, 2474 st->allocs,
@@ -2483,7 +2484,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2483 st->rcv_probes_ucast, 2484 st->rcv_probes_ucast,
2484 2485
2485 st->periodic_gc_runs, 2486 st->periodic_gc_runs,
2486 st->forced_gc_runs 2487 st->forced_gc_runs,
2488 st->unres_discards
2487 ); 2489 );
2488 2490
2489 return 0; 2491 return 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 90e2177af081..c1f4e0d428c0 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -242,11 +242,11 @@ static ssize_t netstat_show(const struct device *d,
242 offset % sizeof(unsigned long) != 0); 242 offset % sizeof(unsigned long) != 0);
243 243
244 read_lock(&dev_base_lock); 244 read_lock(&dev_base_lock);
245 if (dev_isalive(dev) && dev->get_stats && 245 if (dev_isalive(dev)) {
246 (stats = (*dev->get_stats)(dev))) 246 stats = dev->get_stats(dev);
247 ret = sprintf(buf, fmt_ulong, 247 ret = sprintf(buf, fmt_ulong,
248 *(unsigned long *)(((u8 *) stats) + offset)); 248 *(unsigned long *)(((u8 *) stats) + offset));
249 249 }
250 read_unlock(&dev_base_lock); 250 read_unlock(&dev_base_lock);
251 return ret; 251 return ret;
252} 252}
@@ -318,7 +318,7 @@ static struct attribute_group netstat_group = {
318 .attrs = netstat_attrs, 318 .attrs = netstat_attrs,
319}; 319};
320 320
321#ifdef CONFIG_WIRELESS_EXT 321#ifdef CONFIG_WIRELESS_EXT_SYSFS
322/* helper function that does all the locking etc for wireless stats */ 322/* helper function that does all the locking etc for wireless stats */
323static ssize_t wireless_show(struct device *d, char *buf, 323static ssize_t wireless_show(struct device *d, char *buf,
324 ssize_t (*format)(const struct iw_statistics *, 324 ssize_t (*format)(const struct iw_statistics *,
@@ -457,10 +457,9 @@ int netdev_register_kobject(struct net_device *net)
457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); 457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);
458 458
459#ifdef CONFIG_SYSFS 459#ifdef CONFIG_SYSFS
460 if (net->get_stats) 460 *groups++ = &netstat_group;
461 *groups++ = &netstat_group;
462 461
463#ifdef CONFIG_WIRELESS_EXT 462#ifdef CONFIG_WIRELESS_EXT_SYSFS
464 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) 463 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
465 *groups++ = &wireless_group; 464 *groups++ = &wireless_group;
466#endif 465#endif
@@ -469,6 +468,19 @@ int netdev_register_kobject(struct net_device *net)
469 return device_add(dev); 468 return device_add(dev);
470} 469}
471 470
471int netdev_class_create_file(struct class_attribute *class_attr)
472{
473 return class_create_file(&net_class, class_attr);
474}
475
476void netdev_class_remove_file(struct class_attribute *class_attr)
477{
478 class_remove_file(&net_class, class_attr);
479}
480
481EXPORT_SYMBOL(netdev_class_create_file);
482EXPORT_SYMBOL(netdev_class_remove_file);
483
472void netdev_initialize_kobject(struct net_device *net) 484void netdev_initialize_kobject(struct net_device *net)
473{ 485{
474 struct device *device = &(net->dev); 486 struct device *device = &(net->dev);
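
netdev_class_create_file()/netdev_class_remove_file() let other modules attach attributes to the net class without net_class itself being exported. A minimal sketch, assuming the class_attribute show() signature of this kernel generation; "foo" and its value are hypothetical:

	static ssize_t foo_show(struct class *cls, char *buf)
	{
		return sprintf(buf, "42\n");	/* appears as /sys/class/net/foo */
	}
	static CLASS_ATTR(foo, 0444, foo_show, NULL);	/* defines class_attr_foo */

	static int __init foo_init(void)
	{
		return netdev_class_create_file(&class_attr_foo);
	}
	static void __exit foo_exit(void)
	{
		netdev_class_remove_file(&class_attr_foo);
	}
	module_init(foo_init);
	module_exit(foo_exit);
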
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8fb134da0346..c12720895ecf 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work)
58 58
59 while ((skb = skb_dequeue(&npinfo->txq))) { 59 while ((skb = skb_dequeue(&npinfo->txq))) {
60 struct net_device *dev = skb->dev; 60 struct net_device *dev = skb->dev;
61 struct netdev_queue *txq;
61 62
62 if (!netif_device_present(dev) || !netif_running(dev)) { 63 if (!netif_device_present(dev) || !netif_running(dev)) {
63 __kfree_skb(skb); 64 __kfree_skb(skb);
64 continue; 65 continue;
65 } 66 }
66 67
68 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
69
67 local_irq_save(flags); 70 local_irq_save(flags);
68 netif_tx_lock(dev); 71 __netif_tx_lock(txq, smp_processor_id());
69 if ((netif_queue_stopped(dev) || 72 if (netif_tx_queue_stopped(txq) ||
70 netif_subqueue_stopped(dev, skb)) || 73 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
72 skb_queue_head(&npinfo->txq, skb); 74 skb_queue_head(&npinfo->txq, skb);
73 netif_tx_unlock(dev); 75 __netif_tx_unlock(txq);
74 local_irq_restore(flags); 76 local_irq_restore(flags);
75 77
76 schedule_delayed_work(&npinfo->tx_work, HZ/10); 78 schedule_delayed_work(&npinfo->tx_work, HZ/10);
77 return; 79 return;
78 } 80 }
79 netif_tx_unlock(dev); 81 __netif_tx_unlock(txq);
80 local_irq_restore(flags); 82 local_irq_restore(flags);
81 } 83 }
82} 84}
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
278 280
279 /* don't get messages out of order, and no recursion */ 281 /* don't get messages out of order, and no recursion */
280 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { 282 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
283 struct netdev_queue *txq;
281 unsigned long flags; 284 unsigned long flags;
282 285
286 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
287
283 local_irq_save(flags); 288 local_irq_save(flags);
284 /* try until next clock tick */ 289 /* try until next clock tick */
285 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 290 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
286 tries > 0; --tries) { 291 tries > 0; --tries) {
287 if (netif_tx_trylock(dev)) { 292 if (__netif_tx_trylock(txq)) {
288 if (!netif_queue_stopped(dev) && 293 if (!netif_tx_queue_stopped(txq))
289 !netif_subqueue_stopped(dev, skb))
290 status = dev->hard_start_xmit(skb, dev); 294 status = dev->hard_start_xmit(skb, dev);
291 netif_tx_unlock(dev); 295 __netif_tx_unlock(txq);
292 296
293 if (status == NETDEV_TX_OK) 297 if (status == NETDEV_TX_OK)
294 break; 298 break;
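
Both netpoll paths now lock only the TX queue the skb is mapped to, instead of the device-global TX lock, and test that queue's stopped state. The pattern, reduced to a sketch (xmit_one_sketch is a hypothetical name; retry and IRQ handling elided):

	static int xmit_one_sketch(struct net_device *dev, struct sk_buff *skb)
	{
		struct netdev_queue *txq =
			netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		int ret = NETDEV_TX_BUSY;

		__netif_tx_lock(txq, smp_processor_id());	/* per-queue, not per-device */
		if (!netif_tx_queue_stopped(txq))
			ret = dev->hard_start_xmit(skb, dev);
		__netif_tx_unlock(txq);
		return ret;
	}
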
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fdf537707e51..906802db4ed4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2123 } 2123 }
2124} 2124}
2125#endif 2125#endif
2126static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
2127{
2128 if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
2129 __u16 t;
2130 if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2131 t = random32() %
2132 (pkt_dev->queue_map_max -
2133 pkt_dev->queue_map_min + 1)
2134 + pkt_dev->queue_map_min;
2135 } else {
2136 t = pkt_dev->cur_queue_map + 1;
2137 if (t > pkt_dev->queue_map_max)
2138 t = pkt_dev->queue_map_min;
2139 }
2140 pkt_dev->cur_queue_map = t;
2141 }
2142}
2143
2126/* Increment/randomize headers according to flags and current values 2144/* Increment/randomize headers according to flags and current values
2127 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2145 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
2128 */ 2146 */
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2325 pkt_dev->cur_pkt_size = t; 2343 pkt_dev->cur_pkt_size = t;
2326 } 2344 }
2327 2345
2328 if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { 2346 set_cur_queue_map(pkt_dev);
2329 __u16 t;
2330 if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2331 t = random32() %
2332 (pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1)
2333 + pkt_dev->queue_map_min;
2334 } else {
2335 t = pkt_dev->cur_queue_map + 1;
2336 if (t > pkt_dev->queue_map_max)
2337 t = pkt_dev->queue_map_min;
2338 }
2339 pkt_dev->cur_queue_map = t;
2340 }
2341 2347
2342 pkt_dev->flows[flow].count++; 2348 pkt_dev->flows[flow].count++;
2343} 2349}
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2458 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2464 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */
2459 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2465 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */
2460 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2466 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2461 2467 u16 queue_map;
2462 2468
2463 if (pkt_dev->nr_labels) 2469 if (pkt_dev->nr_labels)
2464 protocol = htons(ETH_P_MPLS_UC); 2470 protocol = htons(ETH_P_MPLS_UC);
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2469 /* Update any of the values, used when we're incrementing various 2475 /* Update any of the values, used when we're incrementing various
2470 * fields. 2476 * fields.
2471 */ 2477 */
2478 queue_map = pkt_dev->cur_queue_map;
2472 mod_cur_headers(pkt_dev); 2479 mod_cur_headers(pkt_dev);
2473 2480
2474 datalen = (odev->hard_header_len + 16) & ~0xf; 2481 datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2507 skb->network_header = skb->tail; 2514 skb->network_header = skb->tail;
2508 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2515 skb->transport_header = skb->network_header + sizeof(struct iphdr);
2509 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2516 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
2510 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); 2517 skb_set_queue_mapping(skb, queue_map);
2511 iph = ip_hdr(skb); 2518 iph = ip_hdr(skb);
2512 udph = udp_hdr(skb); 2519 udph = udp_hdr(skb);
2513 2520
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2797 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2804 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */
2798 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2805 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */
2799 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2806 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
2807 u16 queue_map;
2800 2808
2801 if (pkt_dev->nr_labels) 2809 if (pkt_dev->nr_labels)
2802 protocol = htons(ETH_P_MPLS_UC); 2810 protocol = htons(ETH_P_MPLS_UC);
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2807 /* Update any of the values, used when we're incrementing various 2815 /* Update any of the values, used when we're incrementing various
2808 * fields. 2816 * fields.
2809 */ 2817 */
2818 queue_map = pkt_dev->cur_queue_map;
2810 mod_cur_headers(pkt_dev); 2819 mod_cur_headers(pkt_dev);
2811 2820
2812 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2821 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2844 skb->network_header = skb->tail; 2853 skb->network_header = skb->tail;
2845 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 2854 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
2846 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 2855 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
2847 skb_set_queue_mapping(skb, pkt_dev->cur_queue_map); 2856 skb_set_queue_mapping(skb, queue_map);
2848 iph = ipv6_hdr(skb); 2857 iph = ipv6_hdr(skb);
2849 udph = udp_hdr(skb); 2858 udph = udp_hdr(skb);
2850 2859
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
3263static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 3272static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3264{ 3273{
3265 struct net_device *odev = NULL; 3274 struct net_device *odev = NULL;
3275 struct netdev_queue *txq;
3266 __u64 idle_start = 0; 3276 __u64 idle_start = 0;
3277 u16 queue_map;
3267 int ret; 3278 int ret;
3268 3279
3269 odev = pkt_dev->odev; 3280 odev = pkt_dev->odev;
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3285 } 3296 }
3286 } 3297 }
3287 3298
3288 if ((netif_queue_stopped(odev) || 3299 if (!pkt_dev->skb) {
3289 (pkt_dev->skb && 3300 set_cur_queue_map(pkt_dev);
3290 netif_subqueue_stopped(odev, pkt_dev->skb))) || 3301 queue_map = pkt_dev->cur_queue_map;
3302 } else {
3303 queue_map = skb_get_queue_mapping(pkt_dev->skb);
3304 }
3305
3306 txq = netdev_get_tx_queue(odev, queue_map);
3307 if (netif_tx_queue_stopped(txq) ||
3291 need_resched()) { 3308 need_resched()) {
3292 idle_start = getCurUs(); 3309 idle_start = getCurUs();
3293 3310
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3303 3320
3304 pkt_dev->idle_acc += getCurUs() - idle_start; 3321 pkt_dev->idle_acc += getCurUs() - idle_start;
3305 3322
3306 if (netif_queue_stopped(odev) || 3323 if (netif_tx_queue_stopped(txq)) {
3307 netif_subqueue_stopped(odev, pkt_dev->skb)) {
3308 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3324 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3309 pkt_dev->next_tx_ns = 0; 3325 pkt_dev->next_tx_ns = 0;
3310 goto out; /* Try the next interface */ 3326 goto out; /* Try the next interface */
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3331 } 3347 }
3332 } 3348 }
3333 3349
3334 netif_tx_lock_bh(odev); 3350 /* fill_packet() might have changed the queue */
3335 if (!netif_queue_stopped(odev) && 3351 queue_map = skb_get_queue_mapping(pkt_dev->skb);
3336 !netif_subqueue_stopped(odev, pkt_dev->skb)) { 3352 txq = netdev_get_tx_queue(odev, queue_map);
3353
3354 __netif_tx_lock_bh(txq);
3355 if (!netif_tx_queue_stopped(txq)) {
3337 3356
3338 atomic_inc(&(pkt_dev->skb->users)); 3357 atomic_inc(&(pkt_dev->skb->users));
3339 retry_now: 3358 retry_now:
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3377 pkt_dev->next_tx_ns = 0; 3396 pkt_dev->next_tx_ns = 0;
3378 } 3397 }
3379 3398
3380 netif_tx_unlock_bh(odev); 3399 __netif_tx_unlock_bh(txq);
3381 3400
3382 /* If pkt_dev->count is zero, then run forever */ 3401 /* If pkt_dev->count is zero, then run forever */
3383 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3402 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
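
set_cur_queue_map() keeps the transmit queue between queue_map_min and queue_map_max, either round-robin or (with F_QUEUE_MAP_RND) uniformly at random; pktgen then resolves the chosen index to a struct netdev_queue before checking stop state and locking. A standalone, runnable demo of the round-robin walk and its wrap-around:

	#include <stdio.h>

	int main(void)
	{
		unsigned short min = 2, max = 5, cur = 2;

		for (int i = 0; i < 8; i++) {
			unsigned short t = cur + 1;	/* next queue in range */

			if (t > max)
				t = min;	/* wrap back to queue_map_min */
			cur = t;
			printf("queue %u\n", cur);	/* 3 4 5 2 3 4 5 2 */
		}
		return 0;
	}
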
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a9a77216310e..71edb8b36341 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -605,8 +605,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
605 int type, u32 pid, u32 seq, u32 change, 605 int type, u32 pid, u32 seq, u32 change,
606 unsigned int flags) 606 unsigned int flags)
607{ 607{
608 struct netdev_queue *txq;
608 struct ifinfomsg *ifm; 609 struct ifinfomsg *ifm;
609 struct nlmsghdr *nlh; 610 struct nlmsghdr *nlh;
611 struct net_device_stats *stats;
612 struct nlattr *attr;
610 613
611 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 614 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
612 if (nlh == NULL) 615 if (nlh == NULL)
@@ -633,8 +636,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
633 if (dev->master) 636 if (dev->master)
634 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); 637 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
635 638
636 if (dev->qdisc_sleeping) 639 txq = netdev_get_tx_queue(dev, 0);
637 NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc_sleeping->ops->id); 640 if (txq->qdisc_sleeping)
641 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
638 642
639 if (1) { 643 if (1) {
640 struct rtnl_link_ifmap map = { 644 struct rtnl_link_ifmap map = {
@@ -653,19 +657,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
653 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); 657 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
654 } 658 }
655 659
656 if (dev->get_stats) { 660 attr = nla_reserve(skb, IFLA_STATS,
657 struct net_device_stats *stats = dev->get_stats(dev); 661 sizeof(struct rtnl_link_stats));
658 if (stats) { 662 if (attr == NULL)
659 struct nlattr *attr; 663 goto nla_put_failure;
660 664
661 attr = nla_reserve(skb, IFLA_STATS, 665 stats = dev->get_stats(dev);
662 sizeof(struct rtnl_link_stats)); 666 copy_rtnl_link_stats(nla_data(attr), stats);
663 if (attr == NULL)
664 goto nla_put_failure;
665
666 copy_rtnl_link_stats(nla_data(attr), stats);
667 }
668 }
669 667
670 if (dev->rtnl_link_ops) { 668 if (dev->rtnl_link_ops) {
671 if (rtnl_link_fill(skb, dev) < 0) 669 if (rtnl_link_fill(skb, dev) < 0)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 366621610e76..e4115672b6cf 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,8 +4,6 @@
4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk> 4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
5 * Florian La Roche <rzsfl@rz.uni-sb.de> 5 * Florian La Roche <rzsfl@rz.uni-sb.de>
6 * 6 *
7 * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
8 *
9 * Fixes: 7 * Fixes:
10 * Alan Cox : Fixed the worst of the load 8 * Alan Cox : Fixed the worst of the load
11 * balancer bugs. 9 * balancer bugs.
@@ -461,6 +459,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
461 new->tc_verd = old->tc_verd; 459 new->tc_verd = old->tc_verd;
462#endif 460#endif
463#endif 461#endif
462 new->vlan_tci = old->vlan_tci;
463
464 skb_copy_secmark(new, old); 464 skb_copy_secmark(new, old);
465} 465}
466 466
@@ -1282,114 +1282,83 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1282 return 0; 1282 return 0;
1283} 1283}
1284 1284
1285/* 1285static inline void __segment_seek(struct page **page, unsigned int *poff,
1286 * Map linear and fragment data from the skb to spd. Returns number of 1286 unsigned int *plen, unsigned int off)
1287 * pages mapped. 1287{
1288 */ 1288 *poff += off;
1289static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, 1289 *page += *poff / PAGE_SIZE;
1290 unsigned int *total_len, 1290 *poff = *poff % PAGE_SIZE;
1291 struct splice_pipe_desc *spd) 1291 *plen -= off;
1292{ 1292}
1293 unsigned int nr_pages = spd->nr_pages; 1293
1294 unsigned int poff, plen, len, toff, tlen; 1294static inline int __splice_segment(struct page *page, unsigned int poff,
1295 int headlen, seg, error = 0; 1295 unsigned int plen, unsigned int *off,
1296 1296 unsigned int *len, struct sk_buff *skb,
1297 toff = *offset; 1297 struct splice_pipe_desc *spd)
1298 tlen = *total_len; 1298{
1299 if (!tlen) { 1299 if (!*len)
1300 error = 1; 1300 return 1;
1301 goto err; 1301
1302 /* skip this segment if already processed */
1303 if (*off >= plen) {
1304 *off -= plen;
1305 return 0;
1302 } 1306 }
1303 1307
1304 /* 1308 /* ignore any bits we already processed */
1305 * if the offset is greater than the linear part, go directly to 1309 if (*off) {
1306 * the fragments. 1310 __segment_seek(&page, &poff, &plen, *off);
1307 */ 1311 *off = 0;
1308 headlen = skb_headlen(skb);
1309 if (toff >= headlen) {
1310 toff -= headlen;
1311 goto map_frag;
1312 } 1312 }
1313 1313
1314 /* 1314 do {
1315 * first map the linear region into the pages/partial map, skipping 1315 unsigned int flen = min(*len, plen);
1316 * any potential initial offset.
1317 */
1318 len = 0;
1319 while (len < headlen) {
1320 void *p = skb->data + len;
1321
1322 poff = (unsigned long) p & (PAGE_SIZE - 1);
1323 plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff);
1324 len += plen;
1325
1326 if (toff) {
1327 if (plen <= toff) {
1328 toff -= plen;
1329 continue;
1330 }
1331 plen -= toff;
1332 poff += toff;
1333 toff = 0;
1334 }
1335 1316
1336 plen = min(plen, tlen); 1317 /* the linear region may spread across several pages */
1337 if (!plen) 1318 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1338 break;
1339 1319
1340 /* 1320 if (spd_fill_page(spd, page, flen, poff, skb))
1341 * just jump directly to update and return, no point 1321 return 1;
1342 * in going over fragments when the output is full.
1343 */
1344 error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb);
1345 if (error)
1346 goto done;
1347 1322
1348 tlen -= plen; 1323 __segment_seek(&page, &poff, &plen, flen);
1349 } 1324 *len -= flen;
1325
1326 } while (*len && plen);
1327
1328 return 0;
1329}
1330
1331/*
1332 * Map linear and fragment data from the skb to spd. It reports failure if the
1333 * pipe is full or if we already spliced the requested length.
1334 */
1335static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1336 unsigned int *len,
1337 struct splice_pipe_desc *spd)
1338{
1339 int seg;
1340
1341 /*
1342 * map the linear part
1343 */
1344 if (__splice_segment(virt_to_page(skb->data),
1345 (unsigned long) skb->data & (PAGE_SIZE - 1),
1346 skb_headlen(skb),
1347 offset, len, skb, spd))
1348 return 1;
1350 1349
1351 /* 1350 /*
1352 * then map the fragments 1351 * then map the fragments
1353 */ 1352 */
1354map_frag:
1355 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1353 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1356 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1354 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1357 1355
1358 plen = f->size; 1356 if (__splice_segment(f->page, f->page_offset, f->size,
1359 poff = f->page_offset; 1357 offset, len, skb, spd))
1360 1358 return 1;
1361 if (toff) {
1362 if (plen <= toff) {
1363 toff -= plen;
1364 continue;
1365 }
1366 plen -= toff;
1367 poff += toff;
1368 toff = 0;
1369 }
1370
1371 plen = min(plen, tlen);
1372 if (!plen)
1373 break;
1374
1375 error = spd_fill_page(spd, f->page, plen, poff, skb);
1376 if (error)
1377 break;
1378
1379 tlen -= plen;
1380 } 1359 }
1381 1360
1382done: 1361 return 0;
1383 if (spd->nr_pages - nr_pages) {
1384 *offset = 0;
1385 *total_len = tlen;
1386 return 0;
1387 }
1388err:
1389 /* update the offset to reflect the linear part skip, if any */
1390 if (!error)
1391 *offset = toff;
1392 return error;
1393} 1362}
1394 1363
1395/* 1364/*
@@ -2288,6 +2257,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2288 skb_copy_queue_mapping(nskb, skb); 2257 skb_copy_queue_mapping(nskb, skb);
2289 nskb->priority = skb->priority; 2258 nskb->priority = skb->priority;
2290 nskb->protocol = skb->protocol; 2259 nskb->protocol = skb->protocol;
2260 nskb->vlan_tci = skb->vlan_tci;
2291 nskb->dst = dst_clone(skb->dst); 2261 nskb->dst = dst_clone(skb->dst);
2292 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 2262 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
2293 nskb->pkt_type = skb->pkt_type; 2263 nskb->pkt_type = skb->pkt_type;
@@ -2592,6 +2562,13 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
2592 return true; 2562 return true;
2593} 2563}
2594 2564
2565void __skb_warn_lro_forwarding(const struct sk_buff *skb)
2566{
2567 if (net_ratelimit())
2568 pr_warning("%s: received packets cannot be forwarded"
2569 " while LRO is enabled\n", skb->dev->name);
2570}
2571
2595EXPORT_SYMBOL(___pskb_trim); 2572EXPORT_SYMBOL(___pskb_trim);
2596EXPORT_SYMBOL(__kfree_skb); 2573EXPORT_SYMBOL(__kfree_skb);
2597EXPORT_SYMBOL(kfree_skb); 2574EXPORT_SYMBOL(kfree_skb);
@@ -2625,6 +2602,7 @@ EXPORT_SYMBOL(skb_seq_read);
2625EXPORT_SYMBOL(skb_abort_seq_read); 2602EXPORT_SYMBOL(skb_abort_seq_read);
2626EXPORT_SYMBOL(skb_find_text); 2603EXPORT_SYMBOL(skb_find_text);
2627EXPORT_SYMBOL(skb_append_datato_frags); 2604EXPORT_SYMBOL(skb_append_datato_frags);
2605EXPORT_SYMBOL(__skb_warn_lro_forwarding);
2628 2606
2629EXPORT_SYMBOL_GPL(skb_to_sgvec); 2607EXPORT_SYMBOL_GPL(skb_to_sgvec);
2630EXPORT_SYMBOL_GPL(skb_cow_data); 2608EXPORT_SYMBOL_GPL(skb_cow_data);
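
__segment_seek() is the heart of the splice rewrite above: it advances a (page, page-offset, remaining-length) triple by off bytes, stepping the page whenever the offset crosses a page boundary, which is what lets one __splice_segment() loop serve both the linear head and the page fragments. A runnable demo of the arithmetic, with PAGE_SIZE fixed at 4096 and a page index standing in for the kernel's page pointer:

	#include <stdio.h>
	#define PAGE_SIZE 4096u

	static void segment_seek(unsigned int *page_idx, unsigned int *poff,
				 unsigned int *plen, unsigned int off)
	{
		*poff += off;
		*page_idx += *poff / PAGE_SIZE;	/* kernel code advances a page pointer */
		*poff %= PAGE_SIZE;
		*plen -= off;
	}

	int main(void)
	{
		unsigned int page = 0, poff = 3000, plen = 10000;

		segment_seek(&page, &poff, &plen, 5000);
		/* 3000 + 5000 = 8000 -> page 1, offset 3904, 5000 bytes left */
		printf("page=%u poff=%u plen=%u\n", page, poff, plen);
		return 0;
	}
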
diff --git a/net/core/sock.c b/net/core/sock.c
index 88094cb09c06..10a64d57078c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,8 +7,6 @@
7 * handler for protocols to use and generic option handler. 7 * handler for protocols to use and generic option handler.
8 * 8 *
9 * 9 *
10 * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
11 *
12 * Authors: Ross Biro 10 * Authors: Ross Biro
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de> 12 * Florian La Roche, <flla@stud.uni-sb.de>
@@ -1068,7 +1066,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1068 * to be taken into account in all callers. -acme 1066 * to be taken into account in all callers. -acme
1069 */ 1067 */
1070 sk_refcnt_debug_inc(newsk); 1068 sk_refcnt_debug_inc(newsk);
1071 newsk->sk_socket = NULL; 1069 sk_set_socket(newsk, NULL);
1072 newsk->sk_sleep = NULL; 1070 newsk->sk_sleep = NULL;
1073 1071
1074 if (newsk->sk_prot->sockets_allocated) 1072 if (newsk->sk_prot->sockets_allocated)
@@ -1444,7 +1442,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
1444 /* Under pressure. */ 1442 /* Under pressure. */
1445 if (allocated > prot->sysctl_mem[1]) 1443 if (allocated > prot->sysctl_mem[1])
1446 if (prot->enter_memory_pressure) 1444 if (prot->enter_memory_pressure)
1447 prot->enter_memory_pressure(); 1445 prot->enter_memory_pressure(sk);
1448 1446
1449 /* Over hard limit. */ 1447 /* Over hard limit. */
1450 if (allocated > prot->sysctl_mem[2]) 1448 if (allocated > prot->sysctl_mem[2])
@@ -1704,7 +1702,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1704 sk->sk_rcvbuf = sysctl_rmem_default; 1702 sk->sk_rcvbuf = sysctl_rmem_default;
1705 sk->sk_sndbuf = sysctl_wmem_default; 1703 sk->sk_sndbuf = sysctl_wmem_default;
1706 sk->sk_state = TCP_CLOSE; 1704 sk->sk_state = TCP_CLOSE;
1707 sk->sk_socket = sock; 1705 sk_set_socket(sk, sock);
1708 1706
1709 sock_set_flag(sk, SOCK_ZAPPED); 1707 sock_set_flag(sk, SOCK_ZAPPED);
1710 1708
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5fc801057244..a570e2af22cb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -125,14 +125,6 @@ static struct ctl_table net_core_table[] = {
125#endif /* CONFIG_XFRM */ 125#endif /* CONFIG_XFRM */
126#endif /* CONFIG_NET */ 126#endif /* CONFIG_NET */
127 { 127 {
128 .ctl_name = NET_CORE_SOMAXCONN,
129 .procname = "somaxconn",
130 .data = &init_net.core.sysctl_somaxconn,
131 .maxlen = sizeof(int),
132 .mode = 0644,
133 .proc_handler = &proc_dointvec
134 },
135 {
136 .ctl_name = NET_CORE_BUDGET, 128 .ctl_name = NET_CORE_BUDGET,
137 .procname = "netdev_budget", 129 .procname = "netdev_budget",
138 .data = &netdev_budget, 130 .data = &netdev_budget,
@@ -151,6 +143,18 @@ static struct ctl_table net_core_table[] = {
151 { .ctl_name = 0 } 143 { .ctl_name = 0 }
152}; 144};
153 145
146static struct ctl_table netns_core_table[] = {
147 {
148 .ctl_name = NET_CORE_SOMAXCONN,
149 .procname = "somaxconn",
150 .data = &init_net.core.sysctl_somaxconn,
151 .maxlen = sizeof(int),
152 .mode = 0644,
153 .proc_handler = &proc_dointvec
154 },
155 { .ctl_name = 0 }
156};
157
154static __net_initdata struct ctl_path net_core_path[] = { 158static __net_initdata struct ctl_path net_core_path[] = {
155 { .procname = "net", .ctl_name = CTL_NET, }, 159 { .procname = "net", .ctl_name = CTL_NET, },
156 { .procname = "core", .ctl_name = NET_CORE, }, 160 { .procname = "core", .ctl_name = NET_CORE, },
@@ -159,23 +163,17 @@ static __net_initdata struct ctl_path net_core_path[] = {
159 163
160static __net_init int sysctl_core_net_init(struct net *net) 164static __net_init int sysctl_core_net_init(struct net *net)
161{ 165{
162 struct ctl_table *tbl, *tmp; 166 struct ctl_table *tbl;
163 167
164 net->core.sysctl_somaxconn = SOMAXCONN; 168 net->core.sysctl_somaxconn = SOMAXCONN;
165 169
166 tbl = net_core_table; 170 tbl = netns_core_table;
167 if (net != &init_net) { 171 if (net != &init_net) {
168 tbl = kmemdup(tbl, sizeof(net_core_table), GFP_KERNEL); 172 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
169 if (tbl == NULL) 173 if (tbl == NULL)
170 goto err_dup; 174 goto err_dup;
171 175
172 for (tmp = tbl; tmp->procname; tmp++) { 176 tbl[0].data = &net->core.sysctl_somaxconn;
173 if (tmp->data >= (void *)&init_net &&
174 tmp->data < (void *)(&init_net + 1))
175 tmp->data += (char *)net - (char *)&init_net;
176 else
177 tmp->mode &= ~0222;
178 }
179 } 177 }
180 178
181 net->core.sysctl_hdr = register_net_sysctl_table(net, 179 net->core.sysctl_hdr = register_net_sysctl_table(net,
@@ -186,7 +184,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
186 return 0; 184 return 0;
187 185
188err_reg: 186err_reg:
189 if (tbl != net_core_table) 187 if (tbl != netns_core_table)
190 kfree(tbl); 188 kfree(tbl);
191err_dup: 189err_dup:
192 return -ENOMEM; 190 return -ENOMEM;
@@ -198,7 +196,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
198 196
199 tbl = net->core.sysctl_hdr->ctl_table_arg; 197 tbl = net->core.sysctl_hdr->ctl_table_arg;
200 unregister_net_sysctl_table(net->core.sysctl_hdr); 198 unregister_net_sysctl_table(net->core.sysctl_hdr);
201 BUG_ON(tbl == net_core_table); 199 BUG_ON(tbl == netns_core_table);
202 kfree(tbl); 200 kfree(tbl);
203} 201}
204 202
@@ -209,6 +207,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
209 207
210static __init int sysctl_core_init(void) 208static __init int sysctl_core_init(void)
211{ 209{
210 register_net_sysctl_rotable(net_core_path, net_core_table);
212 return register_pernet_subsys(&sysctl_core_ops); 211 return register_pernet_subsys(&sysctl_core_ops);
213} 212}
214 213
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index a1929f33d703..f6756e0c9e69 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -794,7 +794,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
794{ 794{
795 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 795 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
796 enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; 796 enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE;
797 const u32 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; 797 const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
798 const bool is_data_packet = dccp_data_packet(skb); 798 const bool is_data_packet = dccp_data_packet(skb);
799 799
800 if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) { 800 if (unlikely(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)) {
@@ -825,18 +825,16 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
825 } 825 }
826 826
827 /* 827 /*
828 * Handle pending losses and otherwise check for new loss 828 * Perform loss detection and handle pending losses
829 */ 829 */
830 if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist) && 830 if (tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, &hcrx->ccid3hcrx_li_hist,
831 tfrc_rx_handle_loss(&hcrx->ccid3hcrx_hist, 831 skb, ndp, ccid3_first_li, sk)) {
832 &hcrx->ccid3hcrx_li_hist,
833 skb, ndp, ccid3_first_li, sk) ) {
834 do_feedback = CCID3_FBACK_PARAM_CHANGE; 832 do_feedback = CCID3_FBACK_PARAM_CHANGE;
835 goto done_receiving; 833 goto done_receiving;
836 } 834 }
837 835
838 if (tfrc_rx_hist_new_loss_indicated(&hcrx->ccid3hcrx_hist, skb, ndp)) 836 if (tfrc_rx_hist_loss_pending(&hcrx->ccid3hcrx_hist))
839 goto update_records; 837 return; /* done receiving */
840 838
841 /* 839 /*
842 * Handle data packets: RTT sampling and monitoring p 840 * Handle data packets: RTT sampling and monitoring p
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 849e181e698f..bcd6ac415bb9 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -90,14 +90,14 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
90{ 90{
91 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh); 91 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh);
92 u32 old_i_mean = lh->i_mean; 92 u32 old_i_mean = lh->i_mean;
93 s64 length; 93 s64 len;
94 94
95 if (cur == NULL) /* not initialised */ 95 if (cur == NULL) /* not initialised */
96 return 0; 96 return 0;
97 97
98 length = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq); 98 len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1;
99 99
100 if (length - cur->li_length <= 0) /* duplicate or reordered */ 100 if (len - (s64)cur->li_length <= 0) /* duplicate or reordered */
101 return 0; 101 return 0;
102 102
103 if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4) 103 if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4)
@@ -114,7 +114,7 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
114 if (tfrc_lh_length(lh) == 1) /* due to RFC 3448, 6.3.1 */ 114 if (tfrc_lh_length(lh) == 1) /* due to RFC 3448, 6.3.1 */
115 return 0; 115 return 0;
116 116
117 cur->li_length = length; 117 cur->li_length = len;
118 tfrc_lh_calc_i_mean(lh); 118 tfrc_lh_calc_i_mean(lh);
119 119
120 return (lh->i_mean < old_i_mean); 120 return (lh->i_mean < old_i_mean);
@@ -159,7 +159,7 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
159 else { 159 else {
160 cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); 160 cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno);
161 new->li_length = dccp_delta_seqno(new->li_seqno, 161 new->li_length = dccp_delta_seqno(new->li_seqno,
162 tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno); 162 tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1;
163 if (lh->counter > (2*LIH_SIZE)) 163 if (lh->counter > (2*LIH_SIZE))
164 lh->counter -= LIH_SIZE; 164 lh->counter -= LIH_SIZE;
165 165
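
The two "+ 1" fixes above make li_length an inclusive packet count: dccp_delta_seqno() measures the distance between sequence numbers, which is one less than the number of packets spanned. A runnable check of the arithmetic (delta_seqno simplified here to assume no 48-bit wrap):

	#include <assert.h>
	#include <stdio.h>

	/* Simplified dccp_delta_seqno(): assumes s2 >= s1, no sequence wrap. */
	static long long delta_seqno(unsigned long long s1, unsigned long long s2)
	{
		return (long long)(s2 - s1);
	}

	int main(void)
	{
		unsigned long long li_seqno = 10, last_rcv = 19;

		/* Packets 10..19 inclusive: ten packets, but a delta of only 9. */
		long long len = delta_seqno(li_seqno, last_rcv) + 1;

		assert(len == 10);
		printf("interval 10..19 spans %lld packets\n", len);
		return 0;
	}
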
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 20af1a693427..6cc108afdc3b 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -153,7 +153,7 @@ void tfrc_rx_packet_history_exit(void)
153 153
154static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry, 154static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
155 const struct sk_buff *skb, 155 const struct sk_buff *skb,
156 const u32 ndp) 156 const u64 ndp)
157{ 157{
158 const struct dccp_hdr *dh = dccp_hdr(skb); 158 const struct dccp_hdr *dh = dccp_hdr(skb);
159 159
@@ -166,7 +166,7 @@ static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
166 166
167void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, 167void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
168 const struct sk_buff *skb, 168 const struct sk_buff *skb,
169 const u32 ndp) 169 const u64 ndp)
170{ 170{
171 struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h); 171 struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h);
172 172
@@ -206,31 +206,39 @@ static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
206 * 206 *
207 * In the descriptions, `Si' refers to the sequence number of entry number i, 207 * In the descriptions, `Si' refers to the sequence number of entry number i,
208 * whose NDP count is `Ni' (lower case is used for variables). 208 * whose NDP count is `Ni' (lower case is used for variables).
209 * Note: All __after_loss functions expect that a test against duplicates has 209 * Note: All __xxx_loss functions expect that a test against duplicates has been
210 * been performed already: the seqno of the skb must not be less than the 210 * performed already: the seqno of the skb must not be less than the seqno
211 * seqno of loss_prev; and it must not equal that of any valid hist_entry. 211 * of loss_prev; and it must not equal that of any valid history entry.
212 */ 212 */
213static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
214{
215 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
216 s1 = DCCP_SKB_CB(skb)->dccpd_seq;
217
218 if (!dccp_loss_free(s0, s1, n1)) { /* gap between S0 and S1 */
219 h->loss_count = 1;
220 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
221 }
222}
223
213static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2) 224static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2)
214{ 225{
215 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, 226 u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
216 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, 227 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
217 s2 = DCCP_SKB_CB(skb)->dccpd_seq; 228 s2 = DCCP_SKB_CB(skb)->dccpd_seq;
218 int n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp,
219 d12 = dccp_delta_seqno(s1, s2), d2;
220 229
221 if (d12 > 0) { /* S1 < S2 */ 230 if (likely(dccp_delta_seqno(s1, s2) > 0)) { /* S1 < S2 */
222 h->loss_count = 2; 231 h->loss_count = 2;
223 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2); 232 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
224 return; 233 return;
225 } 234 }
226 235
227 /* S0 < S2 < S1 */ 236 /* S0 < S2 < S1 */
228 d2 = dccp_delta_seqno(s0, s2);
229 237
230 if (d2 == 1 || n2 >= d2) { /* S2 is direct successor of S0 */ 238 if (dccp_loss_free(s0, s2, n2)) {
231 int d21 = -d12; 239 u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;
232 240
233 if (d21 == 1 || n1 >= d21) { 241 if (dccp_loss_free(s2, s1, n1)) {
234 /* hole is filled: S0, S2, and S1 are consecutive */ 242 /* hole is filled: S0, S2, and S1 are consecutive */
235 h->loss_count = 0; 243 h->loss_count = 0;
236 h->loss_start = tfrc_rx_hist_index(h, 1); 244 h->loss_start = tfrc_rx_hist_index(h, 1);
@@ -238,9 +246,9 @@ static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2
238 /* gap between S2 and S1: just update loss_prev */ 246 /* gap between S2 and S1: just update loss_prev */
239 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2); 247 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);
240 248
241 } else { /* hole between S0 and S2 */ 249 } else { /* gap between S0 and S2 */
242 /* 250 /*
243 * Reorder history to insert S2 between S0 and s1 251 * Reorder history to insert S2 between S0 and S1
244 */ 252 */
245 tfrc_rx_hist_swap(h, 0, 3); 253 tfrc_rx_hist_swap(h, 0, 3);
246 h->loss_start = tfrc_rx_hist_index(h, 3); 254 h->loss_start = tfrc_rx_hist_index(h, 3);
@@ -256,22 +264,18 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
256 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno, 264 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
257 s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno, 265 s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
258 s3 = DCCP_SKB_CB(skb)->dccpd_seq; 266 s3 = DCCP_SKB_CB(skb)->dccpd_seq;
259 int n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp,
260 d23 = dccp_delta_seqno(s2, s3), d13, d3, d31;
261 267
262 if (d23 > 0) { /* S2 < S3 */ 268 if (likely(dccp_delta_seqno(s2, s3) > 0)) { /* S2 < S3 */
263 h->loss_count = 3; 269 h->loss_count = 3;
264 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3); 270 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
265 return 1; 271 return 1;
266 } 272 }
267 273
268 /* S3 < S2 */ 274 /* S3 < S2 */
269 d13 = dccp_delta_seqno(s1, s3);
270 275
271 if (d13 > 0) { 276 if (dccp_delta_seqno(s1, s3) > 0) { /* S1 < S3 < S2 */
272 /* 277 /*
273 * The sequence number order is S1, S3, S2 278 * Reorder history to insert S3 between S1 and S2
274 * Reorder history to insert entry between S1 and S2
275 */ 279 */
276 tfrc_rx_hist_swap(h, 2, 3); 280 tfrc_rx_hist_swap(h, 2, 3);
277 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3); 281 tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
@@ -280,17 +284,15 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
280 } 284 }
281 285
282 /* S0 < S3 < S1 */ 286 /* S0 < S3 < S1 */
283 d31 = -d13;
284 d3 = dccp_delta_seqno(s0, s3);
285 287
286 if (d3 == 1 || n3 >= d3) { /* S3 is a successor of S0 */ 288 if (dccp_loss_free(s0, s3, n3)) {
289 u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;
287 290
288 if (d31 == 1 || n1 >= d31) { 291 if (dccp_loss_free(s3, s1, n1)) {
289 /* hole between S0 and S1 filled by S3 */ 292 /* hole between S0 and S1 filled by S3 */
290 int d2 = dccp_delta_seqno(s1, s2), 293 u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;
291 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;
292 294
293 if (d2 == 1 || n2 >= d2) { 295 if (dccp_loss_free(s1, s2, n2)) {
294 /* entire hole filled by S0, S3, S1, S2 */ 296 /* entire hole filled by S0, S3, S1, S2 */
295 h->loss_start = tfrc_rx_hist_index(h, 2); 297 h->loss_start = tfrc_rx_hist_index(h, 2);
296 h->loss_count = 0; 298 h->loss_count = 0;
@@ -307,8 +309,8 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
307 } 309 }
308 310
309 /* 311 /*
310 * The remaining case: S3 is not a successor of S0. 312 * The remaining case: S0 < S3 < S1 < S2; gap between S0 and S3
311 * Sequence order is S0, S3, S1, S2; reorder to insert between S0 and S1 313 * Reorder history to insert S3 between S0 and S1.
312 */ 314 */
313 tfrc_rx_hist_swap(h, 0, 3); 315 tfrc_rx_hist_swap(h, 0, 3);
314 h->loss_start = tfrc_rx_hist_index(h, 3); 316 h->loss_start = tfrc_rx_hist_index(h, 3);
@@ -318,33 +320,25 @@ static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
318 return 1; 320 return 1;
319} 321}
320 322
321/* return the signed modulo-2^48 sequence number distance from entry e1 to e2 */
322static s64 tfrc_rx_hist_delta_seqno(struct tfrc_rx_hist *h, u8 e1, u8 e2)
323{
324 DCCP_BUG_ON(e1 > h->loss_count || e2 > h->loss_count);
325
326 return dccp_delta_seqno(tfrc_rx_hist_entry(h, e1)->tfrchrx_seqno,
327 tfrc_rx_hist_entry(h, e2)->tfrchrx_seqno);
328}
329
330/* recycle RX history records to continue loss detection if necessary */ 323/* recycle RX history records to continue loss detection if necessary */
331static void __three_after_loss(struct tfrc_rx_hist *h) 324static void __three_after_loss(struct tfrc_rx_hist *h)
332{ 325{
333 /* 326 /*
334 * The distance between S0 and S1 is always greater than 1 and the NDP 327 * At this stage we know already that there is a gap between S0 and S1
335 * count of S1 is smaller than this distance. Otherwise there would 328 * (since S0 was the highest sequence number received before detecting
336 * have been no loss. Hence it is only necessary to see whether there 329 * the loss). To recycle the loss record, it is thus only necessary to
337 * are further missing data packets between S1/S2 and S2/S3. 330 * check for other possible gaps between S1/S2 and between S2/S3.
338 */ 331 */
339 int d2 = tfrc_rx_hist_delta_seqno(h, 1, 2), 332 u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
340 d3 = tfrc_rx_hist_delta_seqno(h, 2, 3), 333 s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
341 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp, 334 s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
335 u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
342 n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp; 336 n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;
343 337
344 if (d2 == 1 || n2 >= d2) { /* S2 is successor to S1 */ 338 if (dccp_loss_free(s1, s2, n2)) {
345 339
346 if (d3 == 1 || n3 >= d3) { 340 if (dccp_loss_free(s2, s3, n3)) {
347 /* S3 is successor of S2: entire hole is filled */ 341 /* no gap between S2 and S3: entire hole is filled */
348 h->loss_start = tfrc_rx_hist_index(h, 3); 342 h->loss_start = tfrc_rx_hist_index(h, 3);
349 h->loss_count = 0; 343 h->loss_count = 0;
350 } else { 344 } else {
@@ -353,7 +347,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
353 h->loss_count = 1; 347 h->loss_count = 1;
354 } 348 }
355 349
356 } else { /* gap between S1 and S2 */ 350 } else { /* gap between S1 and S2 */
357 h->loss_start = tfrc_rx_hist_index(h, 1); 351 h->loss_start = tfrc_rx_hist_index(h, 1);
358 h->loss_count = 2; 352 h->loss_count = 2;
359 } 353 }
@@ -370,15 +364,20 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
370 * Chooses action according to pending loss, updates LI database when a new 364 * Chooses action according to pending loss, updates LI database when a new
371 * loss was detected, and does required post-processing. Returns 1 when caller 365 * loss was detected, and does required post-processing. Returns 1 when caller
372 * should send feedback, 0 otherwise. 366 * should send feedback, 0 otherwise.
367 * Since it also takes care of reordering during loss detection and updates the
368 * records accordingly, the caller should not perform any more RX history
369 * operations when loss_count is greater than 0 after calling this function.
373 */ 370 */
374int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, 371int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
375 struct tfrc_loss_hist *lh, 372 struct tfrc_loss_hist *lh,
376 struct sk_buff *skb, u32 ndp, 373 struct sk_buff *skb, const u64 ndp,
377 u32 (*calc_first_li)(struct sock *), struct sock *sk) 374 u32 (*calc_first_li)(struct sock *), struct sock *sk)
378{ 375{
379 int is_new_loss = 0; 376 int is_new_loss = 0;
380 377
381 if (h->loss_count == 1) { 378 if (h->loss_count == 0) {
379 __do_track_loss(h, skb, ndp);
380 } else if (h->loss_count == 1) {
382 __one_after_loss(h, skb, ndp); 381 __one_after_loss(h, skb, ndp);
383 } else if (h->loss_count != 2) { 382 } else if (h->loss_count != 2) {
384 DCCP_BUG("invalid loss_count %d", h->loss_count); 383 DCCP_BUG("invalid loss_count %d", h->loss_count);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index c7eeda49cb20..461cc91cce88 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -64,7 +64,7 @@ struct tfrc_rx_hist_entry {
64 u64 tfrchrx_seqno:48, 64 u64 tfrchrx_seqno:48,
65 tfrchrx_ccval:4, 65 tfrchrx_ccval:4,
66 tfrchrx_type:4; 66 tfrchrx_type:4;
67 u32 tfrchrx_ndp; /* In fact it is from 8 to 24 bits */ 67 u64 tfrchrx_ndp:48;
68 ktime_t tfrchrx_tstamp; 68 ktime_t tfrchrx_tstamp;
69}; 69};
70 70
@@ -118,41 +118,21 @@ static inline struct tfrc_rx_hist_entry *
118 return h->ring[h->loss_start]; 118 return h->ring[h->loss_start];
119} 119}
120 120
121/* initialise loss detection and disable RTT sampling */
122static inline void tfrc_rx_hist_loss_indicated(struct tfrc_rx_hist *h)
123{
124 h->loss_count = 1;
125}
126
127/* indicate whether previously a packet was detected missing */ 121/* indicate whether previously a packet was detected missing */
128static inline int tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h) 122static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h)
129{
130 return h->loss_count;
131}
132
133/* any data packets missing between last reception and skb ? */
134static inline int tfrc_rx_hist_new_loss_indicated(struct tfrc_rx_hist *h,
135 const struct sk_buff *skb,
136 u32 ndp)
137{ 123{
138 int delta = dccp_delta_seqno(tfrc_rx_hist_last_rcv(h)->tfrchrx_seqno, 124 return h->loss_count > 0;
139 DCCP_SKB_CB(skb)->dccpd_seq);
140
141 if (delta > 1 && ndp < delta)
142 tfrc_rx_hist_loss_indicated(h);
143
144 return tfrc_rx_hist_loss_pending(h);
145} 125}
146 126
147extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, 127extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
148 const struct sk_buff *skb, const u32 ndp); 128 const struct sk_buff *skb, const u64 ndp);
149 129
150extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb); 130extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
151 131
152struct tfrc_loss_hist; 132struct tfrc_loss_hist;
153extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, 133extern int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
154 struct tfrc_loss_hist *lh, 134 struct tfrc_loss_hist *lh,
155 struct sk_buff *skb, u32 ndp, 135 struct sk_buff *skb, const u64 ndp,
156 u32 (*first_li)(struct sock *sk), 136 u32 (*first_li)(struct sock *sk),
157 struct sock *sk); 137 struct sock *sk);
158extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, 138extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index f44d492d3b74..32617e0576cb 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -153,6 +153,21 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
153 return after48(seq1, seq2) ? seq1 : seq2; 153 return after48(seq1, seq2) ? seq1 : seq2;
154} 154}
155 155
156/**
157 * dccp_loss_free - Evaluates condition for data loss from RFC 4340, 7.7.1
158 * @s1: start sequence number
159 * @s2: end sequence number
160 * @ndp: NDP count on packet with sequence number @s2
161 * Returns true if the sequence range s1...s2 has no data loss.
162 */
163static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
164{
165 s64 delta = dccp_delta_seqno(s1, s2);
166
167 BUG_TRAP(delta >= 0);
168 return (u64)delta <= ndp + 1;
169}
170
156enum { 171enum {
157 DCCP_MIB_NUM = 0, 172 DCCP_MIB_NUM = 0,
158 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ 173 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
@@ -262,7 +277,7 @@ extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
262 const struct dccp_hdr *dh, const unsigned len); 277 const struct dccp_hdr *dh, const unsigned len);
263 278
264extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); 279extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
265extern int dccp_destroy_sock(struct sock *sk); 280extern void dccp_destroy_sock(struct sock *sk);
266 281
267extern void dccp_close(struct sock *sk, long timeout); 282extern void dccp_close(struct sock *sk, long timeout);
268extern struct sk_buff *dccp_make_response(struct sock *sk, 283extern struct sk_buff *dccp_make_response(struct sock *sk,
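
dccp_loss_free() is the single predicate the reworked loss detection is built on: the span s1..s2 shows no data loss when the NDP count carried on s2 accounts for every skipped sequence number, i.e. delta <= ndp + 1 (RFC 4340, 7.7.1). A runnable illustration, with delta_seqno again simplified to assume no wrap:

	#include <stdio.h>
	#include <stdbool.h>

	static long long delta_seqno(unsigned long long s1, unsigned long long s2)
	{
		return (long long)(s2 - s1);	/* assumes s2 >= s1, no 48-bit wrap */
	}

	static bool loss_free(unsigned long long s1, unsigned long long s2,
			      unsigned long long ndp)
	{
		return (unsigned long long)delta_seqno(s1, s2) <= ndp + 1;
	}

	int main(void)
	{
		/* 10 -> 13 skips seqnos 11 and 12; only loss-free if at least
		 * two of the missing packets were non-data (ndp >= 2). */
		printf("%d\n", loss_free(10, 13, 1));	/* 0: a data packet was lost */
		printf("%d\n", loss_free(10, 13, 2));	/* 1: gap fully explained */
		return 0;
	}
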
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 37d27bcb361f..2622ace17c46 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -205,17 +205,18 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
205 struct sock *sk; 205 struct sock *sk;
206 __u64 seq; 206 __u64 seq;
207 int err; 207 int err;
208 struct net *net = dev_net(skb->dev);
208 209
209 if (skb->len < (iph->ihl << 2) + 8) { 210 if (skb->len < (iph->ihl << 2) + 8) {
210 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 211 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
211 return; 212 return;
212 } 213 }
213 214
214 sk = inet_lookup(dev_net(skb->dev), &dccp_hashinfo, 215 sk = inet_lookup(net, &dccp_hashinfo,
215 iph->daddr, dh->dccph_dport, 216 iph->daddr, dh->dccph_dport,
216 iph->saddr, dh->dccph_sport, inet_iif(skb)); 217 iph->saddr, dh->dccph_sport, inet_iif(skb));
217 if (sk == NULL) { 218 if (sk == NULL) {
218 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 219 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
219 return; 220 return;
220 } 221 }
221 222
@@ -229,7 +230,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
229 * servers this needs to be solved differently. 230 * servers this needs to be solved differently.
230 */ 231 */
231 if (sock_owned_by_user(sk)) 232 if (sock_owned_by_user(sk))
232 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 233 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
233 234
234 if (sk->sk_state == DCCP_CLOSED) 235 if (sk->sk_state == DCCP_CLOSED)
235 goto out; 236 goto out;
@@ -238,7 +239,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
238 seq = dccp_hdr_seq(dh); 239 seq = dccp_hdr_seq(dh);
239 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && 240 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
240 !between48(seq, dp->dccps_swl, dp->dccps_swh)) { 241 !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
241 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 242 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
242 goto out; 243 goto out;
243 } 244 }
244 245
@@ -285,7 +286,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
285 BUG_TRAP(!req->sk); 286 BUG_TRAP(!req->sk);
286 287
287 if (seq != dccp_rsk(req)->dreq_iss) { 288 if (seq != dccp_rsk(req)->dreq_iss) {
288 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 289 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
289 goto out; 290 goto out;
290 } 291 }
291 /* 292 /*
@@ -408,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
408 return newsk; 409 return newsk;
409 410
410exit_overflow: 411exit_overflow:
411 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 412 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
412exit: 413exit:
413 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 414 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
414 dst_release(dst); 415 dst_release(dst);
415 return NULL; 416 return NULL;
416} 417}
@@ -464,7 +465,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
464 465
465 security_skb_classify_flow(skb, &fl); 466 security_skb_classify_flow(skb, &fl);
466 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { 467 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
467 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 468 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
468 return NULL; 469 return NULL;
469 } 470 }
470 471
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index f7fe2a572d7b..b74e8b2cbe55 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -93,8 +93,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
93 struct sock *sk; 93 struct sock *sk;
94 int err; 94 int err;
95 __u64 seq; 95 __u64 seq;
96 struct net *net = dev_net(skb->dev);
96 97
97 sk = inet6_lookup(dev_net(skb->dev), &dccp_hashinfo, 98 sk = inet6_lookup(net, &dccp_hashinfo,
98 &hdr->daddr, dh->dccph_dport, 99 &hdr->daddr, dh->dccph_dport,
99 &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); 100 &hdr->saddr, dh->dccph_sport, inet6_iif(skb));
100 101
@@ -110,7 +111,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
110 111
111 bh_lock_sock(sk); 112 bh_lock_sock(sk);
112 if (sock_owned_by_user(sk)) 113 if (sock_owned_by_user(sk))
113 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 114 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
114 115
115 if (sk->sk_state == DCCP_CLOSED) 116 if (sk->sk_state == DCCP_CLOSED)
116 goto out; 117 goto out;
@@ -188,7 +189,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
188 BUG_TRAP(req->sk == NULL); 189 BUG_TRAP(req->sk == NULL);
189 190
190 if (seq != dccp_rsk(req)->dreq_iss) { 191 if (seq != dccp_rsk(req)->dreq_iss) {
191 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 192 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
192 goto out; 193 goto out;
193 } 194 }
194 195
@@ -629,9 +630,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
629 return newsk; 630 return newsk;
630 631
631out_overflow: 632out_overflow:
632 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 633 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
633out: 634out:
634 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 635 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
635 if (opt != NULL && opt != np->opt) 636 if (opt != NULL && opt != np->opt)
636 sock_kfree_s(sk, opt, opt->tot_len); 637 sock_kfree_s(sk, opt, opt->tot_len);
637 dst_release(dst); 638 dst_release(dst);
@@ -1091,10 +1092,10 @@ static int dccp_v6_init_sock(struct sock *sk)
1091 return err; 1092 return err;
1092} 1093}
1093 1094
1094static int dccp_v6_destroy_sock(struct sock *sk) 1095static void dccp_v6_destroy_sock(struct sock *sk)
1095{ 1096{
1096 dccp_destroy_sock(sk); 1097 dccp_destroy_sock(sk);
1097 return inet6_destroy_sock(sk); 1098 inet6_destroy_sock(sk);
1098} 1099}
1099 1100
1100static struct timewait_sock_ops dccp6_timewait_sock_ops = { 1101static struct timewait_sock_ops dccp6_timewait_sock_ops = {
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 43bc24e761d0..dc7c158a2f4b 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -124,12 +124,12 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
124 mandatory = 1; 124 mandatory = 1;
125 break; 125 break;
126 case DCCPO_NDP_COUNT: 126 case DCCPO_NDP_COUNT:
127 if (len > 3) 127 if (len > 6)
128 goto out_invalid_option; 128 goto out_invalid_option;
129 129
130 opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); 130 opt_recv->dccpor_ndp = dccp_decode_value_var(value, len);
131 dccp_pr_debug("%s rx opt: NDP count=%d\n", dccp_role(sk), 131 dccp_pr_debug("%s opt: NDP count=%llu\n", dccp_role(sk),
132 opt_recv->dccpor_ndp); 132 (unsigned long long)opt_recv->dccpor_ndp);
133 break; 133 break;
134 case DCCPO_CHANGE_L: 134 case DCCPO_CHANGE_L:
135 /* fall through */ 135 /* fall through */
@@ -307,9 +307,11 @@ static void dccp_encode_value_var(const u32 value, unsigned char *to,
307 *to++ = (value & 0xFF); 307 *to++ = (value & 0xFF);
308} 308}
309 309
310static inline int dccp_ndp_len(const int ndp) 310static inline u8 dccp_ndp_len(const u64 ndp)
311{ 311{
312 return likely(ndp <= 0xFF) ? 1 : ndp <= 0xFFFF ? 2 : 3; 312 if (likely(ndp <= 0xFF))
313 return 1;
314 return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
313} 315}
314 316
315int dccp_insert_option(struct sock *sk, struct sk_buff *skb, 317int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
@@ -336,7 +338,7 @@ EXPORT_SYMBOL_GPL(dccp_insert_option);
336static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb) 338static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
337{ 339{
338 struct dccp_sock *dp = dccp_sk(sk); 340 struct dccp_sock *dp = dccp_sk(sk);
339 int ndp = dp->dccps_ndp_count; 341 u64 ndp = dp->dccps_ndp_count;
340 342
341 if (dccp_non_data_packet(skb)) 343 if (dccp_non_data_packet(skb))
342 ++dp->dccps_ndp_count; 344 ++dp->dccps_ndp_count;
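The NDP hunks are more than a type cleanup. RFC 4340 (section 7.7) defines NDP Count as a variable-length option of up to 6 bytes, so the old len > 3 check and int storage rejected or truncated legal values; with dccpor_ndp widened to u64, the length rule picks the smallest of 1, 2, 4 or 6 bytes (3- and 5-byte widths are deliberately skipped). A self-contained sketch of the pair, generalized to u64 as an assumption of where this series is headed (dccp_encode_value_var() above still takes u32 at this point):

    #include <linux/types.h>

    static inline u8 ndp_len(const u64 ndp)
    {
            if (ndp <= 0xFF)
                    return 1;                       /* one byte */
            if (ndp <= 0xFFFF)
                    return 2;                       /* USHORT_MAX */
            return ndp <= 0xFFFFFFFF ? 4 : 6;       /* UINT_MAX, else 6 */
    }

    /* Most significant byte first, as the option wire format requires. */
    static void encode_value_var(const u64 value, unsigned char *to, u8 len)
    {
            while (len--)
                    *to++ = (value >> (8 * len)) & 0xFF;
    }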
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9dfe2470962c..a0b56009611f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -237,7 +237,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
237 237
238EXPORT_SYMBOL_GPL(dccp_init_sock); 238EXPORT_SYMBOL_GPL(dccp_init_sock);
239 239
240int dccp_destroy_sock(struct sock *sk) 240void dccp_destroy_sock(struct sock *sk)
241{ 241{
242 struct dccp_sock *dp = dccp_sk(sk); 242 struct dccp_sock *dp = dccp_sk(sk);
243 struct dccp_minisock *dmsk = dccp_msk(sk); 243 struct dccp_minisock *dmsk = dccp_msk(sk);
@@ -268,8 +268,6 @@ int dccp_destroy_sock(struct sock *sk)
268 268
269 /* clean up feature negotiation state */ 269 /* clean up feature negotiation state */
270 dccp_feat_clean(dmsk); 270 dccp_feat_clean(dmsk);
271
272 return 0;
273} 271}
274 272
275EXPORT_SYMBOL_GPL(dccp_destroy_sock); 273EXPORT_SYMBOL_GPL(dccp_destroy_sock);
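The int return of dccp_destroy_sock() had no consumer: the sk_prot->destroy hook is called purely for its side effects, and every implementation returned 0 unconditionally. Narrowing it to void is what lets dccp_v6_destroy_sock() above collapse to two plain calls. The hook shape this implies, excerpted as an assumption about the struct proto definition this series converges on:

    struct proto {
            /* ... */
            void    (*destroy)(struct sock *sk);    /* was int; status unused */
            /* ... */
    };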
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 8703a792b560..3608d5342ca2 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -224,7 +224,7 @@ static void dccp_delack_timer(unsigned long data)
224 if (sock_owned_by_user(sk)) { 224 if (sock_owned_by_user(sk)) {
225 /* Try again later. */ 225 /* Try again later. */
226 icsk->icsk_ack.blocked = 1; 226 icsk->icsk_ack.blocked = 1;
227 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); 227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
228 sk_reset_timer(sk, &icsk->icsk_delack_timer, 228 sk_reset_timer(sk, &icsk->icsk_delack_timer,
229 jiffies + TCP_DELACK_MIN); 229 jiffies + TCP_DELACK_MIN);
230 goto out; 230 goto out;
@@ -254,7 +254,7 @@ static void dccp_delack_timer(unsigned long data)
254 icsk->icsk_ack.ato = TCP_ATO_MIN; 254 icsk->icsk_ack.ato = TCP_ATO_MIN;
255 } 255 }
256 dccp_send_ack(sk); 256 dccp_send_ack(sk);
257 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); 257 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
258 } 258 }
259out: 259out:
260 bh_unlock_sock(sk); 260 bh_unlock_sock(sk);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index fc2efe899e91..61b7df577ddd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -451,7 +451,7 @@ static void dn_destruct(struct sock *sk)
451 451
452static int dn_memory_pressure; 452static int dn_memory_pressure;
453 453
454static void dn_enter_memory_pressure(void) 454static void dn_enter_memory_pressure(struct sock *sk)
455{ 455{
456 if (!dn_memory_pressure) { 456 if (!dn_memory_pressure) {
457 dn_memory_pressure = 1; 457 dn_memory_pressure = 1;
@@ -1719,6 +1719,8 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1719 * See if there is data ready to read, sleep if there isn't 1719 * See if there is data ready to read, sleep if there isn't
1720 */ 1720 */
1721 for(;;) { 1721 for(;;) {
1722 DEFINE_WAIT(wait);
1723
1722 if (sk->sk_err) 1724 if (sk->sk_err)
1723 goto out; 1725 goto out;
1724 1726
@@ -1748,14 +1750,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1748 goto out; 1750 goto out;
1749 } 1751 }
1750 1752
1751 set_bit(SOCK_ASYNC_WAITDATA, &sock->flags); 1753 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1752 SOCK_SLEEP_PRE(sk) 1754 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 1755 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1754 if (!dn_data_ready(sk, queue, flags, target)) 1756 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1755 schedule(); 1757 finish_wait(sk->sk_sleep, &wait);
1756
1757 SOCK_SLEEP_POST(sk)
1758 clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1759 } 1758 }
1760 1759
1761 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { 1760 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
@@ -2002,18 +2001,19 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
2002 * size. 2001 * size.
2003 */ 2002 */
2004 if (dn_queue_too_long(scp, queue, flags)) { 2003 if (dn_queue_too_long(scp, queue, flags)) {
2004 DEFINE_WAIT(wait);
2005
2005 if (flags & MSG_DONTWAIT) { 2006 if (flags & MSG_DONTWAIT) {
2006 err = -EWOULDBLOCK; 2007 err = -EWOULDBLOCK;
2007 goto out; 2008 goto out;
2008 } 2009 }
2009 2010
2010 SOCK_SLEEP_PRE(sk) 2011 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
2011 2012 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2012 if (dn_queue_too_long(scp, queue, flags)) 2013 sk_wait_event(sk, &timeo,
2013 schedule(); 2014 !dn_queue_too_long(scp, queue, flags));
2014 2015 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2015 SOCK_SLEEP_POST(sk) 2016 finish_wait(sk->sk_sleep, &wait);
2016
2017 continue; 2017 continue;
2018 } 2018 }
2019 2019
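Both DECnet conversions above retire the long-deprecated SOCK_SLEEP_PRE/SOCK_SLEEP_POST macros in favour of the standard wait-queue idiom. The skeleton the two call sites now share, hedged as a sketch (ready() stands in for dn_data_ready() in the receive path and for the negated dn_queue_too_long() in the send path):

    DEFINE_WAIT(wait);

    prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
    set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    /* sk_wait_event() drops the socket lock, checks the condition,
     * schedules if it does not yet hold, then re-takes the lock;
     * the enclosing loop at each call site supplies the retry. */
    sk_wait_event(sk, &timeo, ready(sk));
    clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
    finish_wait(sk->sk_sleep, &wait);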
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 5b7539b7fe0c..14fbca55e908 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -229,7 +229,7 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
229 return 0; 229 return 0;
230} 230}
231 231
232static void dn_fib_rule_flush_cache(void) 232static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
233{ 233{
234 dn_rt_cache_flush(-1); 234 dn_rt_cache_flush(-1);
235} 235}
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 7c9bb13b1539..d35127bb84e1 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -573,9 +573,7 @@ static int econet_release(struct socket *sock)
573 573
574 sk->sk_state_change(sk); /* It is useless. Just for sanity. */ 574 sk->sk_state_change(sk); /* It is useless. Just for sanity. */
575 575
576 sock->sk = NULL; 576 sock_orphan(sk);
577 sk->sk_socket = NULL;
578 sock_set_flag(sk, SOCK_DEAD);
579 577
580 /* Purge queues */ 578 /* Purge queues */
581 579
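sock_orphan() performs the same detach the three removed lines did by hand, but under sk_callback_lock, closing a window in which a concurrent wakeup could observe a half-detached socket. Roughly what the helper expands to, paraphrased from include/net/sock.h of this era (treat the details as a sketch):

    static inline void sock_orphan(struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            sock_set_flag(sk, SOCK_DEAD);   /* no user process behind it */
            sk->sk_socket = NULL;           /* detach from struct socket */
            sk->sk_sleep  = NULL;           /* ...and from its wait queue */
            write_unlock_bh(&sk->sk_callback_lock);
    }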
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 200ee1e63728..69dbc342a464 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -391,7 +391,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
391 391
392 wstats.updated = 0; 392 wstats.updated = 0;
393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { 393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
394 wstats.level = rx_stats->rssi; 394 wstats.level = rx_stats->signal;
395 wstats.updated |= IW_QUAL_LEVEL_UPDATED; 395 wstats.updated |= IW_QUAL_LEVEL_UPDATED;
396 } else 396 } else
397 wstats.updated |= IW_QUAL_LEVEL_INVALID; 397 wstats.updated |= IW_QUAL_LEVEL_INVALID;
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index d8b02603cbe5..d996547f7a62 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -542,90 +542,4 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
542 return 1; 542 return 1;
543} 543}
544 544
545/* Incoming 802.11 structure is converted to a TXB
546 * a block of 802.11 fragment packets (stored as skbs) */
547int ieee80211_tx_frame(struct ieee80211_device *ieee,
548 struct ieee80211_hdr *frame, int hdr_len, int total_len,
549 int encrypt_mpdu)
550{
551 struct ieee80211_txb *txb = NULL;
552 unsigned long flags;
553 struct net_device_stats *stats = &ieee->stats;
554 struct sk_buff *skb_frag;
555 int priority = -1;
556 int fraglen = total_len;
557 int headroom = ieee->tx_headroom;
558 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
559
560 spin_lock_irqsave(&ieee->lock, flags);
561
562 if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
563 encrypt_mpdu = 0;
564
565 /* If there is no driver handler to take the TXB, don't bother
566 * creating it... */
567 if (!ieee->hard_start_xmit) {
568 printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
569 goto success;
570 }
571
572 if (unlikely(total_len < 24)) {
573 printk(KERN_WARNING "%s: skb too small (%d).\n",
574 ieee->dev->name, total_len);
575 goto success;
576 }
577
578 if (encrypt_mpdu) {
579 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
580 fraglen += crypt->ops->extra_mpdu_prefix_len +
581 crypt->ops->extra_mpdu_postfix_len;
582 headroom += crypt->ops->extra_mpdu_prefix_len;
583 }
584
585 /* When we allocate the TXB we allocate enough space for the reserve
586 * and full fragment bytes (bytes_per_frag doesn't include prefix,
587 * postfix, header, FCS, etc.) */
588 txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
589 if (unlikely(!txb)) {
590 printk(KERN_WARNING "%s: Could not allocate TXB\n",
591 ieee->dev->name);
592 goto failed;
593 }
594 txb->encrypted = 0;
595 txb->payload_size = fraglen;
596
597 skb_frag = txb->fragments[0];
598
599 memcpy(skb_put(skb_frag, total_len), frame, total_len);
600
601 if (ieee->config &
602 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
603 skb_put(skb_frag, 4);
604
605 /* To avoid overcomplicating things, we do the corner-case frame
606 * encryption in software. The only real situation where encryption is
607 * needed here is during software-based shared key authentication. */
608 if (encrypt_mpdu)
609 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
610
611 success:
612 spin_unlock_irqrestore(&ieee->lock, flags);
613
614 if (txb) {
615 if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
616 stats->tx_packets++;
617 stats->tx_bytes += txb->payload_size;
618 return 0;
619 }
620 ieee80211_txb_free(txb);
621 }
622 return 0;
623
624 failed:
625 spin_unlock_irqrestore(&ieee->lock, flags);
626 stats->tx_errors++;
627 return 1;
628}
629
630EXPORT_SYMBOL(ieee80211_tx_frame);
631EXPORT_SYMBOL(ieee80211_txb_free); 545EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 623489afa62c..973832dd7faf 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -43,8 +43,9 @@ static const char *ieee80211_modes[] = {
43 43
44#define MAX_CUSTOM_LEN 64 44#define MAX_CUSTOM_LEN 64
45static char *ieee80211_translate_scan(struct ieee80211_device *ieee, 45static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
46 char *start, char *stop, 46 char *start, char *stop,
47 struct ieee80211_network *network) 47 struct ieee80211_network *network,
48 struct iw_request_info *info)
48{ 49{
49 char custom[MAX_CUSTOM_LEN]; 50 char custom[MAX_CUSTOM_LEN];
50 char *p; 51 char *p;
@@ -57,7 +58,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
57 iwe.cmd = SIOCGIWAP; 58 iwe.cmd = SIOCGIWAP;
58 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 59 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
59 memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); 60 memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
60 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_ADDR_LEN); 61 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
61 62
62 /* Remaining entries will be displayed in the order we provide them */ 63 /* Remaining entries will be displayed in the order we provide them */
63 64
@@ -66,17 +67,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
66 iwe.u.data.flags = 1; 67 iwe.u.data.flags = 1;
67 if (network->flags & NETWORK_EMPTY_ESSID) { 68 if (network->flags & NETWORK_EMPTY_ESSID) {
68 iwe.u.data.length = sizeof("<hidden>"); 69 iwe.u.data.length = sizeof("<hidden>");
69 start = iwe_stream_add_point(start, stop, &iwe, "<hidden>"); 70 start = iwe_stream_add_point(info, start, stop,
71 &iwe, "<hidden>");
70 } else { 72 } else {
71 iwe.u.data.length = min(network->ssid_len, (u8) 32); 73 iwe.u.data.length = min(network->ssid_len, (u8) 32);
72 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 74 start = iwe_stream_add_point(info, start, stop,
75 &iwe, network->ssid);
73 } 76 }
74 77
75 /* Add the protocol name */ 78 /* Add the protocol name */
76 iwe.cmd = SIOCGIWNAME; 79 iwe.cmd = SIOCGIWNAME;
77 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", 80 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
78 ieee80211_modes[network->mode]); 81 ieee80211_modes[network->mode]);
79 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN); 82 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
80 83
81 /* Add mode */ 84 /* Add mode */
82 iwe.cmd = SIOCGIWMODE; 85 iwe.cmd = SIOCGIWMODE;
@@ -86,7 +89,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
86 else 89 else
87 iwe.u.mode = IW_MODE_ADHOC; 90 iwe.u.mode = IW_MODE_ADHOC;
88 91
89 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_UINT_LEN); 92 start = iwe_stream_add_event(info, start, stop,
93 &iwe, IW_EV_UINT_LEN);
90 } 94 }
91 95
92 /* Add channel and frequency */ 96 /* Add channel and frequency */
@@ -95,7 +99,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
95 iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel); 99 iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel);
96 iwe.u.freq.e = 6; 100 iwe.u.freq.e = 6;
97 iwe.u.freq.i = 0; 101 iwe.u.freq.i = 0;
98 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN); 102 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
99 103
100 /* Add encryption capability */ 104 /* Add encryption capability */
101 iwe.cmd = SIOCGIWENCODE; 105 iwe.cmd = SIOCGIWENCODE;
@@ -104,12 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
104 else 108 else
105 iwe.u.data.flags = IW_ENCODE_DISABLED; 109 iwe.u.data.flags = IW_ENCODE_DISABLED;
106 iwe.u.data.length = 0; 110 iwe.u.data.length = 0;
107 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 111 start = iwe_stream_add_point(info, start, stop,
112 &iwe, network->ssid);
108 113
109 /* Add basic and extended rates */ 114 /* Add basic and extended rates */
110 /* Rate : stuffing multiple values in a single event require a bit 115 /* Rate : stuffing multiple values in a single event require a bit
111 * more of magic - Jean II */ 116 * more of magic - Jean II */
112 current_val = start + IW_EV_LCP_LEN; 117 current_val = start + iwe_stream_lcp_len(info);
113 iwe.cmd = SIOCGIWRATE; 118 iwe.cmd = SIOCGIWRATE;
114 /* Those two flags are ignored... */ 119 /* Those two flags are ignored... */
115 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; 120 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
@@ -124,17 +129,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
124 /* Bit rate given in 500 kb/s units (+ 0x80) */ 129 /* Bit rate given in 500 kb/s units (+ 0x80) */
125 iwe.u.bitrate.value = ((rate & 0x7f) * 500000); 130 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
126 /* Add new value to event */ 131 /* Add new value to event */
127 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); 132 current_val = iwe_stream_add_value(info, start, current_val,
133 stop, &iwe, IW_EV_PARAM_LEN);
128 } 134 }
129 for (; j < network->rates_ex_len; j++) { 135 for (; j < network->rates_ex_len; j++) {
130 rate = network->rates_ex[j] & 0x7F; 136 rate = network->rates_ex[j] & 0x7F;
131 /* Bit rate given in 500 kb/s units (+ 0x80) */ 137 /* Bit rate given in 500 kb/s units (+ 0x80) */
132 iwe.u.bitrate.value = ((rate & 0x7f) * 500000); 138 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
133 /* Add new value to event */ 139 /* Add new value to event */
134 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); 140 current_val = iwe_stream_add_value(info, start, current_val,
141 stop, &iwe, IW_EV_PARAM_LEN);
135 } 142 }
136 /* Check if we added any rate */ 143 /* Check if we added any rate */
137 if((current_val - start) > IW_EV_LCP_LEN) 144 if ((current_val - start) > iwe_stream_lcp_len(info))
138 start = current_val; 145 start = current_val;
139 146
140 /* Add quality statistics */ 147 /* Add quality statistics */
@@ -181,14 +188,14 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
181 iwe.u.qual.level = network->stats.signal; 188 iwe.u.qual.level = network->stats.signal;
182 } 189 }
183 190
184 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); 191 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
185 192
186 iwe.cmd = IWEVCUSTOM; 193 iwe.cmd = IWEVCUSTOM;
187 p = custom; 194 p = custom;
188 195
189 iwe.u.data.length = p - custom; 196 iwe.u.data.length = p - custom;
190 if (iwe.u.data.length) 197 if (iwe.u.data.length)
191 start = iwe_stream_add_point(start, stop, &iwe, custom); 198 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
192 199
193 memset(&iwe, 0, sizeof(iwe)); 200 memset(&iwe, 0, sizeof(iwe));
194 if (network->wpa_ie_len) { 201 if (network->wpa_ie_len) {
@@ -196,7 +203,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
196 memcpy(buf, network->wpa_ie, network->wpa_ie_len); 203 memcpy(buf, network->wpa_ie, network->wpa_ie_len);
197 iwe.cmd = IWEVGENIE; 204 iwe.cmd = IWEVGENIE;
198 iwe.u.data.length = network->wpa_ie_len; 205 iwe.u.data.length = network->wpa_ie_len;
199 start = iwe_stream_add_point(start, stop, &iwe, buf); 206 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
200 } 207 }
201 208
202 memset(&iwe, 0, sizeof(iwe)); 209 memset(&iwe, 0, sizeof(iwe));
@@ -205,7 +212,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
205 memcpy(buf, network->rsn_ie, network->rsn_ie_len); 212 memcpy(buf, network->rsn_ie, network->rsn_ie_len);
206 iwe.cmd = IWEVGENIE; 213 iwe.cmd = IWEVGENIE;
207 iwe.u.data.length = network->rsn_ie_len; 214 iwe.u.data.length = network->rsn_ie_len;
208 start = iwe_stream_add_point(start, stop, &iwe, buf); 215 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
209 } 216 }
210 217
211 /* Add EXTRA: Age to display seconds since last beacon/probe response 218 /* Add EXTRA: Age to display seconds since last beacon/probe response
@@ -217,7 +224,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
217 jiffies_to_msecs(jiffies - network->last_scanned)); 224 jiffies_to_msecs(jiffies - network->last_scanned));
218 iwe.u.data.length = p - custom; 225 iwe.u.data.length = p - custom;
219 if (iwe.u.data.length) 226 if (iwe.u.data.length)
220 start = iwe_stream_add_point(start, stop, &iwe, custom); 227 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
221 228
222 /* Add spectrum management information */ 229 /* Add spectrum management information */
223 iwe.cmd = -1; 230 iwe.cmd = -1;
@@ -238,7 +245,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
238 245
239 if (iwe.cmd == IWEVCUSTOM) { 246 if (iwe.cmd == IWEVCUSTOM) {
240 iwe.u.data.length = p - custom; 247 iwe.u.data.length = p - custom;
241 start = iwe_stream_add_point(start, stop, &iwe, custom); 248 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
242 } 249 }
243 250
244 return start; 251 return start;
@@ -272,7 +279,8 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
272 279
273 if (ieee->scan_age == 0 || 280 if (ieee->scan_age == 0 ||
274 time_after(network->last_scanned + ieee->scan_age, jiffies)) 281 time_after(network->last_scanned + ieee->scan_age, jiffies))
275 ev = ieee80211_translate_scan(ieee, ev, stop, network); 282 ev = ieee80211_translate_scan(ieee, ev, stop, network,
283 info);
276 else 284 else
277 IEEE80211_DEBUG_SCAN("Not showing network '%s (" 285 IEEE80211_DEBUG_SCAN("Not showing network '%s ("
278 "%s)' due to age (%dms).\n", 286 "%s)' due to age (%dms).\n",
@@ -744,98 +752,9 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
744 return 0; 752 return 0;
745} 753}
746 754
747int ieee80211_wx_set_auth(struct net_device *dev,
748 struct iw_request_info *info,
749 union iwreq_data *wrqu,
750 char *extra)
751{
752 struct ieee80211_device *ieee = netdev_priv(dev);
753 unsigned long flags;
754 int err = 0;
755
756 spin_lock_irqsave(&ieee->lock, flags);
757
758 switch (wrqu->param.flags & IW_AUTH_INDEX) {
759 case IW_AUTH_WPA_VERSION:
760 case IW_AUTH_CIPHER_PAIRWISE:
761 case IW_AUTH_CIPHER_GROUP:
762 case IW_AUTH_KEY_MGMT:
763 /*
764 * Host AP driver does not use these parameters and allows
765 * wpa_supplicant to control them internally.
766 */
767 break;
768 case IW_AUTH_TKIP_COUNTERMEASURES:
769 break; /* FIXME */
770 case IW_AUTH_DROP_UNENCRYPTED:
771 ieee->drop_unencrypted = !!wrqu->param.value;
772 break;
773 case IW_AUTH_80211_AUTH_ALG:
774 break; /* FIXME */
775 case IW_AUTH_WPA_ENABLED:
776 ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value;
777 break;
778 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
779 ieee->ieee802_1x = !!wrqu->param.value;
780 break;
781 case IW_AUTH_PRIVACY_INVOKED:
782 ieee->privacy_invoked = !!wrqu->param.value;
783 break;
784 default:
785 err = -EOPNOTSUPP;
786 break;
787 }
788 spin_unlock_irqrestore(&ieee->lock, flags);
789 return err;
790}
791
792int ieee80211_wx_get_auth(struct net_device *dev,
793 struct iw_request_info *info,
794 union iwreq_data *wrqu,
795 char *extra)
796{
797 struct ieee80211_device *ieee = netdev_priv(dev);
798 unsigned long flags;
799 int err = 0;
800
801 spin_lock_irqsave(&ieee->lock, flags);
802
803 switch (wrqu->param.flags & IW_AUTH_INDEX) {
804 case IW_AUTH_WPA_VERSION:
805 case IW_AUTH_CIPHER_PAIRWISE:
806 case IW_AUTH_CIPHER_GROUP:
807 case IW_AUTH_KEY_MGMT:
808 case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */
809 case IW_AUTH_80211_AUTH_ALG: /* FIXME */
810 /*
811 * Host AP driver does not use these parameters and allows
812 * wpa_supplicant to control them internally.
813 */
814 err = -EOPNOTSUPP;
815 break;
816 case IW_AUTH_DROP_UNENCRYPTED:
817 wrqu->param.value = ieee->drop_unencrypted;
818 break;
819 case IW_AUTH_WPA_ENABLED:
820 wrqu->param.value = ieee->wpa_enabled;
821 break;
822 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
823 wrqu->param.value = ieee->ieee802_1x;
824 break;
825 default:
826 err = -EOPNOTSUPP;
827 break;
828 }
829 spin_unlock_irqrestore(&ieee->lock, flags);
830 return err;
831}
832
833EXPORT_SYMBOL(ieee80211_wx_set_encodeext); 755EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
834EXPORT_SYMBOL(ieee80211_wx_get_encodeext); 756EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
835 757
836EXPORT_SYMBOL(ieee80211_wx_get_scan); 758EXPORT_SYMBOL(ieee80211_wx_get_scan);
837EXPORT_SYMBOL(ieee80211_wx_set_encode); 759EXPORT_SYMBOL(ieee80211_wx_set_encode);
838EXPORT_SYMBOL(ieee80211_wx_get_encode); 760EXPORT_SYMBOL(ieee80211_wx_get_encode);
839
840EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth);
841EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth);
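Every iwe_stream_add_event/point/value call in the scan translator now threads the struct iw_request_info through, and the fixed IW_EV_LCP_LEN offset becomes iwe_stream_lcp_len(info). The point of the churn is ABI compatibility: 32-bit userspace on a 64-bit kernel lays out struct iw_event with different padding, so the stream helpers must size and pack events for the caller rather than for the kernel. The resulting calling pattern, condensed from the function above:

    iwe.cmd = SIOCGIWAP;
    iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
    memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
    start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);

    /* offsets into the stream must be ABI-sized as well */
    current_val = start + iwe_stream_lcp_len(info);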
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24eca23c2db3..95a966dd1916 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET protocol family socket handler. 6 * PF_INET protocol family socket handler.
7 * 7 *
8 * Version: $Id: af_inet.c,v 1.137 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de> 10 * Florian La Roche, <flla@stud.uni-sb.de>
@@ -1365,7 +1363,7 @@ static int __init init_ipv4_mibs(void)
1365 sizeof(struct udp_mib)) < 0) 1363 sizeof(struct udp_mib)) < 0)
1366 goto err_udplite_mib; 1364 goto err_udplite_mib;
1367 1365
1368 tcp_mib_init(); 1366 tcp_mib_init(&init_net);
1369 1367
1370 return 0; 1368 return 0;
1371 1369
@@ -1481,14 +1479,15 @@ static int __init inet_init(void)
1481 * Initialise the multicast router 1479 * Initialise the multicast router
1482 */ 1480 */
1483#if defined(CONFIG_IP_MROUTE) 1481#if defined(CONFIG_IP_MROUTE)
1484 ip_mr_init(); 1482 if (ip_mr_init())
1483 printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n");
1485#endif 1484#endif
1486 /* 1485 /*
1487 * Initialise per-cpu ipv4 mibs 1486 * Initialise per-cpu ipv4 mibs
1488 */ 1487 */
1489 1488
1490 if (init_ipv4_mibs()) 1489 if (init_ipv4_mibs())
1491 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; 1490 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
1492 1491
1493 ipv4_proc_init(); 1492 ipv4_proc_init();
1494 1493
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9b539fa9fe18..b043eda60b04 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,7 +1,5 @@
1/* linux/net/ipv4/arp.c 1/* linux/net/ipv4/arp.c
2 * 2 *
3 * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $
4 *
5 * Copyright (C) 1994 by Florian La Roche 3 * Copyright (C) 1994 by Florian La Roche
6 * 4 *
7 * This module implements the Address Resolution Protocol ARP (RFC 826), 5 * This module implements the Address Resolution Protocol ARP (RFC 826),
@@ -423,11 +421,12 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
423 struct rtable *rt; 421 struct rtable *rt;
424 int flag = 0; 422 int flag = 0;
425 /*unsigned long now; */ 423 /*unsigned long now; */
424 struct net *net = dev_net(dev);
426 425
427 if (ip_route_output_key(dev_net(dev), &rt, &fl) < 0) 426 if (ip_route_output_key(net, &rt, &fl) < 0)
428 return 1; 427 return 1;
429 if (rt->u.dst.dev != dev) { 428 if (rt->u.dst.dev != dev) {
430 NET_INC_STATS_BH(LINUX_MIB_ARPFILTER); 429 NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
431 flag = 1; 430 flag = 1;
432 } 431 }
433 ip_rt_put(rt); 432 ip_rt_put(rt);
@@ -1199,7 +1198,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
1199 switch (event) { 1198 switch (event) {
1200 case NETDEV_CHANGEADDR: 1199 case NETDEV_CHANGEADDR:
1201 neigh_changeaddr(&arp_tbl, dev); 1200 neigh_changeaddr(&arp_tbl, dev);
1202 rt_cache_flush(0); 1201 rt_cache_flush(dev_net(dev), 0);
1203 break; 1202 break;
1204 default: 1203 default:
1205 break; 1204 break;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 0c0c73f368ce..5e6c5a0f3fde 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -52,7 +52,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
52 inet->sport, usin->sin_port, sk, 1); 52 inet->sport, usin->sin_port, sk, 1);
53 if (err) { 53 if (err) {
54 if (err == -ENETUNREACH) 54 if (err == -ENETUNREACH)
55 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 55 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
56 return err; 56 return err;
57 } 57 }
58 58
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 79a7ef6209ff..2e667e2f90df 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * NET3 IP device support routines. 2 * NET3 IP device support routines.
3 * 3 *
4 * Version: $Id: devinet.c,v 1.44 2001/10/31 21:55:54 davem Exp $
5 *
6 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
@@ -170,6 +168,8 @@ static struct in_device *inetdev_init(struct net_device *dev)
170 in_dev->dev = dev; 168 in_dev->dev = dev;
171 if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) 169 if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
172 goto out_kfree; 170 goto out_kfree;
171 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
172 dev_disable_lro(dev);
173 /* Reference in_dev->dev */ 173 /* Reference in_dev->dev */
174 dev_hold(dev); 174 dev_hold(dev);
175 /* Account for reference dev->ip_ptr (below) */ 175 /* Account for reference dev->ip_ptr (below) */
@@ -1013,7 +1013,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1013 memcpy(old, ifa->ifa_label, IFNAMSIZ); 1013 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1014 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 1014 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1015 if (named++ == 0) 1015 if (named++ == 0)
1016 continue; 1016 goto skip;
1017 dot = strchr(old, ':'); 1017 dot = strchr(old, ':');
1018 if (dot == NULL) { 1018 if (dot == NULL) {
1019 sprintf(old, ":%d", named); 1019 sprintf(old, ":%d", named);
@@ -1024,6 +1024,8 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1024 } else { 1024 } else {
1025 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); 1025 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1026 } 1026 }
1027skip:
1028 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1027 } 1029 }
1028} 1030}
1029 1031
@@ -1241,6 +1243,8 @@ static void inet_forward_change(struct net *net)
1241 read_lock(&dev_base_lock); 1243 read_lock(&dev_base_lock);
1242 for_each_netdev(net, dev) { 1244 for_each_netdev(net, dev) {
1243 struct in_device *in_dev; 1245 struct in_device *in_dev;
1246 if (on)
1247 dev_disable_lro(dev);
1244 rcu_read_lock(); 1248 rcu_read_lock();
1245 in_dev = __in_dev_get_rcu(dev); 1249 in_dev = __in_dev_get_rcu(dev);
1246 if (in_dev) 1250 if (in_dev)
@@ -1248,8 +1252,6 @@ static void inet_forward_change(struct net *net)
1248 rcu_read_unlock(); 1252 rcu_read_unlock();
1249 } 1253 }
1250 read_unlock(&dev_base_lock); 1254 read_unlock(&dev_base_lock);
1251
1252 rt_cache_flush(0);
1253} 1255}
1254 1256
1255static int devinet_conf_proc(ctl_table *ctl, int write, 1257static int devinet_conf_proc(ctl_table *ctl, int write,
@@ -1335,10 +1337,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1335 if (write && *valp != val) { 1337 if (write && *valp != val) {
1336 struct net *net = ctl->extra2; 1338 struct net *net = ctl->extra2;
1337 1339
1338 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) 1340 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1339 inet_forward_change(net); 1341 rtnl_lock();
1340 else if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) 1342 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1341 rt_cache_flush(0); 1343 inet_forward_change(net);
1344 } else if (*valp) {
1345 struct ipv4_devconf *cnf = ctl->extra1;
1346 struct in_device *idev =
1347 container_of(cnf, struct in_device, cnf);
1348 dev_disable_lro(idev->dev);
1349 }
1350 rtnl_unlock();
1351 rt_cache_flush(net, 0);
1352 }
1342 } 1353 }
1343 1354
1344 return ret; 1355 return ret;
@@ -1351,9 +1362,10 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1351 int *valp = ctl->data; 1362 int *valp = ctl->data;
1352 int val = *valp; 1363 int val = *valp;
1353 int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 1364 int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
1365 struct net *net = ctl->extra2;
1354 1366
1355 if (write && *valp != val) 1367 if (write && *valp != val)
1356 rt_cache_flush(0); 1368 rt_cache_flush(net, 0);
1357 1369
1358 return ret; 1370 return ret;
1359} 1371}
@@ -1364,9 +1376,10 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
1364{ 1376{
1365 int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp, 1377 int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp,
1366 newval, newlen); 1378 newval, newlen);
1379 struct net *net = table->extra2;
1367 1380
1368 if (ret == 1) 1381 if (ret == 1)
1369 rt_cache_flush(0); 1382 rt_cache_flush(net, 0);
1370 1383
1371 return ret; 1384 return ret;
1372} 1385}
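The dev_disable_lro() calls enforce a single rule from three places (inetdev_init() for a forwarding device, the all-interfaces flip in inet_forward_change(), and the per-device sysctl write): a router must not run large-receive offload, because LRO-merged super-frames cannot be resegmented correctly when forwarded. Note also that the sysctl path now takes the rtnl lock for this, and that rt_cache_flush() everywhere gains the struct net * so only the affected namespace's routing cache is invalidated. The handler's decision, condensed from devinet_sysctl_forward() above (illustrative; idev comes from the container_of() shown in the hunk):

    if (write && *valp != val &&
        valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
            rtnl_lock();
            if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING))
                    inet_forward_change(net);       /* all devices, LRO off on each */
            else if (*valp)
                    dev_disable_lro(idev->dev);     /* just this device */
            rtnl_unlock();
            rt_cache_flush(net, 0);                 /* per-namespace flush */
    }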
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0b2ac6a3d903..65c1503f8cc8 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: FIB frontend. 6 * IPv4 Forwarding Information Base: FIB frontend.
7 * 7 *
8 * Version: $Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -146,7 +144,7 @@ static void fib_flush(struct net *net)
146 } 144 }
147 145
148 if (flushed) 146 if (flushed)
149 rt_cache_flush(-1); 147 rt_cache_flush(net, -1);
150} 148}
151 149
152/* 150/*
@@ -899,21 +897,22 @@ static void fib_disable_ip(struct net_device *dev, int force)
899{ 897{
900 if (fib_sync_down_dev(dev, force)) 898 if (fib_sync_down_dev(dev, force))
901 fib_flush(dev_net(dev)); 899 fib_flush(dev_net(dev));
902 rt_cache_flush(0); 900 rt_cache_flush(dev_net(dev), 0);
903 arp_ifdown(dev); 901 arp_ifdown(dev);
904} 902}
905 903
906static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) 904static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
907{ 905{
908 struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; 906 struct in_ifaddr *ifa = (struct in_ifaddr*)ptr;
907 struct net_device *dev = ifa->ifa_dev->dev;
909 908
910 switch (event) { 909 switch (event) {
911 case NETDEV_UP: 910 case NETDEV_UP:
912 fib_add_ifaddr(ifa); 911 fib_add_ifaddr(ifa);
913#ifdef CONFIG_IP_ROUTE_MULTIPATH 912#ifdef CONFIG_IP_ROUTE_MULTIPATH
914 fib_sync_up(ifa->ifa_dev->dev); 913 fib_sync_up(dev);
915#endif 914#endif
916 rt_cache_flush(-1); 915 rt_cache_flush(dev_net(dev), -1);
917 break; 916 break;
918 case NETDEV_DOWN: 917 case NETDEV_DOWN:
919 fib_del_ifaddr(ifa); 918 fib_del_ifaddr(ifa);
@@ -921,9 +920,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
921 /* Last address was deleted from this interface. 920 /* Last address was deleted from this interface.
922 Disable IP. 921 Disable IP.
923 */ 922 */
924 fib_disable_ip(ifa->ifa_dev->dev, 1); 923 fib_disable_ip(dev, 1);
925 } else { 924 } else {
926 rt_cache_flush(-1); 925 rt_cache_flush(dev_net(dev), -1);
927 } 926 }
928 break; 927 break;
929 } 928 }
@@ -951,14 +950,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
951#ifdef CONFIG_IP_ROUTE_MULTIPATH 950#ifdef CONFIG_IP_ROUTE_MULTIPATH
952 fib_sync_up(dev); 951 fib_sync_up(dev);
953#endif 952#endif
954 rt_cache_flush(-1); 953 rt_cache_flush(dev_net(dev), -1);
955 break; 954 break;
956 case NETDEV_DOWN: 955 case NETDEV_DOWN:
957 fib_disable_ip(dev, 0); 956 fib_disable_ip(dev, 0);
958 break; 957 break;
959 case NETDEV_CHANGEMTU: 958 case NETDEV_CHANGEMTU:
960 case NETDEV_CHANGE: 959 case NETDEV_CHANGE:
961 rt_cache_flush(0); 960 rt_cache_flush(dev_net(dev), 0);
962 break; 961 break;
963 } 962 }
964 return NOTIFY_DONE; 963 return NOTIFY_DONE;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 2e2fc3376ac9..c8cac6c7f881 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 FIB: lookup engine and maintenance routines. 6 * IPv4 FIB: lookup engine and maintenance routines.
7 * 7 *
8 * Version: $Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -474,7 +472,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
474 472
475 fib_release_info(fi_drop); 473 fib_release_info(fi_drop);
476 if (state & FA_S_ACCESSED) 474 if (state & FA_S_ACCESSED)
477 rt_cache_flush(-1); 475 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
478 rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id, 476 rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id,
479 &cfg->fc_nlinfo, NLM_F_REPLACE); 477 &cfg->fc_nlinfo, NLM_F_REPLACE);
480 return 0; 478 return 0;
@@ -534,7 +532,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
534 532
535 if (new_f) 533 if (new_f)
536 fz->fz_nent++; 534 fz->fz_nent++;
537 rt_cache_flush(-1); 535 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
538 536
539 rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id, 537 rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
540 &cfg->fc_nlinfo, 0); 538 &cfg->fc_nlinfo, 0);
@@ -616,7 +614,7 @@ static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg)
616 write_unlock_bh(&fib_hash_lock); 614 write_unlock_bh(&fib_hash_lock);
617 615
618 if (fa->fa_state & FA_S_ACCESSED) 616 if (fa->fa_state & FA_S_ACCESSED)
619 rt_cache_flush(-1); 617 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
620 fn_free_alias(fa, f); 618 fn_free_alias(fa, f);
621 if (kill_fn) { 619 if (kill_fn) {
622 fn_free_node(f); 620 fn_free_node(f);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 1fb56876be54..6080d7120821 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -258,9 +258,9 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
258 + nla_total_size(4); /* flow */ 258 + nla_total_size(4); /* flow */
259} 259}
260 260
261static void fib4_rule_flush_cache(void) 261static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
262{ 262{
263 rt_cache_flush(-1); 263 rt_cache_flush(ops->fro_net, -1);
264} 264}
265 265
266static struct fib_rules_ops fib4_rules_ops_template = { 266static struct fib_rules_ops fib4_rules_ops_template = {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0d4d72827e4b..ded2ae34eab1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: semantics. 6 * IPv4 Forwarding Information Base: semantics.
7 * 7 *
8 * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e1600ad8fb0e..f155a66d6ebf 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -22,8 +22,6 @@
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson 22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 * 24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 * 25 *
28 * Code from fib_hash has been reused which includes the following header: 26 * Code from fib_hash has been reused which includes the following header:
29 * 27 *
@@ -1273,7 +1271,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1273 1271
1274 fib_release_info(fi_drop); 1272 fib_release_info(fi_drop);
1275 if (state & FA_S_ACCESSED) 1273 if (state & FA_S_ACCESSED)
1276 rt_cache_flush(-1); 1274 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1277 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, 1275 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1278 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); 1276 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1279 1277
@@ -1318,7 +1316,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1318 list_add_tail_rcu(&new_fa->fa_list, 1316 list_add_tail_rcu(&new_fa->fa_list,
1319 (fa ? &fa->fa_list : fa_head)); 1317 (fa ? &fa->fa_list : fa_head));
1320 1318
1321 rt_cache_flush(-1); 1319 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1322 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, 1320 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1323 &cfg->fc_nlinfo, 0); 1321 &cfg->fc_nlinfo, 0);
1324succeeded: 1322succeeded:
@@ -1661,7 +1659,7 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1661 trie_leaf_remove(t, l); 1659 trie_leaf_remove(t, l);
1662 1660
1663 if (fa->fa_state & FA_S_ACCESSED) 1661 if (fa->fa_state & FA_S_ACCESSED)
1664 rt_cache_flush(-1); 1662 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1665 1663
1666 fib_release_info(fa->fa_info); 1664 fib_release_info(fa->fa_info);
1667 alias_free_mem_rcu(fa); 1665 alias_free_mem_rcu(fa);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 87397351ddac..ea60ad41008c 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Alan Cox, <alan@redhat.com> 4 * Alan Cox, <alan@redhat.com>
5 * 5 *
6 * Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
@@ -298,10 +296,10 @@ out:
298/* 296/*
299 * Maintain the counters used in the SNMP statistics for outgoing ICMP 297 * Maintain the counters used in the SNMP statistics for outgoing ICMP
300 */ 298 */
301void icmp_out_count(unsigned char type) 299void icmp_out_count(struct net *net, unsigned char type)
302{ 300{
303 ICMPMSGOUT_INC_STATS(type); 301 ICMPMSGOUT_INC_STATS(net, type);
304 ICMP_INC_STATS(ICMP_MIB_OUTMSGS); 302 ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
305} 303}
306 304
307/* 305/*
@@ -765,7 +763,7 @@ static void icmp_unreach(struct sk_buff *skb)
765out: 763out:
766 return; 764 return;
767out_err: 765out_err:
768 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 766 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
769 goto out; 767 goto out;
770} 768}
771 769
@@ -805,7 +803,7 @@ static void icmp_redirect(struct sk_buff *skb)
805out: 803out:
806 return; 804 return;
807out_err: 805out_err:
808 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 806 ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
809 goto out; 807 goto out;
810} 808}
811 809
@@ -876,7 +874,7 @@ static void icmp_timestamp(struct sk_buff *skb)
876out: 874out:
877 return; 875 return;
878out_err: 876out_err:
879 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 877 ICMP_INC_STATS_BH(dev_net(skb->dst->dev), ICMP_MIB_INERRORS);
880 goto out; 878 goto out;
881} 879}
882 880
@@ -975,6 +973,7 @@ int icmp_rcv(struct sk_buff *skb)
975{ 973{
976 struct icmphdr *icmph; 974 struct icmphdr *icmph;
977 struct rtable *rt = skb->rtable; 975 struct rtable *rt = skb->rtable;
976 struct net *net = dev_net(rt->u.dst.dev);
978 977
979 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 978 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
980 int nh; 979 int nh;
@@ -995,7 +994,7 @@ int icmp_rcv(struct sk_buff *skb)
995 skb_set_network_header(skb, nh); 994 skb_set_network_header(skb, nh);
996 } 995 }
997 996
998 ICMP_INC_STATS_BH(ICMP_MIB_INMSGS); 997 ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
999 998
1000 switch (skb->ip_summed) { 999 switch (skb->ip_summed) {
1001 case CHECKSUM_COMPLETE: 1000 case CHECKSUM_COMPLETE:
@@ -1013,7 +1012,7 @@ int icmp_rcv(struct sk_buff *skb)
1013 1012
1014 icmph = icmp_hdr(skb); 1013 icmph = icmp_hdr(skb);
1015 1014
1016 ICMPMSGIN_INC_STATS_BH(icmph->type); 1015 ICMPMSGIN_INC_STATS_BH(net, icmph->type);
1017 /* 1016 /*
1018 * 18 is the highest 'known' ICMP type. Anything else is a mystery 1017 * 18 is the highest 'known' ICMP type. Anything else is a mystery
1019 * 1018 *
@@ -1029,9 +1028,6 @@ int icmp_rcv(struct sk_buff *skb)
1029 */ 1028 */
1030 1029
1031 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 1030 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1032 struct net *net;
1033
1034 net = dev_net(rt->u.dst.dev);
1035 /* 1031 /*
1036 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be 1032 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
1037 * silently ignored (we let user decide with a sysctl). 1033 * silently ignored (we let user decide with a sysctl).
@@ -1057,7 +1053,7 @@ drop:
1057 kfree_skb(skb); 1053 kfree_skb(skb);
1058 return 0; 1054 return 0;
1059error: 1055error:
1060 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 1056 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1061 goto drop; 1057 goto drop;
1062} 1058}
1063 1059
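Hoisting struct net to the top of icmp_rcv() is what makes the final error label countable: previously the namespace was derived only inside the broadcast branch, so the out-of-line ICMP_MIB_INERRORS bump had nothing to charge it to. On the output side the same plumbing reaches icmp_out_count(), now fed the namespace by its caller in the IP output path; the counter pair, as changed above:

    void icmp_out_count(struct net *net, unsigned char type)
    {
            ICMPMSGOUT_INC_STATS(net, type);        /* per-message-type counter */
            ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);  /* aggregate OutMsgs */
    }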
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2769dc4a4c84..68e84a933e90 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,8 +8,6 @@
8 * the older version didn't come out right using gcc 2.5.8, the newer one 8 * the older version didn't come out right using gcc 2.5.8, the newer one
9 * seems to fall out with gcc 2.6.2. 9 * seems to fall out with gcc 2.6.2.
10 * 10 *
11 * Version: $Id: igmp.c,v 1.47 2002/02/01 22:01:03 davem Exp $
12 *
13 * Authors: 11 * Authors:
14 * Alan Cox <Alan.Cox@linux.org> 12 * Alan Cox <Alan.Cox@linux.org>
15 * 13 *
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ec834480abe7..bb81c958b744 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -103,7 +103,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
103 rover = net_random() % remaining + low; 103 rover = net_random() % remaining + low;
104 104
105 do { 105 do {
106 head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; 106 head = &hashinfo->bhash[inet_bhashfn(net, rover,
107 hashinfo->bhash_size)];
107 spin_lock(&head->lock); 108 spin_lock(&head->lock);
108 inet_bind_bucket_for_each(tb, node, &head->chain) 109 inet_bind_bucket_for_each(tb, node, &head->chain)
109 if (tb->ib_net == net && tb->port == rover) 110 if (tb->ib_net == net && tb->port == rover)
@@ -130,7 +131,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
130 */ 131 */
131 snum = rover; 132 snum = rover;
132 } else { 133 } else {
133 head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; 134 head = &hashinfo->bhash[inet_bhashfn(net, snum,
135 hashinfo->bhash_size)];
134 spin_lock(&head->lock); 136 spin_lock(&head->lock);
135 inet_bind_bucket_for_each(tb, node, &head->chain) 137 inet_bind_bucket_for_each(tb, node, &head->chain)
136 if (tb->ib_net == net && tb->port == snum) 138 if (tb->ib_net == net && tb->port == snum)
@@ -336,15 +338,16 @@ struct dst_entry* inet_csk_route_req(struct sock *sk,
336 .uli_u = { .ports = 338 .uli_u = { .ports =
337 { .sport = inet_sk(sk)->sport, 339 { .sport = inet_sk(sk)->sport,
338 .dport = ireq->rmt_port } } }; 340 .dport = ireq->rmt_port } } };
341 struct net *net = sock_net(sk);
339 342
340 security_req_classify_flow(req, &fl); 343 security_req_classify_flow(req, &fl);
341 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) { 344 if (ip_route_output_flow(net, &rt, &fl, sk, 0)) {
342 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 345 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
343 return NULL; 346 return NULL;
344 } 347 }
345 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) { 348 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
346 ip_rt_put(rt); 349 ip_rt_put(rt);
347 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 350 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
348 return NULL; 351 return NULL;
349 } 352 }
350 return &rt->u.dst; 353 return &rt->u.dst;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index da97695e7096..c10036e7a463 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * inet_diag.c Module for monitoring INET transport protocols sockets. 2 * inet_diag.c Module for monitoring INET transport protocols sockets.
3 * 3 *
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 2023d37b2708..115f53722d20 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -70,7 +70,8 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
70static void __inet_put_port(struct sock *sk) 70static void __inet_put_port(struct sock *sk)
71{ 71{
72 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 72 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
73 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); 73 const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num,
74 hashinfo->bhash_size);
74 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 75 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
75 struct inet_bind_bucket *tb; 76 struct inet_bind_bucket *tb;
76 77
@@ -95,7 +96,8 @@ EXPORT_SYMBOL(inet_put_port);
95void __inet_inherit_port(struct sock *sk, struct sock *child) 96void __inet_inherit_port(struct sock *sk, struct sock *child)
96{ 97{
97 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; 98 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
98 const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); 99 const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num,
100 table->bhash_size);
99 struct inet_bind_hashbucket *head = &table->bhash[bhash]; 101 struct inet_bind_hashbucket *head = &table->bhash[bhash];
100 struct inet_bind_bucket *tb; 102 struct inet_bind_bucket *tb;
101 103
@@ -192,7 +194,7 @@ struct sock *__inet_lookup_listener(struct net *net,
192 const struct hlist_head *head; 194 const struct hlist_head *head;
193 195
194 read_lock(&hashinfo->lhash_lock); 196 read_lock(&hashinfo->lhash_lock);
195 head = &hashinfo->listening_hash[inet_lhashfn(hnum)]; 197 head = &hashinfo->listening_hash[inet_lhashfn(net, hnum)];
196 if (!hlist_empty(head)) { 198 if (!hlist_empty(head)) {
197 const struct inet_sock *inet = inet_sk((sk = __sk_head(head))); 199 const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
198 200
@@ -225,7 +227,7 @@ struct sock * __inet_lookup_established(struct net *net,
225 /* Optimize here for direct hit, only listening connections can 227 /* Optimize here for direct hit, only listening connections can
226 * have wildcards anyways. 228 * have wildcards anyways.
227 */ 229 */
228 unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); 230 unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
229 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); 231 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
230 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); 232 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
231 233
@@ -265,13 +267,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
265 int dif = sk->sk_bound_dev_if; 267 int dif = sk->sk_bound_dev_if;
266 INET_ADDR_COOKIE(acookie, saddr, daddr) 268 INET_ADDR_COOKIE(acookie, saddr, daddr)
267 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 269 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
268 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); 270 struct net *net = sock_net(sk);
271 unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
269 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 272 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
270 rwlock_t *lock = inet_ehash_lockp(hinfo, hash); 273 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
271 struct sock *sk2; 274 struct sock *sk2;
272 const struct hlist_node *node; 275 const struct hlist_node *node;
273 struct inet_timewait_sock *tw; 276 struct inet_timewait_sock *tw;
274 struct net *net = sock_net(sk);
275 277
276 prefetch(head->chain.first); 278 prefetch(head->chain.first);
277 write_lock(lock); 279 write_lock(lock);
@@ -310,11 +312,11 @@ unique:
310 312
311 if (twp) { 313 if (twp) {
312 *twp = tw; 314 *twp = tw;
313 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 315 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
314 } else if (tw) { 316 } else if (tw) {
315 /* Silly. Should hash-dance instead... */ 317 /* Silly. Should hash-dance instead... */
316 inet_twsk_deschedule(tw, death_row); 318 inet_twsk_deschedule(tw, death_row);
317 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 319 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
318 320
319 inet_twsk_put(tw); 321 inet_twsk_put(tw);
320 } 322 }
@@ -438,7 +440,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
438 local_bh_disable(); 440 local_bh_disable();
439 for (i = 1; i <= remaining; i++) { 441 for (i = 1; i <= remaining; i++) {
440 port = low + (i + offset) % remaining; 442 port = low + (i + offset) % remaining;
441 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; 443 head = &hinfo->bhash[inet_bhashfn(net, port,
444 hinfo->bhash_size)];
442 spin_lock(&head->lock); 445 spin_lock(&head->lock);
443 446
444 /* Does not bother with rcv_saddr checks, 447 /* Does not bother with rcv_saddr checks,
@@ -493,7 +496,7 @@ ok:
493 goto out; 496 goto out;
494 } 497 }
495 498
496 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; 499 head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
497 tb = inet_csk(sk)->icsk_bind_hash; 500 tb = inet_csk(sk)->icsk_bind_hash;
498 spin_lock_bh(&head->lock); 501 spin_lock_bh(&head->lock);
499 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 502 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
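inet_bhashfn(), inet_ehashfn() and inet_lhashfn() all grow a struct net * because the namespace is folded into the hash itself; without it, identical ports in different namespaces would land in one chain and every lookup would pay an extra per-entry netns comparison. Roughly how the mixing works, paraphrased from the headers this series touches (the exact shift is a detail from memory, not a contract):

    static inline u32 net_hash_mix(struct net *net)
    {
    #ifdef CONFIG_NET_NS
            /* the net pointer is a stable per-namespace token */
            return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
    #else
            return 0;       /* single namespace: mixing is a no-op */
    #endif
    }

    static inline int inet_bhashfn(struct net *net, const __u16 lport,
                                   const int bhash_size)
    {
            return (lport + net_hash_mix(net)) & (bhash_size - 1);
    }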
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ce16e9ac24c1..75c2def8f9a0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -32,7 +32,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
32 write_unlock(lock); 32 write_unlock(lock);
33 33
34 /* Disassociate with bind bucket. */ 34 /* Disassociate with bind bucket. */
35 bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; 35 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
36 hashinfo->bhash_size)];
36 spin_lock(&bhead->lock); 37 spin_lock(&bhead->lock);
37 tb = tw->tw_tb; 38 tb = tw->tw_tb;
38 __hlist_del(&tw->tw_bind_node); 39 __hlist_del(&tw->tw_bind_node);
@@ -81,7 +82,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
81 Note, that any socket with inet->num != 0 MUST be bound in 82 Note, that any socket with inet->num != 0 MUST be bound in
82 binding cache, even if it is closed. 83 binding cache, even if it is closed.
83 */ 84 */
84 bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)]; 85 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
86 hashinfo->bhash_size)];
85 spin_lock(&bhead->lock); 87 spin_lock(&bhead->lock);
86 tw->tw_tb = icsk->icsk_bind_hash; 88 tw->tw_tb = icsk->icsk_bind_hash;
87 BUG_TRAP(icsk->icsk_bind_hash); 89 BUG_TRAP(icsk->icsk_bind_hash);
@@ -158,6 +160,9 @@ rescan:
158 __inet_twsk_del_dead_node(tw); 160 __inet_twsk_del_dead_node(tw);
159 spin_unlock(&twdr->death_lock); 161 spin_unlock(&twdr->death_lock);
160 __inet_twsk_kill(tw, twdr->hashinfo); 162 __inet_twsk_kill(tw, twdr->hashinfo);
163#ifdef CONFIG_NET_NS
164 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
165#endif
161 inet_twsk_put(tw); 166 inet_twsk_put(tw);
162 killed++; 167 killed++;
163 spin_lock(&twdr->death_lock); 168 spin_lock(&twdr->death_lock);
@@ -176,8 +181,9 @@ rescan:
176 } 181 }
177 182
178 twdr->tw_count -= killed; 183 twdr->tw_count -= killed;
179 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed); 184#ifndef CONFIG_NET_NS
180 185 NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
186#endif
181 return ret; 187 return ret;
182} 188}
183 189
@@ -370,6 +376,9 @@ void inet_twdr_twcal_tick(unsigned long data)
370 &twdr->twcal_row[slot]) { 376 &twdr->twcal_row[slot]) {
371 __inet_twsk_del_dead_node(tw); 377 __inet_twsk_del_dead_node(tw);
372 __inet_twsk_kill(tw, twdr->hashinfo); 378 __inet_twsk_kill(tw, twdr->hashinfo);
379#ifdef CONFIG_NET_NS
380 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
381#endif
373 inet_twsk_put(tw); 382 inet_twsk_put(tw);
374 killed++; 383 killed++;
375 } 384 }
@@ -393,7 +402,9 @@ void inet_twdr_twcal_tick(unsigned long data)
393out: 402out:
394 if ((twdr->tw_count -= killed) == 0) 403 if ((twdr->tw_count -= killed) == 0)
395 del_timer(&twdr->tw_timer); 404 del_timer(&twdr->tw_timer);
396 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed); 405#ifndef CONFIG_NET_NS
406 NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
407#endif
397 spin_unlock(&twdr->death_lock); 408 spin_unlock(&twdr->death_lock);
398} 409}
399 410
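Why the CONFIG_NET_NS split rather than one unconditional form: LINUX_MIB_TIMEWAITED and LINUX_MIB_TIMEWAITKILLED become per-namespace counters, and with namespaces enabled each reaped timewait socket may belong to a different net, so the counter has to be charged per socket via twsk_net(tw). Without namespaces everything is owned by init_net and a single batched add after the loop is cheaper than n increments. A standalone analogue of the two builds (names illustrative):

    #include <stddef.h>

    struct dom { unsigned long timewaited; };

    /* per_item != NULL models the namespaced build (each entry has its
     * own accounting domain); per_item == NULL models the single-domain
     * build, where one add at the end replaces n increments */
    static void reap(struct dom **per_item, size_t n, struct dom *global)
    {
            size_t i, killed = 0;

            for (i = 0; i < n; i++) {
                    /* ... tear down entry i ... */
                    if (per_item)
                            per_item[i]->timewaited++;
                    killed++;
            }
            if (!per_item)
                    global->timewaited += killed;
    }
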
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index af995198f643..a456ceeac3f2 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * This source is covered by the GNU GPL, the same as all kernel sources. 4 * This source is covered by the GNU GPL, the same as all kernel sources.
5 * 5 *
6 * Version: $Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
7 *
8 * Authors: Andrey V. Savochkin <saw@msu.ru> 6 * Authors: Andrey V. Savochkin <saw@msu.ru>
9 */ 7 */
10 8
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 4813c39b438b..450016b89a18 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP forwarding functionality. 6 * The IP forwarding functionality.
7 * 7 *
8 * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
@@ -44,7 +42,7 @@ static int ip_forward_finish(struct sk_buff *skb)
44{ 42{
45 struct ip_options * opt = &(IPCB(skb)->opt); 43 struct ip_options * opt = &(IPCB(skb)->opt);
46 44
47 IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); 45 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
48 46
49 if (unlikely(opt->optlen)) 47 if (unlikely(opt->optlen))
50 ip_forward_options(skb); 48 ip_forward_options(skb);
@@ -58,6 +56,9 @@ int ip_forward(struct sk_buff *skb)
58 struct rtable *rt; /* Route we use */ 56 struct rtable *rt; /* Route we use */
59 struct ip_options * opt = &(IPCB(skb)->opt); 57 struct ip_options * opt = &(IPCB(skb)->opt);
60 58
59 if (skb_warn_if_lro(skb))
60 goto drop;
61
61 if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) 62 if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
62 goto drop; 63 goto drop;
63 64
@@ -87,7 +88,7 @@ int ip_forward(struct sk_buff *skb)
87 88
88 if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && 89 if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
89 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { 90 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
90 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 91 IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
91 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 92 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
92 htonl(dst_mtu(&rt->u.dst))); 93 htonl(dst_mtu(&rt->u.dst)));
93 goto drop; 94 goto drop;
@@ -122,7 +123,7 @@ sr_failed:
122 123
123too_many_hops: 124too_many_hops:
124 /* Tell the sender its packet died... */ 125 /* Tell the sender its packet died... */
125 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 126 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_INHDRERRORS);
126 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); 127 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
127drop: 128drop:
128 kfree_skb(skb); 129 kfree_skb(skb);
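Two independent changes land in ip_forward(): the new skb_warn_if_lro() guard drops receive-offload-aggregated packets before they can be forwarded (an LRO super-packet exceeds the egress MTU and has no faithful wire form), and every statistics bump now names its namespace. The forward path has no socket, so the owning net is recovered from a device hanging off the route. A minimal standalone analogue of that resolution rule (the struct layouts are stand-ins, not the kernel's):

    struct net        { unsigned long outforwdatagrams; };
    struct net_device { struct net *nd_net; };
    struct dst_entry  { struct net_device *dev; };
    struct sk_buff    { struct dst_entry *dst; };

    /* analogue of dev_net(): a device is parented to exactly one net */
    static struct net *dev_net(const struct net_device *dev)
    {
            return dev->nd_net;
    }

    /* forward path: no socket, so the owner comes via the route */
    static void charge_forward(struct sk_buff *skb)
    {
            dev_net(skb->dst->dev)->outforwdatagrams++;
    }
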
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 37221f659159..38d38f058018 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP fragmentation functionality. 6 * The IP fragmentation functionality.
7 * 7 *
8 * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> 8 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox <Alan.Cox@linux.org> 9 * Alan Cox <Alan.Cox@linux.org>
12 * 10 *
@@ -180,7 +178,7 @@ static void ip_evictor(struct net *net)
180 178
181 evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags); 179 evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
182 if (evicted) 180 if (evicted)
183 IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted); 181 IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
184} 182}
185 183
186/* 184/*
@@ -189,8 +187,10 @@ static void ip_evictor(struct net *net)
189static void ip_expire(unsigned long arg) 187static void ip_expire(unsigned long arg)
190{ 188{
191 struct ipq *qp; 189 struct ipq *qp;
190 struct net *net;
192 191
193 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); 192 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
193 net = container_of(qp->q.net, struct net, ipv4.frags);
194 194
195 spin_lock(&qp->q.lock); 195 spin_lock(&qp->q.lock);
196 196
@@ -199,14 +199,12 @@ static void ip_expire(unsigned long arg)
199 199
200 ipq_kill(qp); 200 ipq_kill(qp);
201 201
202 IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); 202 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
203 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
204 204
205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 struct net *net;
208 207
209 net = container_of(qp->q.net, struct net, ipv4.frags);
210 /* Send an ICMP "Fragment Reassembly Timeout" message. */ 208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
211 if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) { 209 if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 210 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
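ip_expire() runs from a timer and holds only qp->q.net, a pointer to the netns_frags block embedded inside some struct net; hoisting the container_of() to the top of the handler lets every path (both MIB bumps and the ICMP reply) share one namespace lookup. A simplified user-space rendering of the pointer arithmetic (the kernel's container_of adds type checking on top of this):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct netns_frags { int high_thresh, low_thresh, timeout; };
    struct netns_ipv4  { struct netns_frags frags; };
    struct net         { struct netns_ipv4 ipv4; };

    /* walk back from the embedded member to the enclosing namespace */
    static struct net *frags_to_net(struct netns_frags *f)
    {
            return container_of(f, struct net, ipv4.frags);
    }
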
@@ -263,7 +261,10 @@ static inline int ip_frag_too_far(struct ipq *qp)
263 rc = qp->q.fragments && (end - start) > max; 261 rc = qp->q.fragments && (end - start) > max;
264 262
265 if (rc) { 263 if (rc) {
266 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 264 struct net *net;
265
266 net = container_of(qp->q.net, struct net, ipv4.frags);
267 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
267 } 268 }
268 269
269 return rc; 270 return rc;
@@ -547,7 +548,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
547 iph = ip_hdr(head); 548 iph = ip_hdr(head);
548 iph->frag_off = 0; 549 iph->frag_off = 0;
549 iph->tot_len = htons(len); 550 iph->tot_len = htons(len);
550 IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS); 551 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS);
551 qp->q.fragments = NULL; 552 qp->q.fragments = NULL;
552 return 0; 553 return 0;
553 554
@@ -562,7 +563,7 @@ out_oversize:
562 "Oversized IP packet from " NIPQUAD_FMT ".\n", 563 "Oversized IP packet from " NIPQUAD_FMT ".\n",
563 NIPQUAD(qp->saddr)); 564 NIPQUAD(qp->saddr));
564out_fail: 565out_fail:
565 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 566 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
566 return err; 567 return err;
567} 568}
568 569
@@ -572,9 +573,9 @@ int ip_defrag(struct sk_buff *skb, u32 user)
572 struct ipq *qp; 573 struct ipq *qp;
573 struct net *net; 574 struct net *net;
574 575
575 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
576
577 net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); 576 net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
577 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
578
578 /* Start by cleaning up the memory. */ 579 /* Start by cleaning up the memory. */
579 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 580 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
580 ip_evictor(net); 581 ip_evictor(net);
@@ -592,7 +593,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
592 return ret; 593 return ret;
593 } 594 }
594 595
595 IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 596 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
596 kfree_skb(skb); 597 kfree_skb(skb);
597 return -ENOMEM; 598 return -ENOMEM;
598} 599}
@@ -600,7 +601,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
600#ifdef CONFIG_SYSCTL 601#ifdef CONFIG_SYSCTL
601static int zero; 602static int zero;
602 603
603static struct ctl_table ip4_frags_ctl_table[] = { 604static struct ctl_table ip4_frags_ns_ctl_table[] = {
604 { 605 {
605 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, 606 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH,
606 .procname = "ipfrag_high_thresh", 607 .procname = "ipfrag_high_thresh",
@@ -626,6 +627,10 @@ static struct ctl_table ip4_frags_ctl_table[] = {
626 .proc_handler = &proc_dointvec_jiffies, 627 .proc_handler = &proc_dointvec_jiffies,
627 .strategy = &sysctl_jiffies 628 .strategy = &sysctl_jiffies
628 }, 629 },
630 { }
631};
632
633static struct ctl_table ip4_frags_ctl_table[] = {
629 { 634 {
630 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, 635 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL,
631 .procname = "ipfrag_secret_interval", 636 .procname = "ipfrag_secret_interval",
@@ -646,22 +651,20 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 651 { }
647}; 652};
648 653
649static int ip4_frags_ctl_register(struct net *net) 654static int ip4_frags_ns_ctl_register(struct net *net)
650{ 655{
651 struct ctl_table *table; 656 struct ctl_table *table;
652 struct ctl_table_header *hdr; 657 struct ctl_table_header *hdr;
653 658
654 table = ip4_frags_ctl_table; 659 table = ip4_frags_ns_ctl_table;
655 if (net != &init_net) { 660 if (net != &init_net) {
656 table = kmemdup(table, sizeof(ip4_frags_ctl_table), GFP_KERNEL); 661 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
657 if (table == NULL) 662 if (table == NULL)
658 goto err_alloc; 663 goto err_alloc;
659 664
660 table[0].data = &net->ipv4.frags.high_thresh; 665 table[0].data = &net->ipv4.frags.high_thresh;
661 table[1].data = &net->ipv4.frags.low_thresh; 666 table[1].data = &net->ipv4.frags.low_thresh;
662 table[2].data = &net->ipv4.frags.timeout; 667 table[2].data = &net->ipv4.frags.timeout;
663 table[3].mode &= ~0222;
664 table[4].mode &= ~0222;
665 } 668 }
666 669
667 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); 670 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
@@ -678,7 +681,7 @@ err_alloc:
678 return -ENOMEM; 681 return -ENOMEM;
679} 682}
680 683
681static void ip4_frags_ctl_unregister(struct net *net) 684static void ip4_frags_ns_ctl_unregister(struct net *net)
682{ 685{
683 struct ctl_table *table; 686 struct ctl_table *table;
684 687
@@ -686,13 +689,22 @@ static void ip4_frags_ctl_unregister(struct net *net)
686 unregister_net_sysctl_table(net->ipv4.frags_hdr); 689 unregister_net_sysctl_table(net->ipv4.frags_hdr);
687 kfree(table); 690 kfree(table);
688} 691}
692
693static void ip4_frags_ctl_register(void)
694{
695 register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
696}
689#else 697#else
690static inline int ip4_frags_ctl_register(struct net *net) 698static inline int ip4_frags_ns_ctl_register(struct net *net)
691{ 699{
692 return 0; 700 return 0;
693} 701}
694 702
695static inline void ip4_frags_ctl_unregister(struct net *net) 703static inline void ip4_frags_ns_ctl_unregister(struct net *net)
704{
705}
706
707static inline void ip4_frags_ctl_register(void)
696{ 708{
697} 709}
698#endif 710#endif
@@ -716,12 +728,12 @@ static int ipv4_frags_init_net(struct net *net)
716 728
717 inet_frags_init_net(&net->ipv4.frags); 729 inet_frags_init_net(&net->ipv4.frags);
718 730
719 return ip4_frags_ctl_register(net); 731 return ip4_frags_ns_ctl_register(net);
720} 732}
721 733
722static void ipv4_frags_exit_net(struct net *net) 734static void ipv4_frags_exit_net(struct net *net)
723{ 735{
724 ip4_frags_ctl_unregister(net); 736 ip4_frags_ns_ctl_unregister(net);
725 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 737 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
726} 738}
727 739
@@ -732,6 +744,7 @@ static struct pernet_operations ip4_frags_ops = {
732 744
733void __init ipfrag_init(void) 745void __init ipfrag_init(void)
734{ 746{
747 ip4_frags_ctl_register();
735 register_pernet_subsys(&ip4_frags_ops); 748 register_pernet_subsys(&ip4_frags_ops);
736 ip4_frags.hashfn = ip4_hashfn; 749 ip4_frags.hashfn = ip4_hashfn;
737 ip4_frags.constructor = ip4_frag_init; 750 ip4_frags.constructor = ip4_frag_init;
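The sysctl rework splits one table in two: the per-namespace tunables (ipfrag_high_thresh, ipfrag_low_thresh, ipfrag_time) stay in ip4_frags_ns_ctl_table and are kmemdup'ed per namespace with each .data repointed at that namespace's fields, while the truly global rows move to ip4_frags_ctl_table, registered once and read-only via register_net_sysctl_rotable(). That is why the two "mode &= ~0222" lines disappear: they were the old way of stripping write permission from the global rows inside child namespaces. The masking itself is plain octal permission arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mode = 0644;   /* rw-r--r-- */

            mode &= ~0222;              /* clear every write bit */
            printf("%o\n", mode);       /* prints 444: r--r--r-- */
            return 0;
    }
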
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4342cba4ff82..2a61158ea722 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -473,6 +473,8 @@ static int ipgre_rcv(struct sk_buff *skb)
473 read_lock(&ipgre_lock); 473 read_lock(&ipgre_lock);
474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), 474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
475 iph->saddr, iph->daddr, key)) != NULL) { 475 iph->saddr, iph->daddr, key)) != NULL) {
476 struct net_device_stats *stats = &tunnel->dev->stats;
477
476 secpath_reset(skb); 478 secpath_reset(skb);
477 479
478 skb->protocol = *(__be16*)(h + 2); 480 skb->protocol = *(__be16*)(h + 2);
@@ -497,28 +499,28 @@ static int ipgre_rcv(struct sk_buff *skb)
497 /* Looped back packet, drop it! */ 499 /* Looped back packet, drop it! */
498 if (skb->rtable->fl.iif == 0) 500 if (skb->rtable->fl.iif == 0)
499 goto drop; 501 goto drop;
500 tunnel->stat.multicast++; 502 stats->multicast++;
501 skb->pkt_type = PACKET_BROADCAST; 503 skb->pkt_type = PACKET_BROADCAST;
502 } 504 }
503#endif 505#endif
504 506
505 if (((flags&GRE_CSUM) && csum) || 507 if (((flags&GRE_CSUM) && csum) ||
506 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { 508 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
507 tunnel->stat.rx_crc_errors++; 509 stats->rx_crc_errors++;
508 tunnel->stat.rx_errors++; 510 stats->rx_errors++;
509 goto drop; 511 goto drop;
510 } 512 }
511 if (tunnel->parms.i_flags&GRE_SEQ) { 513 if (tunnel->parms.i_flags&GRE_SEQ) {
512 if (!(flags&GRE_SEQ) || 514 if (!(flags&GRE_SEQ) ||
513 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { 515 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
514 tunnel->stat.rx_fifo_errors++; 516 stats->rx_fifo_errors++;
515 tunnel->stat.rx_errors++; 517 stats->rx_errors++;
516 goto drop; 518 goto drop;
517 } 519 }
518 tunnel->i_seqno = seqno + 1; 520 tunnel->i_seqno = seqno + 1;
519 } 521 }
520 tunnel->stat.rx_packets++; 522 stats->rx_packets++;
521 tunnel->stat.rx_bytes += skb->len; 523 stats->rx_bytes += skb->len;
522 skb->dev = tunnel->dev; 524 skb->dev = tunnel->dev;
523 dst_release(skb->dst); 525 dst_release(skb->dst);
524 skb->dst = NULL; 526 skb->dst = NULL;
@@ -540,7 +542,7 @@ drop_nolock:
540static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 542static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
541{ 543{
542 struct ip_tunnel *tunnel = netdev_priv(dev); 544 struct ip_tunnel *tunnel = netdev_priv(dev);
543 struct net_device_stats *stats = &tunnel->stat; 545 struct net_device_stats *stats = &tunnel->dev->stats;
544 struct iphdr *old_iph = ip_hdr(skb); 546 struct iphdr *old_iph = ip_hdr(skb);
545 struct iphdr *tiph; 547 struct iphdr *tiph;
546 u8 tos; 548 u8 tos;
@@ -554,7 +556,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
554 int mtu; 556 int mtu;
555 557
556 if (tunnel->recursion++) { 558 if (tunnel->recursion++) {
557 tunnel->stat.collisions++; 559 stats->collisions++;
558 goto tx_error; 560 goto tx_error;
559 } 561 }
560 562
@@ -570,7 +572,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
570 /* NBMA tunnel */ 572 /* NBMA tunnel */
571 573
572 if (skb->dst == NULL) { 574 if (skb->dst == NULL) {
573 tunnel->stat.tx_fifo_errors++; 575 stats->tx_fifo_errors++;
574 goto tx_error; 576 goto tx_error;
575 } 577 }
576 578
@@ -621,7 +623,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
621 .tos = RT_TOS(tos) } }, 623 .tos = RT_TOS(tos) } },
622 .proto = IPPROTO_GRE }; 624 .proto = IPPROTO_GRE };
623 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 625 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
624 tunnel->stat.tx_carrier_errors++; 626 stats->tx_carrier_errors++;
625 goto tx_error; 627 goto tx_error;
626 } 628 }
627 } 629 }
@@ -629,7 +631,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
629 631
630 if (tdev == dev) { 632 if (tdev == dev) {
631 ip_rt_put(rt); 633 ip_rt_put(rt);
632 tunnel->stat.collisions++; 634 stats->collisions++;
633 goto tx_error; 635 goto tx_error;
634 } 636 }
635 637
@@ -954,11 +956,6 @@ done:
954 return err; 956 return err;
955} 957}
956 958
957static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
958{
959 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
960}
961
962static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 959static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
963{ 960{
964 struct ip_tunnel *tunnel = netdev_priv(dev); 961 struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1084,7 +1081,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1084 dev->uninit = ipgre_tunnel_uninit; 1081 dev->uninit = ipgre_tunnel_uninit;
1085 dev->destructor = free_netdev; 1082 dev->destructor = free_netdev;
1086 dev->hard_start_xmit = ipgre_tunnel_xmit; 1083 dev->hard_start_xmit = ipgre_tunnel_xmit;
1087 dev->get_stats = ipgre_tunnel_get_stats;
1088 dev->do_ioctl = ipgre_tunnel_ioctl; 1084 dev->do_ioctl = ipgre_tunnel_ioctl;
1089 dev->change_mtu = ipgre_tunnel_change_mtu; 1085 dev->change_mtu = ipgre_tunnel_change_mtu;
1090 1086
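A recurring conversion starts here: struct net_device now embeds its own net_device_stats, so the tunnel drivers stop keeping a private copy in tunnel->stat and count straight into dev->stats, which also lets the ipgre_tunnel_get_stats() hook go away. Grabbing the stats pointer once at the top of ipgre_rcv() and ipgre_tunnel_xmit() keeps the per-packet paths as short as before. The shape of the change, as a standalone sketch:

    struct net_device_stats { unsigned long rx_packets, rx_bytes; };
    struct net_device       { struct net_device_stats stats; };

    /* after the conversion: count into the embedded stats directly,
     * no driver-private copy and no accessor hook required */
    static void count_rx(struct net_device *dev, unsigned long len)
    {
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
    }
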
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ff77a4a7f9ec..043f640df4b7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) module. 6 * The Internet Protocol (IP) module.
7 * 7 *
8 * Version: $Id: ip_input.c,v 1.55 2002/01/12 07:39:45 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
@@ -232,16 +230,16 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
232 protocol = -ret; 230 protocol = -ret;
233 goto resubmit; 231 goto resubmit;
234 } 232 }
235 IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); 233 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
236 } else { 234 } else {
237 if (!raw) { 235 if (!raw) {
238 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 236 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
239 IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS); 237 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
240 icmp_send(skb, ICMP_DEST_UNREACH, 238 icmp_send(skb, ICMP_DEST_UNREACH,
241 ICMP_PROT_UNREACH, 0); 239 ICMP_PROT_UNREACH, 0);
242 } 240 }
243 } else 241 } else
244 IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); 242 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
245 kfree_skb(skb); 243 kfree_skb(skb);
246 } 244 }
247 } 245 }
@@ -283,7 +281,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
283 --ANK (980813) 281 --ANK (980813)
284 */ 282 */
285 if (skb_cow(skb, skb_headroom(skb))) { 283 if (skb_cow(skb, skb_headroom(skb))) {
286 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); 284 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
287 goto drop; 285 goto drop;
288 } 286 }
289 287
@@ -292,7 +290,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
292 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 290 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
293 291
294 if (ip_options_compile(dev_net(dev), opt, skb)) { 292 if (ip_options_compile(dev_net(dev), opt, skb)) {
295 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 293 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
296 goto drop; 294 goto drop;
297 } 295 }
298 296
@@ -336,9 +334,11 @@ static int ip_rcv_finish(struct sk_buff *skb)
336 skb->dev); 334 skb->dev);
337 if (unlikely(err)) { 335 if (unlikely(err)) {
338 if (err == -EHOSTUNREACH) 336 if (err == -EHOSTUNREACH)
339 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); 337 IP_INC_STATS_BH(dev_net(skb->dev),
338 IPSTATS_MIB_INADDRERRORS);
340 else if (err == -ENETUNREACH) 339 else if (err == -ENETUNREACH)
341 IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES); 340 IP_INC_STATS_BH(dev_net(skb->dev),
341 IPSTATS_MIB_INNOROUTES);
342 goto drop; 342 goto drop;
343 } 343 }
344 } 344 }
@@ -359,9 +359,9 @@ static int ip_rcv_finish(struct sk_buff *skb)
359 359
360 rt = skb->rtable; 360 rt = skb->rtable;
361 if (rt->rt_type == RTN_MULTICAST) 361 if (rt->rt_type == RTN_MULTICAST)
362 IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); 362 IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCASTPKTS);
363 else if (rt->rt_type == RTN_BROADCAST) 363 else if (rt->rt_type == RTN_BROADCAST)
364 IP_INC_STATS_BH(IPSTATS_MIB_INBCASTPKTS); 364 IP_INC_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCASTPKTS);
365 365
366 return dst_input(skb); 366 return dst_input(skb);
367 367
@@ -384,10 +384,10 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
384 if (skb->pkt_type == PACKET_OTHERHOST) 384 if (skb->pkt_type == PACKET_OTHERHOST)
385 goto drop; 385 goto drop;
386 386
387 IP_INC_STATS_BH(IPSTATS_MIB_INRECEIVES); 387 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INRECEIVES);
388 388
389 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 389 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
390 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); 390 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
391 goto out; 391 goto out;
392 } 392 }
393 393
@@ -420,7 +420,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
420 420
421 len = ntohs(iph->tot_len); 421 len = ntohs(iph->tot_len);
422 if (skb->len < len) { 422 if (skb->len < len) {
423 IP_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); 423 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
424 goto drop; 424 goto drop;
425 } else if (len < (iph->ihl*4)) 425 } else if (len < (iph->ihl*4))
426 goto inhdr_error; 426 goto inhdr_error;
@@ -430,7 +430,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
430 * Note this now means skb->len holds ntohs(iph->tot_len). 430 * Note this now means skb->len holds ntohs(iph->tot_len).
431 */ 431 */
432 if (pskb_trim_rcsum(skb, len)) { 432 if (pskb_trim_rcsum(skb, len)) {
433 IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); 433 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
434 goto drop; 434 goto drop;
435 } 435 }
436 436
@@ -441,7 +441,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
441 ip_rcv_finish); 441 ip_rcv_finish);
442 442
443inhdr_error: 443inhdr_error:
444 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 444 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
445drop: 445drop:
446 kfree_skb(skb); 446 kfree_skb(skb);
447out: 447out:
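Mechanically, all of these one-argument-to-two conversions compile because IP_INC_STATS_BH() and friends gained a struct net as their first parameter: the counter array is found through the namespace instead of through a file-scope global. A flattened standalone analogue of the macro shape (the real counters are per-CPU, which this deliberately omits):

    enum ip_mib { INRECEIVES, INDISCARDS, INHDRERRORS, __IP_MIB_MAX };

    struct net { unsigned long ip_mib[__IP_MIB_MAX]; };

    /* analogue of the new macro: the caller says which net to charge */
    #define IP_INC_STATS_BH(net, field) ((net)->ip_mib[(field)]++)
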
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 33126ad2cfdc..be3f18a7a40e 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The options processing module for ip.c 6 * The options processing module for ip.c
7 * 7 *
8 * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $
9 *
10 * Authors: A.N.Kuznetsov 8 * Authors: A.N.Kuznetsov
11 * 9 *
12 */ 10 */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e527628f56cf..465544f6281a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) output module. 6 * The Internet Protocol (IP) output module.
7 * 7 *
8 * Version: $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
@@ -184,9 +182,9 @@ static inline int ip_finish_output2(struct sk_buff *skb)
184 unsigned int hh_len = LL_RESERVED_SPACE(dev); 182 unsigned int hh_len = LL_RESERVED_SPACE(dev);
185 183
186 if (rt->rt_type == RTN_MULTICAST) 184 if (rt->rt_type == RTN_MULTICAST)
187 IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); 185 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS);
188 else if (rt->rt_type == RTN_BROADCAST) 186 else if (rt->rt_type == RTN_BROADCAST)
189 IP_INC_STATS(IPSTATS_MIB_OUTBCASTPKTS); 187 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS);
190 188
191 /* Be paranoid, rather than too clever. */ 189 /* Be paranoid, rather than too clever. */
192 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -246,7 +244,7 @@ int ip_mc_output(struct sk_buff *skb)
246 /* 244 /*
247 * If the indicated interface is up and running, send the packet. 245 * If the indicated interface is up and running, send the packet.
248 */ 246 */
249 IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 247 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
250 248
251 skb->dev = dev; 249 skb->dev = dev;
252 skb->protocol = htons(ETH_P_IP); 250 skb->protocol = htons(ETH_P_IP);
@@ -300,7 +298,7 @@ int ip_output(struct sk_buff *skb)
300{ 298{
301 struct net_device *dev = skb->dst->dev; 299 struct net_device *dev = skb->dst->dev;
302 300
303 IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 301 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
304 302
305 skb->dev = dev; 303 skb->dev = dev;
306 skb->protocol = htons(ETH_P_IP); 304 skb->protocol = htons(ETH_P_IP);
@@ -391,7 +389,7 @@ packet_routed:
391 return ip_local_out(skb); 389 return ip_local_out(skb);
392 390
393no_route: 391no_route:
394 IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES); 392 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
395 kfree_skb(skb); 393 kfree_skb(skb);
396 return -EHOSTUNREACH; 394 return -EHOSTUNREACH;
397} 395}
@@ -453,7 +451,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
453 iph = ip_hdr(skb); 451 iph = ip_hdr(skb);
454 452
455 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { 453 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
456 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 454 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
457 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 455 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
458 htonl(ip_skb_dst_mtu(skb))); 456 htonl(ip_skb_dst_mtu(skb)));
459 kfree_skb(skb); 457 kfree_skb(skb);
@@ -544,7 +542,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
544 err = output(skb); 542 err = output(skb);
545 543
546 if (!err) 544 if (!err)
547 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 545 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
548 if (err || !frag) 546 if (err || !frag)
549 break; 547 break;
550 548
@@ -554,7 +552,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
554 } 552 }
555 553
556 if (err == 0) { 554 if (err == 0) {
557 IP_INC_STATS(IPSTATS_MIB_FRAGOKS); 555 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
558 return 0; 556 return 0;
559 } 557 }
560 558
@@ -563,7 +561,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
563 kfree_skb(frag); 561 kfree_skb(frag);
564 frag = skb; 562 frag = skb;
565 } 563 }
566 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 564 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
567 return err; 565 return err;
568 } 566 }
569 567
@@ -675,15 +673,15 @@ slow_path:
675 if (err) 673 if (err)
676 goto fail; 674 goto fail;
677 675
678 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 676 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
679 } 677 }
680 kfree_skb(skb); 678 kfree_skb(skb);
681 IP_INC_STATS(IPSTATS_MIB_FRAGOKS); 679 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
682 return err; 680 return err;
683 681
684fail: 682fail:
685 kfree_skb(skb); 683 kfree_skb(skb);
686 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 684 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
687 return err; 685 return err;
688} 686}
689 687
@@ -1049,7 +1047,7 @@ alloc_new_skb:
1049 1047
1050error: 1048error:
1051 inet->cork.length -= length; 1049 inet->cork.length -= length;
1052 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); 1050 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1053 return err; 1051 return err;
1054} 1052}
1055 1053
@@ -1191,7 +1189,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1191 1189
1192error: 1190error:
1193 inet->cork.length -= size; 1191 inet->cork.length -= size;
1194 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); 1192 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1195 return err; 1193 return err;
1196} 1194}
1197 1195
@@ -1213,6 +1211,7 @@ int ip_push_pending_frames(struct sock *sk)
1213 struct sk_buff *skb, *tmp_skb; 1211 struct sk_buff *skb, *tmp_skb;
1214 struct sk_buff **tail_skb; 1212 struct sk_buff **tail_skb;
1215 struct inet_sock *inet = inet_sk(sk); 1213 struct inet_sock *inet = inet_sk(sk);
1214 struct net *net = sock_net(sk);
1216 struct ip_options *opt = NULL; 1215 struct ip_options *opt = NULL;
1217 struct rtable *rt = (struct rtable *)inet->cork.dst; 1216 struct rtable *rt = (struct rtable *)inet->cork.dst;
1218 struct iphdr *iph; 1217 struct iphdr *iph;
@@ -1282,7 +1281,7 @@ int ip_push_pending_frames(struct sock *sk)
1282 skb->dst = dst_clone(&rt->u.dst); 1281 skb->dst = dst_clone(&rt->u.dst);
1283 1282
1284 if (iph->protocol == IPPROTO_ICMP) 1283 if (iph->protocol == IPPROTO_ICMP)
1285 icmp_out_count(((struct icmphdr *) 1284 icmp_out_count(net, ((struct icmphdr *)
1286 skb_transport_header(skb))->type); 1285 skb_transport_header(skb))->type);
1287 1286
1288 /* Netfilter gets whole the not fragmented skb. */ 1287 /* Netfilter gets whole the not fragmented skb. */
@@ -1299,7 +1298,7 @@ out:
1299 return err; 1298 return err;
1300 1299
1301error: 1300error:
1302 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); 1301 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1303 goto out; 1302 goto out;
1304} 1303}
1305 1304
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e0514e82308e..105d92a039b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP to API glue. 6 * The IP to API glue.
7 * 7 *
8 * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ed45037ce9be..b88aa9afa42e 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ipconfig.c,v 1.46 2002/02/01 22:01:04 davem Exp $
3 *
4 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or 2 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or
5 * user-supplied information to configure own IP address and routes. 3 * user-supplied information to configure own IP address and routes.
6 * 4 *
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index af5cb53da5cc..4c6d2caf9203 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Linux NET3: IP/IP protocol decoder. 2 * Linux NET3: IP/IP protocol decoder.
3 * 3 *
4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
5 *
6 * Authors: 4 * Authors:
7 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 5 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
8 * 6 *
@@ -368,8 +366,8 @@ static int ipip_rcv(struct sk_buff *skb)
368 skb->protocol = htons(ETH_P_IP); 366 skb->protocol = htons(ETH_P_IP);
369 skb->pkt_type = PACKET_HOST; 367 skb->pkt_type = PACKET_HOST;
370 368
371 tunnel->stat.rx_packets++; 369 tunnel->dev->stats.rx_packets++;
372 tunnel->stat.rx_bytes += skb->len; 370 tunnel->dev->stats.rx_bytes += skb->len;
373 skb->dev = tunnel->dev; 371 skb->dev = tunnel->dev;
374 dst_release(skb->dst); 372 dst_release(skb->dst);
375 skb->dst = NULL; 373 skb->dst = NULL;
@@ -392,7 +390,7 @@ static int ipip_rcv(struct sk_buff *skb)
392static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 390static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
393{ 391{
394 struct ip_tunnel *tunnel = netdev_priv(dev); 392 struct ip_tunnel *tunnel = netdev_priv(dev);
395 struct net_device_stats *stats = &tunnel->stat; 393 struct net_device_stats *stats = &tunnel->dev->stats;
396 struct iphdr *tiph = &tunnel->parms.iph; 394 struct iphdr *tiph = &tunnel->parms.iph;
397 u8 tos = tunnel->parms.iph.tos; 395 u8 tos = tunnel->parms.iph.tos;
398 __be16 df = tiph->frag_off; 396 __be16 df = tiph->frag_off;
@@ -405,7 +403,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
405 int mtu; 403 int mtu;
406 404
407 if (tunnel->recursion++) { 405 if (tunnel->recursion++) {
408 tunnel->stat.collisions++; 406 stats->collisions++;
409 goto tx_error; 407 goto tx_error;
410 } 408 }
411 409
@@ -418,7 +416,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
418 if (!dst) { 416 if (!dst) {
419 /* NBMA tunnel */ 417 /* NBMA tunnel */
420 if ((rt = skb->rtable) == NULL) { 418 if ((rt = skb->rtable) == NULL) {
421 tunnel->stat.tx_fifo_errors++; 419 stats->tx_fifo_errors++;
422 goto tx_error; 420 goto tx_error;
423 } 421 }
424 if ((dst = rt->rt_gateway) == 0) 422 if ((dst = rt->rt_gateway) == 0)
@@ -433,7 +431,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433 .tos = RT_TOS(tos) } }, 431 .tos = RT_TOS(tos) } },
434 .proto = IPPROTO_IPIP }; 432 .proto = IPPROTO_IPIP };
435 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 433 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
436 tunnel->stat.tx_carrier_errors++; 434 stats->tx_carrier_errors++;
437 goto tx_error_icmp; 435 goto tx_error_icmp;
438 } 436 }
439 } 437 }
@@ -441,7 +439,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
441 439
442 if (tdev == dev) { 440 if (tdev == dev) {
443 ip_rt_put(rt); 441 ip_rt_put(rt);
444 tunnel->stat.collisions++; 442 stats->collisions++;
445 goto tx_error; 443 goto tx_error;
446 } 444 }
447 445
@@ -451,7 +449,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
451 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 449 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
452 450
453 if (mtu < 68) { 451 if (mtu < 68) {
454 tunnel->stat.collisions++; 452 stats->collisions++;
455 ip_rt_put(rt); 453 ip_rt_put(rt);
456 goto tx_error; 454 goto tx_error;
457 } 455 }
@@ -685,11 +683,6 @@ done:
685 return err; 683 return err;
686} 684}
687 685
688static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
689{
690 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
691}
692
693static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) 686static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
694{ 687{
695 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 688 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -702,7 +695,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
702{ 695{
703 dev->uninit = ipip_tunnel_uninit; 696 dev->uninit = ipip_tunnel_uninit;
704 dev->hard_start_xmit = ipip_tunnel_xmit; 697 dev->hard_start_xmit = ipip_tunnel_xmit;
705 dev->get_stats = ipip_tunnel_get_stats;
706 dev->do_ioctl = ipip_tunnel_ioctl; 698 dev->do_ioctl = ipip_tunnel_ioctl;
707 dev->change_mtu = ipip_tunnel_change_mtu; 699 dev->change_mtu = ipip_tunnel_change_mtu;
708 dev->destructor = free_netdev; 700 dev->destructor = free_netdev;
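ipip gets the same stats conversion as GRE above. Deleting the get_stats hook is safe on the assumption (sketched below) that the core reports the embedded dev->stats whenever a driver does not override the hook, so a hook that merely returned a private counter block was pure boilerplate:

    struct net_device_stats { unsigned long rx_packets; };
    struct net_device {
            struct net_device_stats stats;
            struct net_device_stats *(*get_stats)(struct net_device *dev);
    };

    /* assumed core fallback: no override means the embedded counters */
    static struct net_device_stats *report_stats(struct net_device *dev)
    {
            return dev->get_stats ? dev->get_stats(dev) : &dev->stats;
    }
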
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 11700a4dcd95..033c712c3a5d 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -9,8 +9,6 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
13 *
14 * Fixes: 12 * Fixes:
15 * Michael Chastain : Incorrect size of copying. 13 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code 14 * Alan Cox : Added the cache manager code
@@ -120,6 +118,31 @@ static struct timer_list ipmr_expire_timer;
120 118
121/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ 119/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
122 120
121static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
122{
123 dev_close(dev);
124
125 dev = __dev_get_by_name(&init_net, "tunl0");
126 if (dev) {
127 struct ifreq ifr;
128 mm_segment_t oldfs;
129 struct ip_tunnel_parm p;
130
131 memset(&p, 0, sizeof(p));
132 p.iph.daddr = v->vifc_rmt_addr.s_addr;
133 p.iph.saddr = v->vifc_lcl_addr.s_addr;
134 p.iph.version = 4;
135 p.iph.ihl = 5;
136 p.iph.protocol = IPPROTO_IPIP;
137 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
138 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
139
140 oldfs = get_fs(); set_fs(KERNEL_DS);
141 dev->do_ioctl(dev, &ifr, SIOCDELTUNNEL);
142 set_fs(oldfs);
143 }
144}
145
123static 146static
124struct net_device *ipmr_new_tunnel(struct vifctl *v) 147struct net_device *ipmr_new_tunnel(struct vifctl *v)
125{ 148{
@@ -161,6 +184,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
161 184
162 if (dev_open(dev)) 185 if (dev_open(dev))
163 goto failure; 186 goto failure;
187 dev_hold(dev);
164 } 188 }
165 } 189 }
166 return dev; 190 return dev;
@@ -181,26 +205,20 @@ static int reg_vif_num = -1;
181static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 205static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
182{ 206{
183 read_lock(&mrt_lock); 207 read_lock(&mrt_lock);
184 ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; 208 dev->stats.tx_bytes += skb->len;
185 ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; 209 dev->stats.tx_packets++;
186 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 210 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
187 read_unlock(&mrt_lock); 211 read_unlock(&mrt_lock);
188 kfree_skb(skb); 212 kfree_skb(skb);
189 return 0; 213 return 0;
190} 214}
191 215
192static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
193{
194 return (struct net_device_stats*)netdev_priv(dev);
195}
196
197static void reg_vif_setup(struct net_device *dev) 216static void reg_vif_setup(struct net_device *dev)
198{ 217{
199 dev->type = ARPHRD_PIMREG; 218 dev->type = ARPHRD_PIMREG;
200 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 219 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
201 dev->flags = IFF_NOARP; 220 dev->flags = IFF_NOARP;
202 dev->hard_start_xmit = reg_vif_xmit; 221 dev->hard_start_xmit = reg_vif_xmit;
203 dev->get_stats = reg_vif_get_stats;
204 dev->destructor = free_netdev; 222 dev->destructor = free_netdev;
205} 223}
206 224
@@ -209,8 +227,7 @@ static struct net_device *ipmr_reg_vif(void)
209 struct net_device *dev; 227 struct net_device *dev;
210 struct in_device *in_dev; 228 struct in_device *in_dev;
211 229
212 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg", 230 dev = alloc_netdev(0, "pimreg", reg_vif_setup);
213 reg_vif_setup);
214 231
215 if (dev == NULL) 232 if (dev == NULL)
216 return NULL; 233 return NULL;
@@ -234,6 +251,8 @@ static struct net_device *ipmr_reg_vif(void)
234 if (dev_open(dev)) 251 if (dev_open(dev))
235 goto failure; 252 goto failure;
236 253
254 dev_hold(dev);
255
237 return dev; 256 return dev;
238 257
239failure: 258failure:
@@ -248,9 +267,10 @@ failure:
248 267
249/* 268/*
250 * Delete a VIF entry 269 * Delete a VIF entry
 270 * @notify: Set to 1 if the caller is a notifier_call
251 */ 271 */
252 272
253static int vif_delete(int vifi) 273static int vif_delete(int vifi, int notify)
254{ 274{
255 struct vif_device *v; 275 struct vif_device *v;
256 struct net_device *dev; 276 struct net_device *dev;
@@ -293,7 +313,7 @@ static int vif_delete(int vifi)
293 ip_rt_multicast_event(in_dev); 313 ip_rt_multicast_event(in_dev);
294 } 314 }
295 315
296 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER)) 316 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
297 unregister_netdevice(dev); 317 unregister_netdevice(dev);
298 318
299 dev_put(dev); 319 dev_put(dev);
@@ -398,6 +418,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
398 struct vif_device *v = &vif_table[vifi]; 418 struct vif_device *v = &vif_table[vifi];
399 struct net_device *dev; 419 struct net_device *dev;
400 struct in_device *in_dev; 420 struct in_device *in_dev;
421 int err;
401 422
402 /* Is vif busy ? */ 423 /* Is vif busy ? */
403 if (VIF_EXISTS(vifi)) 424 if (VIF_EXISTS(vifi))
@@ -415,18 +436,34 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
415 dev = ipmr_reg_vif(); 436 dev = ipmr_reg_vif();
416 if (!dev) 437 if (!dev)
417 return -ENOBUFS; 438 return -ENOBUFS;
439 err = dev_set_allmulti(dev, 1);
440 if (err) {
441 unregister_netdevice(dev);
442 dev_put(dev);
443 return err;
444 }
418 break; 445 break;
419#endif 446#endif
420 case VIFF_TUNNEL: 447 case VIFF_TUNNEL:
421 dev = ipmr_new_tunnel(vifc); 448 dev = ipmr_new_tunnel(vifc);
422 if (!dev) 449 if (!dev)
423 return -ENOBUFS; 450 return -ENOBUFS;
451 err = dev_set_allmulti(dev, 1);
452 if (err) {
453 ipmr_del_tunnel(dev, vifc);
454 dev_put(dev);
455 return err;
456 }
424 break; 457 break;
425 case 0: 458 case 0:
426 dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr); 459 dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
427 if (!dev) 460 if (!dev)
428 return -EADDRNOTAVAIL; 461 return -EADDRNOTAVAIL;
429 dev_put(dev); 462 err = dev_set_allmulti(dev, 1);
463 if (err) {
464 dev_put(dev);
465 return err;
466 }
430 break; 467 break;
431 default: 468 default:
432 return -EINVAL; 469 return -EINVAL;
@@ -435,7 +472,6 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
435 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) 472 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
436 return -EADDRNOTAVAIL; 473 return -EADDRNOTAVAIL;
437 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 474 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
438 dev_set_allmulti(dev, +1);
439 ip_rt_multicast_event(in_dev); 475 ip_rt_multicast_event(in_dev);
440 476
441 /* 477 /*
@@ -458,7 +494,6 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
458 494
459 /* And finish update writing critical data */ 495 /* And finish update writing critical data */
460 write_lock_bh(&mrt_lock); 496 write_lock_bh(&mrt_lock);
461 dev_hold(dev);
462 v->dev=dev; 497 v->dev=dev;
463#ifdef CONFIG_IP_PIMSM 498#ifdef CONFIG_IP_PIMSM
464 if (v->flags&VIFF_REGISTER) 499 if (v->flags&VIFF_REGISTER)
@@ -805,7 +840,7 @@ static void mroute_clean_tables(struct sock *sk)
805 */ 840 */
806 for (i=0; i<maxvif; i++) { 841 for (i=0; i<maxvif; i++) {
807 if (!(vif_table[i].flags&VIFF_STATIC)) 842 if (!(vif_table[i].flags&VIFF_STATIC))
808 vif_delete(i); 843 vif_delete(i, 0);
809 } 844 }
810 845
811 /* 846 /*
@@ -918,7 +953,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt
918 if (optname==MRT_ADD_VIF) { 953 if (optname==MRT_ADD_VIF) {
919 ret = vif_add(&vif, sk==mroute_socket); 954 ret = vif_add(&vif, sk==mroute_socket);
920 } else { 955 } else {
921 ret = vif_delete(vif.vifc_vifi); 956 ret = vif_delete(vif.vifc_vifi, 0);
922 } 957 }
923 rtnl_unlock(); 958 rtnl_unlock();
924 return ret; 959 return ret;
@@ -1097,7 +1132,7 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1097 v=&vif_table[0]; 1132 v=&vif_table[0];
1098 for (ct=0;ct<maxvif;ct++,v++) { 1133 for (ct=0;ct<maxvif;ct++,v++) {
1099 if (v->dev==dev) 1134 if (v->dev==dev)
1100 vif_delete(ct); 1135 vif_delete(ct, 1);
1101 } 1136 }
1102 return NOTIFY_DONE; 1137 return NOTIFY_DONE;
1103} 1138}
@@ -1143,7 +1178,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1143{ 1178{
1144 struct ip_options * opt = &(IPCB(skb)->opt); 1179 struct ip_options * opt = &(IPCB(skb)->opt);
1145 1180
1146 IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); 1181 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
1147 1182
1148 if (unlikely(opt->optlen)) 1183 if (unlikely(opt->optlen))
1149 ip_forward_options(skb); 1184 ip_forward_options(skb);
@@ -1170,8 +1205,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1170 if (vif->flags & VIFF_REGISTER) { 1205 if (vif->flags & VIFF_REGISTER) {
1171 vif->pkt_out++; 1206 vif->pkt_out++;
1172 vif->bytes_out+=skb->len; 1207 vif->bytes_out+=skb->len;
1173 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; 1208 vif->dev->stats.tx_bytes += skb->len;
1174 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; 1209 vif->dev->stats.tx_packets++;
1175 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1210 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1176 kfree_skb(skb); 1211 kfree_skb(skb);
1177 return; 1212 return;
@@ -1206,7 +1241,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1206 to blackhole. 1241 to blackhole.
1207 */ 1242 */
1208 1243
1209 IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS); 1244 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
1210 ip_rt_put(rt); 1245 ip_rt_put(rt);
1211 goto out_free; 1246 goto out_free;
1212 } 1247 }
@@ -1230,8 +1265,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1230 if (vif->flags & VIFF_TUNNEL) { 1265 if (vif->flags & VIFF_TUNNEL) {
1231 ip_encap(skb, vif->local, vif->remote); 1266 ip_encap(skb, vif->local, vif->remote);
1232 /* FIXME: extra output firewall step used to be here. --RR */ 1267 /* FIXME: extra output firewall step used to be here. --RR */
1233 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; 1268 vif->dev->stats.tx_packets++;
1234 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; 1269 vif->dev->stats.tx_bytes += skb->len;
1235 } 1270 }
1236 1271
1237 IPCB(skb)->flags |= IPSKB_FORWARDED; 1272 IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -1487,8 +1522,8 @@ int pim_rcv_v1(struct sk_buff * skb)
1487 skb->pkt_type = PACKET_HOST; 1522 skb->pkt_type = PACKET_HOST;
1488 dst_release(skb->dst); 1523 dst_release(skb->dst);
1489 skb->dst = NULL; 1524 skb->dst = NULL;
1490 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1525 reg_dev->stats.rx_bytes += skb->len;
1491 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1526 reg_dev->stats.rx_packets++;
1492 nf_reset(skb); 1527 nf_reset(skb);
1493 netif_rx(skb); 1528 netif_rx(skb);
1494 dev_put(reg_dev); 1529 dev_put(reg_dev);
@@ -1542,8 +1577,8 @@ static int pim_rcv(struct sk_buff * skb)
1542 skb->ip_summed = 0; 1577 skb->ip_summed = 0;
1543 skb->pkt_type = PACKET_HOST; 1578 skb->pkt_type = PACKET_HOST;
1544 dst_release(skb->dst); 1579 dst_release(skb->dst);
1545 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1580 reg_dev->stats.rx_bytes += skb->len;
1546 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1581 reg_dev->stats.rx_packets++;
1547 skb->dst = NULL; 1582 skb->dst = NULL;
1548 nf_reset(skb); 1583 nf_reset(skb);
1549 netif_rx(skb); 1584 netif_rx(skb);
@@ -1887,16 +1922,36 @@ static struct net_protocol pim_protocol = {
1887 * Setup for IP multicast routing 1922 * Setup for IP multicast routing
1888 */ 1923 */
1889 1924
1890void __init ip_mr_init(void) 1925int __init ip_mr_init(void)
1891{ 1926{
1927 int err;
1928
1892 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1929 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1893 sizeof(struct mfc_cache), 1930 sizeof(struct mfc_cache),
1894 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1931 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1895 NULL); 1932 NULL);
1933 if (!mrt_cachep)
1934 return -ENOMEM;
1935
1896 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); 1936 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
1897 register_netdevice_notifier(&ip_mr_notifier); 1937 err = register_netdevice_notifier(&ip_mr_notifier);
1938 if (err)
1939 goto reg_notif_fail;
1898#ifdef CONFIG_PROC_FS 1940#ifdef CONFIG_PROC_FS
1899 proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops); 1941 err = -ENOMEM;
1900 proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops); 1942 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1943 goto proc_vif_fail;
1944 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1945 goto proc_cache_fail;
1901#endif 1946#endif
1947 return 0;
 1948#ifdef CONFIG_PROC_FS
 1949proc_cache_fail:
 1950	proc_net_remove(&init_net, "ip_mr_vif");
 1951proc_vif_fail:
 1952	unregister_netdevice_notifier(&ip_mr_notifier);
 1953#endif
 1954reg_notif_fail:
 1955	kmem_cache_destroy(mrt_cachep);
1956 return err;
1902} 1957}
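ip_mr_init() now returns int and unwinds with the usual goto ladder: one label per completed step, run in reverse order of construction, so that any goto releases exactly what was set up before the failure (the label ordering above follows that rule, falling through from the latest completed step back to the first). The idiom in isolation, as a sketch:

    static int step_a(void), step_b(void), step_c(void);
    static void undo_a(void), undo_b(void);

    static int init_sketch(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;     /* nothing to unwind yet */
            err = step_b();
            if (err)
                    goto fail_b;
            err = step_c();
            if (err)
                    goto fail_c;
            return 0;

    fail_c:                         /* fall through: reverse order */
            undo_b();
    fail_b:
            undo_a();
            return err;
    }
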
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 535abe0c45e7..1f1897a1a702 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_app.c: Application module support for IPVS 2 * ip_vs_app.c: Application module support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 65f1ba112752..f8bdae47a77f 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_conn.c,v 1.31 2003/04/18 09:03:16 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 963981a9d501..a7879eafc3b5 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_core.c,v 1.34 2003/05/10 03:05:23 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
@@ -993,7 +991,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
993 == sysctl_ip_vs_sync_threshold[0])) || 991 == sysctl_ip_vs_sync_threshold[0])) ||
994 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && 992 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
995 ((cp->state == IP_VS_TCP_S_FIN_WAIT) || 993 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
996 (cp->state == IP_VS_TCP_S_CLOSE))))) 994 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
995 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
997 ip_vs_sync_conn(cp); 996 ip_vs_sync_conn(cp);
998 cp->old_state = cp->state; 997 cp->old_state = cp->state;
999 998
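In ip_vs_core.c the sync condition widens: a TCP connection is pushed to the backup director not only on entering FIN_WAIT but also in CLOSE_WAIT and TIME_WAIT, so a failover that happens mid-teardown still finds the connection in a consistent late state on the backup. The new predicate, factored into an illustrative helper (the helper itself is not part of the patch):

    /* equivalent to the state test added in the hunk above */
    static int tcp_state_wants_sync(int state)
    {
            return state == IP_VS_TCP_S_FIN_WAIT ||
                   state == IP_VS_TCP_S_CLOSE_WAIT ||
                   state == IP_VS_TCP_S_TIME_WAIT;
    }
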
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 94c5767c8e01..9a5ace0b4dd6 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_ctl.c,v 1.36 2003/06/08 09:31:19 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index dcf5d46aaa5e..8afc1503ed20 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Destination Hashing scheduling module 2 * IPVS: Destination Hashing scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_dh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * Inspired by the consistent hashing scheduler patch from 6 * Inspired by the consistent hashing scheduler patch from
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index dfa0d713c801..bc04eedd6dbb 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_est.c: simple rate estimator for IPVS 2 * ip_vs_est.c: simple rate estimator for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 59aa166b7678..c1c758e4f733 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_ftp.c: IPVS ftp application module 2 * ip_vs_ftp.c: IPVS ftp application module
3 * 3 *
4 * Version: $Id: ip_vs_ftp.c,v 1.13 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 3888642706ad..0efa3db4b180 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection scheduling module 2 * IPVS: Locality-Based Least-Connection scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_lblc.c,v 1.10 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index daa260eb21cf..8e3bbeb45138 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection with Replication scheduler 2 * IPVS: Locality-Based Least-Connection with Replication scheduler
3 * 3 *
4 * Version: $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index d88fef90a641..ac9f08e065d5 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Least-Connection Scheduling module
  *
- * Version: $Id: ip_vs_lc.c,v 1.10 2003/04/18 09:03:16 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index bc2a9e5f2a7b..a46bf258d420 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Never Queue scheduling module
  *
- * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 4b1c16cbb16b..876714f23d65 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_proto.c: transport protocol load balancing support for IPVS
  *
- * Version: $Id: ip_vs_proto.c,v 1.2 2003/04/18 09:03:16 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Julian Anastasov <ja@ssi.bg>
  *
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
index 4bf835e1d86d..73e0ea87c1f5 100644
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ b/net/ipv4/ipvs/ip_vs_proto_ah.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS
  *
- * Version: $Id: ip_vs_proto_ah.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
- *
  * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
  *          Wensong Zhang <wensong@linuxvirtualserver.org>
  *
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
index db6a6b7b1a0b..21d70c8ffa54 100644
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_esp.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
  *
- * Version: $Id: ip_vs_proto_esp.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
- *
  * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
  *          Wensong Zhang <wensong@linuxvirtualserver.org>
  *
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index b83dc14b0a4d..d0ea467986a0 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_proto_tcp.c: TCP load balancing support for IPVS
  *
- * Version: $Id: ip_vs_proto_tcp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Julian Anastasov <ja@ssi.bg>
  *
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 75771cb3cd6f..c6be5d56823f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_proto_udp.c: UDP load balancing support for IPVS
  *
- * Version: $Id: ip_vs_proto_udp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Julian Anastasov <ja@ssi.bg>
  *
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 433f8a947924..c8db12d39e61 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Round-Robin Scheduling module
  *
- * Version: $Id: ip_vs_rr.c,v 1.9 2002/09/15 08:14:08 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Peter Kese <peter.kese@ijs.si>
  *
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index 121a32b1b756..b64767309855 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -5,8 +5,6 @@
  * high-performance and highly available server based on a
  * cluster of servers.
  *
- * Version: $Id: ip_vs_sched.c,v 1.13 2003/05/10 03:05:23 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Peter Kese <peter.kese@ijs.si>
  *
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index dd7c128f9db3..2a7d31358181 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Shortest Expected Delay scheduling module
  *
- * Version: $Id: ip_vs_sed.c,v 1.1 2003/05/10 03:06:08 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 1b25b00ef1e1..b8fdfac65001 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Source Hashing scheduling module
  *
- * Version: $Id: ip_vs_sh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@gnuchina.org>
  *
  * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index eff54efe0351..45e9bd96c286 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -5,8 +5,6 @@
  * high-performance and highly available server based on a
  * cluster of servers.
  *
- * Version: $Id: ip_vs_sync.c,v 1.13 2003/06/08 09:31:19 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * ip_vs_sync:  sync connection info from master load balancer to backups
@@ -29,10 +27,12 @@
 #include <linux/in.h>
 #include <linux/igmp.h>                 /* for ip_mc_join_group */
 #include <linux/udp.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
 
 #include <net/ip.h>
 #include <net/sock.h>
-#include <asm/uaccess.h>                /* for get_fs and set_fs */
 
 #include <net/ip_vs.h>
 
@@ -68,8 +68,8 @@ struct ip_vs_sync_conn_options {
 };
 
 struct ip_vs_sync_thread_data {
-        struct completion *startup;
-        int state;
+        struct socket *sock;
+        char *buf;
 };
 
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
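
The reworked ip_vs_sync_thread_data hands the thread everything it needs up front: the starter allocates the structure and the resources in it, and the thread frees them when it exits. A minimal user-space analogue of that ownership handoff, sketched with pthreads (all names here are illustrative, not from the patch):

/* Sketch: heap-allocated thread arguments, freed by the thread itself. */
#include <pthread.h>
#include <stdlib.h>

struct thread_data {                    /* analogue of ip_vs_sync_thread_data */
        int sock;                       /* resource handed over by the starter */
        char *buf;                      /* receive buffer, owned by the thread */
};

static void *worker(void *arg)
{
        struct thread_data *tinfo = arg;
        /* ... use tinfo->sock and tinfo->buf ... */
        free(tinfo->buf);               /* the thread, not the starter, cleans up */
        free(tinfo);
        return NULL;
}

int main(void)
{
        struct thread_data *tinfo = malloc(sizeof(*tinfo));
        pthread_t t;

        if (!tinfo)
                return 1;
        tinfo->sock = -1;
        tinfo->buf = malloc(4096);
        if (!tinfo->buf || pthread_create(&t, NULL, worker, tinfo) != 0) {
                free(tinfo->buf);       /* on failure the starter still owns it */
                free(tinfo);
                return 1;
        }
        pthread_join(t, NULL);
        return 0;
}
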
@@ -140,18 +140,19 @@ volatile int ip_vs_backup_syncid = 0;
 char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 
+/* sync daemon tasks */
+static struct task_struct *sync_master_thread;
+static struct task_struct *sync_backup_thread;
+
 /* multicast addr */
-static struct sockaddr_in mcast_addr;
+static struct sockaddr_in mcast_addr = {
+        .sin_family             = AF_INET,
+        .sin_port               = __constant_htons(IP_VS_SYNC_PORT),
+        .sin_addr.s_addr        = __constant_htonl(IP_VS_SYNC_GROUP),
+};
 
 
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
-{
-        spin_lock(&ip_vs_sync_lock);
-        list_add_tail(&sb->list, &ip_vs_sync_queue);
-        spin_unlock(&ip_vs_sync_lock);
-}
-
-static inline struct ip_vs_sync_buff * sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(void)
 {
         struct ip_vs_sync_buff *sb;
 
@@ -195,6 +196,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
         kfree(sb);
 }
 
+static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+{
+        spin_lock(&ip_vs_sync_lock);
+        if (ip_vs_sync_state & IP_VS_STATE_MASTER)
+                list_add_tail(&sb->list, &ip_vs_sync_queue);
+        else
+                ip_vs_sync_buff_release(sb);
+        spin_unlock(&ip_vs_sync_lock);
+}
+
 /*
  * Get the current sync buffer if it has been created for more
  * than the specified time or the specified time is zero.
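
Note that sb_queue_tail() now tests ip_vs_sync_state under ip_vs_sync_lock, so a producer can never enqueue a buffer once stop_sync_thread() has cleared the MASTER bit under the same lock (the comment added to stop_sync_thread() further down spells this out). A runnable user-space sketch of the same check-under-lock discipline, with illustrative names:

/* Sketch: producers check a "running" flag under the same lock the
 * stopper takes, so nothing is queued after shutdown has begun. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int running = 1;
static int queued;

static void queue_tail(void)
{
        pthread_mutex_lock(&lock);
        if (running)
                queued++;               /* stopper cannot be mid-shutdown here */
        /* else: drop the item, shutdown already started */
        pthread_mutex_unlock(&lock);
}

static void stop(void)
{
        pthread_mutex_lock(&lock);
        running = 0;                    /* after this, queue_tail() drops items */
        pthread_mutex_unlock(&lock);
        /* now drain and free the queue without racing producers */
}

int main(void)
{
        queue_tail();
        stop();
        queue_tail();
        printf("queued = %d\n", queued);        /* prints 1 */
        return 0;
}
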
@@ -574,14 +585,17 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
 static struct socket * make_send_sock(void)
 {
         struct socket *sock;
+        int result;
 
         /* First create a socket */
-        if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock) < 0) {
+        result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+        if (result < 0) {
                 IP_VS_ERR("Error during creation of socket; terminating\n");
-                return NULL;
+                return ERR_PTR(result);
         }
 
-        if (set_mcast_if(sock->sk, ip_vs_master_mcast_ifn) < 0) {
+        result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+        if (result < 0) {
                 IP_VS_ERR("Error setting outbound mcast interface\n");
                 goto error;
         }
@@ -589,14 +603,15 @@ static struct socket * make_send_sock(void)
         set_mcast_loop(sock->sk, 0);
         set_mcast_ttl(sock->sk, 1);
 
-        if (bind_mcastif_addr(sock, ip_vs_master_mcast_ifn) < 0) {
+        result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+        if (result < 0) {
                 IP_VS_ERR("Error binding address of the mcast interface\n");
                 goto error;
         }
 
-        if (sock->ops->connect(sock,
-                               (struct sockaddr*)&mcast_addr,
-                               sizeof(struct sockaddr), 0) < 0) {
+        result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
+                        sizeof(struct sockaddr), 0);
+        if (result < 0) {
                 IP_VS_ERR("Error connecting to the multicast addr\n");
                 goto error;
         }
@@ -605,7 +620,7 @@ static struct socket * make_send_sock(void)
 
   error:
         sock_release(sock);
-        return NULL;
+        return ERR_PTR(result);
 }
 
 
@@ -615,27 +630,30 @@ static struct socket * make_send_sock(void)
 static struct socket * make_receive_sock(void)
 {
         struct socket *sock;
+        int result;
 
         /* First create a socket */
-        if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock) < 0) {
+        result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+        if (result < 0) {
                 IP_VS_ERR("Error during creation of socket; terminating\n");
-                return NULL;
+                return ERR_PTR(result);
         }
 
         /* it is equivalent to the REUSEADDR option in user-space */
         sock->sk->sk_reuse = 1;
 
-        if (sock->ops->bind(sock,
-                            (struct sockaddr*)&mcast_addr,
-                            sizeof(struct sockaddr)) < 0) {
+        result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
+                        sizeof(struct sockaddr));
+        if (result < 0) {
                 IP_VS_ERR("Error binding to the multicast addr\n");
                 goto error;
         }
 
         /* join the multicast group */
-        if (join_mcast_group(sock->sk,
-                             (struct in_addr*)&mcast_addr.sin_addr,
-                             ip_vs_backup_mcast_ifn) < 0) {
+        result = join_mcast_group(sock->sk,
+                        (struct in_addr *) &mcast_addr.sin_addr,
+                        ip_vs_backup_mcast_ifn);
+        if (result < 0) {
                 IP_VS_ERR("Error joining to the multicast group\n");
                 goto error;
         }
@@ -644,7 +662,7 @@ static struct socket * make_receive_sock(void)
 
   error:
         sock_release(sock);
-        return NULL;
+        return ERR_PTR(result);
 }
 
 
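
Both socket constructors now report the reason for failure instead of a bare NULL, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention: a negative errno value is carried in the pointer itself. A self-contained sketch of that encoding, simplified from the kernel's err.h:

/* Sketch of the ERR_PTR encoding: the top 4095 pointer values are
 * treated as negative errno codes rather than valid addresses. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_resource(int fail)
{
        static int resource;
        if (fail)
                return ERR_PTR(-ENOMEM);   /* caller learns the exact errno */
        return &resource;
}

int main(void)
{
        void *r = make_resource(1);
        if (IS_ERR(r))
                printf("failed: errno = %ld\n", -PTR_ERR(r));
        return 0;
}
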
@@ -702,44 +720,29 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
 }
 
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_wait);
-static pid_t sync_master_pid = 0;
-static pid_t sync_backup_pid = 0;
-
-static DECLARE_WAIT_QUEUE_HEAD(stop_sync_wait);
-static int stop_master_sync = 0;
-static int stop_backup_sync = 0;
-
-static void sync_master_loop(void)
+static int sync_thread_master(void *data)
 {
-        struct socket *sock;
+        struct ip_vs_sync_thread_data *tinfo = data;
         struct ip_vs_sync_buff *sb;
 
-        /* create the sending multicast socket */
-        sock = make_send_sock();
-        if (!sock)
-                return;
-
         IP_VS_INFO("sync thread started: state = MASTER, mcast_ifn = %s, "
                    "syncid = %d\n",
                    ip_vs_master_mcast_ifn, ip_vs_master_syncid);
 
-        for (;;) {
-                while ((sb=sb_dequeue())) {
-                        ip_vs_send_sync_msg(sock, sb->mesg);
+        while (!kthread_should_stop()) {
+                while ((sb = sb_dequeue())) {
+                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                         ip_vs_sync_buff_release(sb);
                 }
 
                 /* check if entries stay in curr_sb for 2 seconds */
-                if ((sb = get_curr_sync_buff(2*HZ))) {
-                        ip_vs_send_sync_msg(sock, sb->mesg);
+                sb = get_curr_sync_buff(2 * HZ);
+                if (sb) {
+                        ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
                         ip_vs_sync_buff_release(sb);
                 }
 
-                if (stop_master_sync)
-                        break;
-
-                msleep_interruptible(1000);
+                schedule_timeout_interruptible(HZ);
         }
 
         /* clean up the sync_buff queue */
@@ -753,267 +756,175 @@ static void sync_master_loop(void)
         }
 
         /* release the sending multicast socket */
-        sock_release(sock);
+        sock_release(tinfo->sock);
+        kfree(tinfo);
+
+        return 0;
 }
 
 
-static void sync_backup_loop(void)
+static int sync_thread_backup(void *data)
 {
-        struct socket *sock;
-        char *buf;
+        struct ip_vs_sync_thread_data *tinfo = data;
         int len;
 
-        if (!(buf = kmalloc(sync_recv_mesg_maxlen, GFP_ATOMIC))) {
-                IP_VS_ERR("sync_backup_loop: kmalloc error\n");
-                return;
-        }
-
-        /* create the receiving multicast socket */
-        sock = make_receive_sock();
-        if (!sock)
-                goto out;
-
         IP_VS_INFO("sync thread started: state = BACKUP, mcast_ifn = %s, "
                    "syncid = %d\n",
                    ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
 
-        for (;;) {
-                /* do you have data now? */
-                while (!skb_queue_empty(&(sock->sk->sk_receive_queue))) {
-                        if ((len =
-                             ip_vs_receive(sock, buf,
-                                           sync_recv_mesg_maxlen)) <= 0) {
+        while (!kthread_should_stop()) {
+                wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
+                         !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
+                         || kthread_should_stop());
+
+                /* do we have data now? */
+                while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
+                        len = ip_vs_receive(tinfo->sock, tinfo->buf,
+                                        sync_recv_mesg_maxlen);
+                        if (len <= 0) {
                                 IP_VS_ERR("receiving message error\n");
                                 break;
                         }
-                        /* disable bottom half, because it accessed the data
+
+                        /* disable bottom half, because it accesses the data
                            shared by softirq while getting/creating conns */
                         local_bh_disable();
-                        ip_vs_process_message(buf, len);
+                        ip_vs_process_message(tinfo->buf, len);
                         local_bh_enable();
                 }
-
-                if (stop_backup_sync)
-                        break;
-
-                msleep_interruptible(1000);
         }
 
         /* release the sending multicast socket */
-        sock_release(sock);
+        sock_release(tinfo->sock);
+        kfree(tinfo->buf);
+        kfree(tinfo);
 
-  out:
-        kfree(buf);
+        return 0;
 }
 
 
-static void set_sync_pid(int sync_state, pid_t sync_pid)
-{
-        if (sync_state == IP_VS_STATE_MASTER)
-                sync_master_pid = sync_pid;
-        else if (sync_state == IP_VS_STATE_BACKUP)
-                sync_backup_pid = sync_pid;
-}
-
-static void set_stop_sync(int sync_state, int set)
+int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 {
-        if (sync_state == IP_VS_STATE_MASTER)
-                stop_master_sync = set;
-        else if (sync_state == IP_VS_STATE_BACKUP)
-                stop_backup_sync = set;
-        else {
-                stop_master_sync = set;
-                stop_backup_sync = set;
-        }
-}
-
-static int sync_thread(void *startup)
-{
-        DECLARE_WAITQUEUE(wait, current);
-        mm_segment_t oldmm;
-        int state;
-        const char *name;
-        struct ip_vs_sync_thread_data *tinfo = startup;
+        struct ip_vs_sync_thread_data *tinfo;
+        struct task_struct **realtask, *task;
+        struct socket *sock;
+        char *name, *buf = NULL;
+        int (*threadfn)(void *data);
+        int result = -ENOMEM;
 
-        /* increase the module use count */
-        ip_vs_use_count_inc();
+        IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
+        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
+                  sizeof(struct ip_vs_sync_conn));
 
-        if (ip_vs_sync_state & IP_VS_STATE_MASTER && !sync_master_pid) {
-                state = IP_VS_STATE_MASTER;
+        if (state == IP_VS_STATE_MASTER) {
+                if (sync_master_thread)
+                        return -EEXIST;
+
+                strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
+                        sizeof(ip_vs_master_mcast_ifn));
+                ip_vs_master_syncid = syncid;
+                realtask = &sync_master_thread;
                 name = "ipvs_syncmaster";
-        } else if (ip_vs_sync_state & IP_VS_STATE_BACKUP && !sync_backup_pid) {
-                state = IP_VS_STATE_BACKUP;
+                threadfn = sync_thread_master;
+                sock = make_send_sock();
+        } else if (state == IP_VS_STATE_BACKUP) {
+                if (sync_backup_thread)
+                        return -EEXIST;
+
+                strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
+                        sizeof(ip_vs_backup_mcast_ifn));
+                ip_vs_backup_syncid = syncid;
+                realtask = &sync_backup_thread;
                 name = "ipvs_syncbackup";
+                threadfn = sync_thread_backup;
+                sock = make_receive_sock();
         } else {
-                IP_VS_BUG();
-                ip_vs_use_count_dec();
                 return -EINVAL;
         }
 
-        daemonize(name);
-
-        oldmm = get_fs();
-        set_fs(KERNEL_DS);
-
-        /* Block all signals */
-        spin_lock_irq(&current->sighand->siglock);
-        siginitsetinv(&current->blocked, 0);
-        recalc_sigpending();
-        spin_unlock_irq(&current->sighand->siglock);
+        if (IS_ERR(sock)) {
+                result = PTR_ERR(sock);
+                goto out;
+        }
 
-        /* set the maximum length of sync message */
         set_sync_mesg_maxlen(state);
+        if (state == IP_VS_STATE_BACKUP) {
+                buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+                if (!buf)
+                        goto outsocket;
+        }
 
-        /* set up multicast address */
-        mcast_addr.sin_family = AF_INET;
-        mcast_addr.sin_port = htons(IP_VS_SYNC_PORT);
-        mcast_addr.sin_addr.s_addr = htonl(IP_VS_SYNC_GROUP);
-
-        add_wait_queue(&sync_wait, &wait);
-
-        set_sync_pid(state, task_pid_nr(current));
-        complete(tinfo->startup);
-
-        /*
-         * once we call the completion queue above, we should
-         * null out that reference, since its allocated on the
-         * stack of the creating kernel thread
-         */
-        tinfo->startup = NULL;
-
-        /* processing master/backup loop here */
-        if (state == IP_VS_STATE_MASTER)
-                sync_master_loop();
-        else if (state == IP_VS_STATE_BACKUP)
-                sync_backup_loop();
-        else IP_VS_BUG();
-
-        remove_wait_queue(&sync_wait, &wait);
-
-        /* thread exits */
-
-        /*
-         * If we weren't explicitly stopped, then we
-         * exited in error, and should undo our state
-         */
-        if ((!stop_master_sync) && (!stop_backup_sync))
-                ip_vs_sync_state -= tinfo->state;
+        tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+        if (!tinfo)
+                goto outbuf;
 
-        set_sync_pid(state, 0);
-        IP_VS_INFO("sync thread stopped!\n");
+        tinfo->sock = sock;
+        tinfo->buf = buf;
 
-        set_fs(oldmm);
+        task = kthread_run(threadfn, tinfo, name);
+        if (IS_ERR(task)) {
+                result = PTR_ERR(task);
+                goto outtinfo;
+        }
 
-        /* decrease the module use count */
-        ip_vs_use_count_dec();
+        /* mark as active */
+        *realtask = task;
+        ip_vs_sync_state |= state;
 
-        set_stop_sync(state, 0);
-        wake_up(&stop_sync_wait);
+        /* increase the module use count */
+        ip_vs_use_count_inc();
 
-        /*
-         * we need to free the structure that was allocated
-         * for us in start_sync_thread
-         */
-        kfree(tinfo);
         return 0;
-}
-
-
-static int fork_sync_thread(void *startup)
-{
-        pid_t pid;
-
-        /* fork the sync thread here, then the parent process of the
-           sync thread is the init process after this thread exits. */
-  repeat:
-        if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) {
-                IP_VS_ERR("could not create sync_thread due to %d... "
-                          "retrying.\n", pid);
-                msleep_interruptible(1000);
-                goto repeat;
-        }
 
-        return 0;
+outtinfo:
+        kfree(tinfo);
+outbuf:
+        kfree(buf);
+outsocket:
+        sock_release(sock);
+out:
+        return result;
 }
 
 
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int stop_sync_thread(int state)
 {
-        DECLARE_COMPLETION_ONSTACK(startup);
-        pid_t pid;
-        struct ip_vs_sync_thread_data *tinfo;
-
-        if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
-            (state == IP_VS_STATE_BACKUP && sync_backup_pid))
-                return -EEXIST;
-
-        /*
-         * Note that tinfo will be freed in sync_thread on exit
-         */
-        tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
-        if (!tinfo)
-                return -ENOMEM;
-
         IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
-        IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
-                  sizeof(struct ip_vs_sync_conn));
 
-        ip_vs_sync_state |= state;
         if (state == IP_VS_STATE_MASTER) {
-                strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
-                        sizeof(ip_vs_master_mcast_ifn));
-                ip_vs_master_syncid = syncid;
-        } else {
-                strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
-                        sizeof(ip_vs_backup_mcast_ifn));
-                ip_vs_backup_syncid = syncid;
-        }
-
-        tinfo->state = state;
-        tinfo->startup = &startup;
-
-  repeat:
-        if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
-                IP_VS_ERR("could not create fork_sync_thread due to %d... "
-                          "retrying.\n", pid);
-                msleep_interruptible(1000);
-                goto repeat;
-        }
-
-        wait_for_completion(&startup);
-
-        return 0;
-}
-
+                if (!sync_master_thread)
+                        return -ESRCH;
 
-int stop_sync_thread(int state)
-{
-        DECLARE_WAITQUEUE(wait, current);
+                IP_VS_INFO("stopping master sync thread %d ...\n",
+                           task_pid_nr(sync_master_thread));
 
-        if ((state == IP_VS_STATE_MASTER && !sync_master_pid) ||
-            (state == IP_VS_STATE_BACKUP && !sync_backup_pid))
-                return -ESRCH;
+                /*
+                 * The lock synchronizes with sb_queue_tail(), so that we don't
+                 * add sync buffers to the queue, when we are already in
+                 * progress of stopping the master sync daemon.
+                 */
+                spin_lock(&ip_vs_sync_lock);
+                ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
+                spin_unlock(&ip_vs_sync_lock);
+                kthread_stop(sync_master_thread);
+                sync_master_thread = NULL;
+        } else if (state == IP_VS_STATE_BACKUP) {
+                if (!sync_backup_thread)
+                        return -ESRCH;
+
+                IP_VS_INFO("stopping backup sync thread %d ...\n",
+                           task_pid_nr(sync_backup_thread));
+
+                ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
+                kthread_stop(sync_backup_thread);
+                sync_backup_thread = NULL;
+        } else {
+                return -EINVAL;
+        }
 
-        IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
-        IP_VS_INFO("stopping sync thread %d ...\n",
-                   (state == IP_VS_STATE_MASTER) ?
-                   sync_master_pid : sync_backup_pid);
-
-        __set_current_state(TASK_UNINTERRUPTIBLE);
-        add_wait_queue(&stop_sync_wait, &wait);
-        set_stop_sync(state, 1);
-        ip_vs_sync_state -= state;
-        wake_up(&sync_wait);
-        schedule();
-        __set_current_state(TASK_RUNNING);
-        remove_wait_queue(&stop_sync_wait, &wait);
-
-        /* Note: no need to reap the sync thread, because its parent
-           process is the init process */
-
-        if ((state == IP_VS_STATE_MASTER && stop_master_sync) ||
-            (state == IP_VS_STATE_BACKUP && stop_backup_sync))
-                IP_VS_BUG();
+        /* decrease the module use count */
+        ip_vs_use_count_dec();
 
         return 0;
 }
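
The conversion above is the standard kthread lifecycle: kthread_run() creates and wakes the thread, the thread's loop polls kthread_should_stop(), and kthread_stop() sets that flag and blocks until the thread function returns, which is what lets stop_sync_thread() clear the task pointers safely. A minimal stand-alone module exercising the same calls (module and thread names are illustrative):

/* Minimal kthread lifecycle demo, using the same API as the patch above. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work, then sleep for a second */
                schedule_timeout_interruptible(HZ);
        }
        return 0;               /* this value is returned by kthread_stop() */
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_fn, NULL, "kthread_demo");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);      /* thread never started */
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);        /* wakes the thread and waits for it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
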
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 8a9d913261d8..772c3cb4eca1 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Weighted Least-Connection Scheduling module
  *
- * Version: $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Peter Kese <peter.kese@ijs.si>
  *
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 85c680add6df..1d6932d7dc97 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -1,8 +1,6 @@
 /*
  * IPVS: Weighted Round-Robin Scheduling module
  *
- * Version: $Id: ip_vs_wrr.c,v 1.12 2002/09/15 08:14:08 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index f63006caea03..9892d4aca42e 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -1,8 +1,6 @@
 /*
  * ip_vs_xmit.c: various packet transmitters for IPVS
  *
- * Version: $Id: ip_vs_xmit.c,v 1.2 2002/11/30 01:50:35 wensong Exp $
- *
  * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
  *          Julian Anastasov <ja@ssi.bg>
  *
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2767841a8cef..f23e60c93ef9 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -213,8 +213,7 @@ config IP_NF_TARGET_NETMAP
 	help
 	  NETMAP is an implementation of static 1:1 NAT mapping of network
 	  addresses. It maps the network address part, while keeping the host
-	  address part intact. It is similar to Fast NAT, except that
-	  Netfilter's connection tracking doesn't work well with Fast NAT.
+	  address part intact.
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
@@ -365,6 +364,18 @@ config IP_NF_RAW
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+# security table for MAC policy
+config IP_NF_SECURITY
+	tristate "Security table"
+	depends on IP_NF_IPTABLES
+	depends on SECURITY
+	default m if NETFILTER_ADVANCED=n
+	help
+	  This option adds a `security' table to iptables, for use
+	  with Mandatory Access Control (MAC) policy.
+
+	  If unsure, say N.
+
 # ARP tables
 config IP_NF_ARPTABLES
 	tristate "ARP tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index d9b92fbf5579..3f31291f37ce 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
 obj-$(CONFIG_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
+obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
 # matches
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 26a37cedcf2e..aa33a4a7a715 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -156,7 +156,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	case IPQ_COPY_META:
 	case IPQ_COPY_NONE:
 		size = NLMSG_SPACE(sizeof(*pmsg));
-		data_len = 0;
 		break;
 
 	case IPQ_COPY_PACKET:
@@ -224,8 +223,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
-	if (skb)
-		kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip_queue: error creating packet message\n");
 	return NULL;
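
The guard around kfree_skb() could go because kfree_skb(), like kfree(), is a no-op when passed NULL, so call sites need no NULL check of their own. A trivial sketch of that destructor idiom:

/* Sketch: a NULL-tolerant destructor removes guards at every call site. */
#include <stdlib.h>

struct buffer {
        char *data;
};

static void buffer_free(struct buffer *b)
{
        if (!b)
                return;         /* tolerate NULL, like kfree()/kfree_skb() */
        free(b->data);
        free(b);
}

int main(void)
{
        struct buffer *b = NULL;
        buffer_free(b);         /* no guard needed at the call site */
        return 0;
}
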
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
new file mode 100644
index 000000000000..2b472ac2263a
--- /dev/null
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -0,0 +1,180 @@
+/*
+ * "security" table
+ *
+ * This is for use by Mandatory Access Control (MAC) security models,
+ * which need to be able to manage security policy in separate context
+ * to DAC.
+ *
+ * Based on iptable_mangle.c
+ *
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
+MODULE_DESCRIPTION("iptables security table, for MAC rules");
+
+#define SECURITY_VALID_HOOKS	(1 << NF_INET_LOCAL_IN) | \
+				(1 << NF_INET_FORWARD) | \
+				(1 << NF_INET_LOCAL_OUT)
+
+static struct
+{
+	struct ipt_replace repl;
+	struct ipt_standard entries[3];
+	struct ipt_error term;
+} initial_table __initdata = {
+	.repl = {
+		.name = "security",
+		.valid_hooks = SECURITY_VALID_HOOKS,
+		.num_entries = 4,
+		.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
+		.hook_entry = {
+			[NF_INET_LOCAL_IN]	= 0,
+			[NF_INET_FORWARD]	= sizeof(struct ipt_standard),
+			[NF_INET_LOCAL_OUT]	= sizeof(struct ipt_standard) * 2,
+		},
+		.underflow = {
+			[NF_INET_LOCAL_IN]	= 0,
+			[NF_INET_FORWARD]	= sizeof(struct ipt_standard),
+			[NF_INET_LOCAL_OUT]	= sizeof(struct ipt_standard) * 2,
+		},
+	},
+	.entries = {
+		IPT_STANDARD_INIT(NF_ACCEPT),	/* LOCAL_IN */
+		IPT_STANDARD_INIT(NF_ACCEPT),	/* FORWARD */
+		IPT_STANDARD_INIT(NF_ACCEPT),	/* LOCAL_OUT */
+	},
+	.term = IPT_ERROR_INIT,			/* ERROR */
+};
+
+static struct xt_table security_table = {
+	.name		= "security",
+	.valid_hooks	= SECURITY_VALID_HOOKS,
+	.lock		= __RW_LOCK_UNLOCKED(security_table.lock),
+	.me		= THIS_MODULE,
+	.af		= AF_INET,
+};
+
+static unsigned int
+ipt_local_in_hook(unsigned int hook,
+		  struct sk_buff *skb,
+		  const struct net_device *in,
+		  const struct net_device *out,
+		  int (*okfn)(struct sk_buff *))
+{
+	return ipt_do_table(skb, hook, in, out,
+			    nf_local_in_net(in, out)->ipv4.iptable_security);
+}
+
+static unsigned int
+ipt_forward_hook(unsigned int hook,
+		 struct sk_buff *skb,
+		 const struct net_device *in,
+		 const struct net_device *out,
+		 int (*okfn)(struct sk_buff *))
+{
+	return ipt_do_table(skb, hook, in, out,
+			    nf_forward_net(in, out)->ipv4.iptable_security);
+}
+
+static unsigned int
+ipt_local_out_hook(unsigned int hook,
+		   struct sk_buff *skb,
+		   const struct net_device *in,
+		   const struct net_device *out,
+		   int (*okfn)(struct sk_buff *))
+{
+	/* Somebody is playing with raw sockets. */
+	if (skb->len < sizeof(struct iphdr)
+	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
+		if (net_ratelimit())
+			printk(KERN_INFO "iptable_security: ignoring short "
+			       "SOCK_RAW packet.\n");
+		return NF_ACCEPT;
+	}
+	return ipt_do_table(skb, hook, in, out,
+			    nf_local_out_net(in, out)->ipv4.iptable_security);
+}
+
+static struct nf_hook_ops ipt_ops[] __read_mostly = {
+	{
+		.hook		= ipt_local_in_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_LOCAL_IN,
+		.priority	= NF_IP_PRI_SECURITY,
+	},
+	{
+		.hook		= ipt_forward_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_FORWARD,
+		.priority	= NF_IP_PRI_SECURITY,
+	},
+	{
+		.hook		= ipt_local_out_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_LOCAL_OUT,
+		.priority	= NF_IP_PRI_SECURITY,
+	},
+};
+
+static int __net_init iptable_security_net_init(struct net *net)
+{
+	net->ipv4.iptable_security =
+		ipt_register_table(net, &security_table, &initial_table.repl);
+
+	if (IS_ERR(net->ipv4.iptable_security))
+		return PTR_ERR(net->ipv4.iptable_security);
+
+	return 0;
+}
+
+static void __net_exit iptable_security_net_exit(struct net *net)
+{
+	ipt_unregister_table(net->ipv4.iptable_security);
+}
+
+static struct pernet_operations iptable_security_net_ops = {
+	.init = iptable_security_net_init,
+	.exit = iptable_security_net_exit,
+};
+
+static int __init iptable_security_init(void)
+{
+	int ret;
+
+	ret = register_pernet_subsys(&iptable_security_net_ops);
+	if (ret < 0)
+		return ret;
+
+	ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+	if (ret < 0)
+		goto cleanup_table;
+
+	return ret;
+
+cleanup_table:
+	unregister_pernet_subsys(&iptable_security_net_ops);
+	return ret;
+}
+
+static void __exit iptable_security_fini(void)
+{
+	nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+	unregister_pernet_subsys(&iptable_security_net_ops);
+}
+
+module_init(iptable_security_init);
+module_exit(iptable_security_fini);
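
All three hooks register at NF_IP_PRI_SECURITY, which slots the security table after the filter table but before source NAT on each hook. For orientation, an abridged sketch of the enum nf_ip_hook_priorities ordering as found in kernel headers of this era (treat the exact values as indicative):

/* Excerpt (abridged) of enum nf_ip_hook_priorities ordering. */
enum nf_ip_hook_priorities {
	NF_IP_PRI_RAW		= -300,
	NF_IP_PRI_CONNTRACK	= -200,
	NF_IP_PRI_MANGLE	= -150,
	NF_IP_PRI_NAT_DST	= -100,
	NF_IP_PRI_FILTER	= 0,
	NF_IP_PRI_SECURITY	= 50,	/* used by iptable_security above */
	NF_IP_PRI_NAT_SRC	= 100,
};
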
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 78ab19accace..97791048fa9b 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -87,9 +87,8 @@ static int icmp_packet(struct nf_conn *ct,
 	   means this will only run once even if count hits zero twice
 	   (theoretically possible with SMP) */
 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
-		if (atomic_dec_and_test(&ct->proto.icmp.count)
-		    && del_timer(&ct->timeout))
-			ct->timeout.function((unsigned long)ct);
+		if (atomic_dec_and_test(&ct->proto.icmp.count))
+			nf_ct_kill_acct(ct, ctinfo, skb);
 	} else {
 		atomic_inc(&ct->proto.icmp.count);
 		nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 552169b41b16..eb5cee279c5f 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,8 +7,6 @@
  * PROC file system.  It is mainly used for debugging and
  * statistics.
  *
- * Version: $Id: proc.c,v 1.45 2001/05/16 16:45:35 davem Exp $
- *
  * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *          Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
  *          Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de>
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 971ab9356e51..ea50da0649fd 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -5,8 +5,6 @@
  *
  * INET protocol dispatch tables.
  *
- * Version: $Id: protocol.c,v 1.14 2001/05/18 02:25:49 davem Exp $
- *
  * Authors: Ross Biro
  *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 37a1ecd9d600..cd975743bcd2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -5,8 +5,6 @@
  *
  * RAW - implementation of IP "raw" sockets.
  *
- * Version: $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
- *
  * Authors: Ross Biro
  *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
@@ -322,6 +320,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 			   unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	struct iphdr *iph;
 	struct sk_buff *skb;
 	unsigned int iphlen;
@@ -370,7 +369,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
 	if (iph->protocol == IPPROTO_ICMP)
-		icmp_out_count(((struct icmphdr *)
+		icmp_out_count(net, ((struct icmphdr *)
 			skb_transport_header(skb))->type);
 
 	err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
@@ -386,7 +385,7 @@ error_fault:
 	err = -EFAULT;
 	kfree_skb(skb);
 error:
-	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
 
@@ -608,12 +607,11 @@ static void raw_close(struct sock *sk, long timeout)
 	sk_common_release(sk);
 }
 
-static int raw_destroy(struct sock *sk)
+static void raw_destroy(struct sock *sk)
 {
 	lock_sock(sk);
 	ip_flush_pending_frames(sk);
 	release_sock(sk);
-	return 0;
 }
 
 /* This gets rid of all the nasties in af_inet. -DaveM */
@@ -947,7 +945,7 @@ static int raw_seq_show(struct seq_file *seq, void *v)
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq, "  sl  local_address rem_address   st tx_queue "
 				"rx_queue tr tm->when retrnsmt   uid  timeout "
-				"inode drops\n");
+				"inode ref pointer drops\n");
 	else
 		raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
 	return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 96be336064fb..e4ab0ac94f92 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,8 +5,6 @@
  *
  * ROUTE - implementation of the IP router.
  *
- * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
- *
  * Authors: Ross Biro
  *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *          Alan Cox, <gw4pts@gw4pts.ampr.org>
@@ -134,7 +132,6 @@ static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
 
 static void rt_worker_func(struct work_struct *work);
 static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
-static struct timer_list rt_secret_timer;
 
 /*
  * Interface to generic destination cache.
@@ -253,20 +250,25 @@ static inline void rt_hash_lock_init(void)
 static struct rt_hash_bucket *rt_hash_table __read_mostly;
 static unsigned rt_hash_mask __read_mostly;
 static unsigned int rt_hash_log __read_mostly;
-static atomic_t rt_genid __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
 	(__raw_get_cpu_var(rt_cache_stat).field++)
 
-static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx)
+static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
+				   int genid)
 {
 	return jhash_3words((__force u32)(__be32)(daddr),
 			    (__force u32)(__be32)(saddr),
-			    idx, atomic_read(&rt_genid))
+			    idx, genid)
 		& rt_hash_mask;
 }
 
+static inline int rt_genid(struct net *net)
+{
+	return atomic_read(&net->ipv4.rt_genid);
+}
+
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
 	struct seq_net_private p;
@@ -336,7 +338,7 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 	struct rt_cache_iter_state *st = seq->private;
 	if (*pos)
 		return rt_cache_get_idx(seq, *pos - 1);
-	st->genid = atomic_read(&rt_genid);
+	st->genid = rt_genid(seq_file_net(seq));
 	return SEQ_START_TOKEN;
 }
 
@@ -683,6 +685,11 @@ static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 	return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
 }
 
+static inline int rt_is_expired(struct rtable *rth)
+{
+	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+}
+
 /*
  * Perform a full scan of hash table and free all entries.
  * Can be called by a softirq or a process.
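
rt_is_expired() is the heart of the per-namespace flush scheme: every cached route is stamped with its netns generation counter at creation, a flush just bumps the counter, and stale entries are reaped lazily wherever they are next encountered. A runnable user-space sketch of the generation-counter pattern (names illustrative):

/* User-space sketch of generation-counter cache invalidation:
 * bumping one counter lazily expires every stamped entry. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int genid;

struct entry {
        int key;
        int genid;
};

static void entry_init(struct entry *e, int key)
{
        e->key = key;
        e->genid = atomic_load(&genid);   /* stamp at creation */
}

static int entry_is_expired(const struct entry *e)
{
        return e->genid != atomic_load(&genid);
}

static void cache_invalidate(void)
{
        atomic_fetch_add(&genid, 1);      /* all stamped entries go stale */
}

int main(void)
{
        struct entry e;

        entry_init(&e, 42);
        printf("expired before flush: %d\n", entry_is_expired(&e));  /* 0 */
        cache_invalidate();
        printf("expired after flush:  %d\n", entry_is_expired(&e));  /* 1 */
        return 0;
}
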
@@ -692,6 +699,7 @@ static void rt_do_flush(int process_context)
692{ 699{
693 unsigned int i; 700 unsigned int i;
694 struct rtable *rth, *next; 701 struct rtable *rth, *next;
702 struct rtable * tail;
695 703
696 for (i = 0; i <= rt_hash_mask; i++) { 704 for (i = 0; i <= rt_hash_mask; i++) {
697 if (process_context && need_resched()) 705 if (process_context && need_resched())
@@ -701,11 +709,39 @@ static void rt_do_flush(int process_context)
701 continue; 709 continue;
702 710
703 spin_lock_bh(rt_hash_lock_addr(i)); 711 spin_lock_bh(rt_hash_lock_addr(i));
712#ifdef CONFIG_NET_NS
713 {
714 struct rtable ** prev, * p;
715
716 rth = rt_hash_table[i].chain;
717
718 /* defer releasing the head of the list after spin_unlock */
719 for (tail = rth; tail; tail = tail->u.dst.rt_next)
720 if (!rt_is_expired(tail))
721 break;
722 if (rth != tail)
723 rt_hash_table[i].chain = tail;
724
725 /* call rt_free on entries after the tail requiring flush */
726 prev = &rt_hash_table[i].chain;
727 for (p = *prev; p; p = next) {
728 next = p->u.dst.rt_next;
729 if (!rt_is_expired(p)) {
730 prev = &p->u.dst.rt_next;
731 } else {
732 *prev = next;
733 rt_free(p);
734 }
735 }
736 }
737#else
704 rth = rt_hash_table[i].chain; 738 rth = rt_hash_table[i].chain;
705 rt_hash_table[i].chain = NULL; 739 rt_hash_table[i].chain = NULL;
740 tail = NULL;
741#endif
706 spin_unlock_bh(rt_hash_lock_addr(i)); 742 spin_unlock_bh(rt_hash_lock_addr(i));
707 743
708 for (; rth; rth = next) { 744 for (; rth != tail; rth = next) {
709 next = rth->u.dst.rt_next; 745 next = rth->u.dst.rt_next;
710 rt_free(rth); 746 rt_free(rth);
711 } 747 }
@@ -738,7 +774,7 @@ static void rt_check_expire(void)
738 continue; 774 continue;
739 spin_lock_bh(rt_hash_lock_addr(i)); 775 spin_lock_bh(rt_hash_lock_addr(i));
740 while ((rth = *rthp) != NULL) { 776 while ((rth = *rthp) != NULL) {
741 if (rth->rt_genid != atomic_read(&rt_genid)) { 777 if (rt_is_expired(rth)) {
742 *rthp = rth->u.dst.rt_next; 778 *rthp = rth->u.dst.rt_next;
743 rt_free(rth); 779 rt_free(rth);
744 continue; 780 continue;
@@ -781,21 +817,21 @@ static void rt_worker_func(struct work_struct *work)
781 * many times (2^24) without giving recent rt_genid. 817 * many times (2^24) without giving recent rt_genid.
782 * Jenkins hash is strong enough that litle changes of rt_genid are OK. 818 * Jenkins hash is strong enough that litle changes of rt_genid are OK.
783 */ 819 */
784static void rt_cache_invalidate(void) 820static void rt_cache_invalidate(struct net *net)
785{ 821{
786 unsigned char shuffle; 822 unsigned char shuffle;
787 823
788 get_random_bytes(&shuffle, sizeof(shuffle)); 824 get_random_bytes(&shuffle, sizeof(shuffle));
789 atomic_add(shuffle + 1U, &rt_genid); 825 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
790} 826}
791 827
792/* 828/*
793 * delay < 0 : invalidate cache (fast : entries will be deleted later) 829 * delay < 0 : invalidate cache (fast : entries will be deleted later)
794 * delay >= 0 : invalidate & flush cache (can be long) 830 * delay >= 0 : invalidate & flush cache (can be long)
795 */ 831 */
796void rt_cache_flush(int delay) 832void rt_cache_flush(struct net *net, int delay)
797{ 833{
798 rt_cache_invalidate(); 834 rt_cache_invalidate(net);
799 if (delay >= 0) 835 if (delay >= 0)
800 rt_do_flush(!in_softirq()); 836 rt_do_flush(!in_softirq());
801} 837}
@@ -803,10 +839,11 @@ void rt_cache_flush(int delay)
803/* 839/*
804 * We change rt_genid and let gc do the cleanup 840 * We change rt_genid and let gc do the cleanup
805 */ 841 */
806static void rt_secret_rebuild(unsigned long dummy) 842static void rt_secret_rebuild(unsigned long __net)
807{ 843{
808 rt_cache_invalidate(); 844 struct net *net = (struct net *)__net;
809 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); 845 rt_cache_invalidate(net);
846 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
810} 847}
811 848
812/* 849/*
@@ -882,7 +919,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
882 rthp = &rt_hash_table[k].chain; 919 rthp = &rt_hash_table[k].chain;
883 spin_lock_bh(rt_hash_lock_addr(k)); 920 spin_lock_bh(rt_hash_lock_addr(k));
884 while ((rth = *rthp) != NULL) { 921 while ((rth = *rthp) != NULL) {
885 if (rth->rt_genid == atomic_read(&rt_genid) && 922 if (!rt_is_expired(rth) &&
886 !rt_may_expire(rth, tmo, expire)) { 923 !rt_may_expire(rth, tmo, expire)) {
887 tmo >>= 1; 924 tmo >>= 1;
888 rthp = &rth->u.dst.rt_next; 925 rthp = &rth->u.dst.rt_next;
@@ -964,7 +1001,7 @@ restart:
964 1001
965 spin_lock_bh(rt_hash_lock_addr(hash)); 1002 spin_lock_bh(rt_hash_lock_addr(hash));
966 while ((rth = *rthp) != NULL) { 1003 while ((rth = *rthp) != NULL) {
967 if (rth->rt_genid != atomic_read(&rt_genid)) { 1004 if (rt_is_expired(rth)) {
968 *rthp = rth->u.dst.rt_next; 1005 *rthp = rth->u.dst.rt_next;
969 rt_free(rth); 1006 rt_free(rth);
970 continue; 1007 continue;
@@ -1140,7 +1177,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
1140 spin_lock_bh(rt_hash_lock_addr(hash)); 1177 spin_lock_bh(rt_hash_lock_addr(hash));
1141 ip_rt_put(rt); 1178 ip_rt_put(rt);
1142 while ((aux = *rthp) != NULL) { 1179 while ((aux = *rthp) != NULL) {
1143 if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) { 1180 if (aux == rt || rt_is_expired(aux)) {
1144 *rthp = aux->u.dst.rt_next; 1181 *rthp = aux->u.dst.rt_next;
1145 rt_free(aux); 1182 rt_free(aux);
1146 continue; 1183 continue;
@@ -1182,7 +1219,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1182 1219
1183 for (i = 0; i < 2; i++) { 1220 for (i = 0; i < 2; i++) {
1184 for (k = 0; k < 2; k++) { 1221 for (k = 0; k < 2; k++) {
1185 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); 1222 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1223 rt_genid(net));
1186 1224
1187 rthp=&rt_hash_table[hash].chain; 1225 rthp=&rt_hash_table[hash].chain;
1188 1226
@@ -1194,7 +1232,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1194 rth->fl.fl4_src != skeys[i] || 1232 rth->fl.fl4_src != skeys[i] ||
1195 rth->fl.oif != ikeys[k] || 1233 rth->fl.oif != ikeys[k] ||
1196 rth->fl.iif != 0 || 1234 rth->fl.iif != 0 ||
1197 rth->rt_genid != atomic_read(&rt_genid) || 1235 rt_is_expired(rth) ||
1198 !net_eq(dev_net(rth->u.dst.dev), net)) { 1236 !net_eq(dev_net(rth->u.dst.dev), net)) {
1199 rthp = &rth->u.dst.rt_next; 1237 rthp = &rth->u.dst.rt_next;
1200 continue; 1238 continue;
@@ -1233,7 +1271,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1233 rt->u.dst.neighbour = NULL; 1271 rt->u.dst.neighbour = NULL;
1234 rt->u.dst.hh = NULL; 1272 rt->u.dst.hh = NULL;
1235 rt->u.dst.xfrm = NULL; 1273 rt->u.dst.xfrm = NULL;
1236 rt->rt_genid = atomic_read(&rt_genid); 1274 rt->rt_genid = rt_genid(net);
1237 rt->rt_flags |= RTCF_REDIRECTED; 1275 rt->rt_flags |= RTCF_REDIRECTED;
1238 1276
1239 /* Gateway is different ... */ 1277 /* Gateway is different ... */
@@ -1297,7 +1335,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1297 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1335 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1298 rt->u.dst.expires) { 1336 rt->u.dst.expires) {
1299 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1337 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1300 rt->fl.oif); 1338 rt->fl.oif,
1339 rt_genid(dev_net(dst->dev)));
1301#if RT_CACHE_DEBUG >= 1 1340#if RT_CACHE_DEBUG >= 1
1302 printk(KERN_DEBUG "ipv4_negative_advice: redirect to " 1341 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1303 NIPQUAD_FMT "/%02x dropped\n", 1342 NIPQUAD_FMT "/%02x dropped\n",
@@ -1390,7 +1429,8 @@ static int ip_error(struct sk_buff *skb)
1390 break; 1429 break;
1391 case ENETUNREACH: 1430 case ENETUNREACH:
1392 code = ICMP_NET_UNREACH; 1431 code = ICMP_NET_UNREACH;
1393 IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES); 1432 IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
1433 IPSTATS_MIB_INNOROUTES);
1394 break; 1434 break;
1395 case EACCES: 1435 case EACCES:
1396 code = ICMP_PKT_FILTERED; 1436 code = ICMP_PKT_FILTERED;
@@ -1446,7 +1486,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1446 1486
1447 for (k = 0; k < 2; k++) { 1487 for (k = 0; k < 2; k++) {
1448 for (i = 0; i < 2; i++) { 1488 for (i = 0; i < 2; i++) {
1449 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); 1489 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1490 rt_genid(net));
1450 1491
1451 rcu_read_lock(); 1492 rcu_read_lock();
1452 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1493 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1461,7 +1502,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1461 rth->fl.iif != 0 || 1502 rth->fl.iif != 0 ||
1462 dst_metric_locked(&rth->u.dst, RTAX_MTU) || 1503 dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
1463 !net_eq(dev_net(rth->u.dst.dev), net) || 1504 !net_eq(dev_net(rth->u.dst.dev), net) ||
1464 rth->rt_genid != atomic_read(&rt_genid)) 1505 !rt_is_expired(rth))
1465 continue; 1506 continue;
1466 1507
1467 if (new_mtu < 68 || new_mtu >= old_mtu) { 1508 if (new_mtu < 68 || new_mtu >= old_mtu) {
@@ -1696,7 +1737,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->fl.oif	= 0;
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
-	rth->rt_genid	= atomic_read(&rt_genid);
+	rth->rt_genid	= rt_genid(dev_net(dev));
 	rth->rt_flags	= RTCF_MULTICAST;
 	rth->rt_type	= RTN_MULTICAST;
 	if (our) {
@@ -1711,7 +1752,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	RT_CACHE_STAT_INC(in_slow_mc);
 
 	in_dev_put(in_dev);
-	hash = rt_hash(daddr, saddr, dev->ifindex);
+	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	return rt_intern_hash(hash, rth, &skb->rtable);
 
 e_nobufs:
@@ -1837,7 +1878,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	rth->u.dst.input = ip_forward;
 	rth->u.dst.output = ip_output;
-	rth->rt_genid = atomic_read(&rt_genid);
+	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
 
 	rt_set_nexthop(rth, res, itag);
 
@@ -1872,7 +1913,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
 		return err;
 
 	/* put it into the cache */
-	hash = rt_hash(daddr, saddr, fl->iif);
+	hash = rt_hash(daddr, saddr, fl->iif,
+		       rt_genid(dev_net(rth->u.dst.dev)));
 	return rt_intern_hash(hash, rth, &skb->rtable);
 }
 
@@ -1998,7 +2040,7 @@ local_input:
 		goto e_nobufs;
 
 	rth->u.dst.output= ip_rt_bug;
-	rth->rt_genid = atomic_read(&rt_genid);
+	rth->rt_genid = rt_genid(net);
 
 	atomic_set(&rth->u.dst.__refcnt, 1);
 	rth->u.dst.flags= DST_HOST;
@@ -2028,7 +2070,7 @@ local_input:
 		rth->rt_flags &= ~RTCF_LOCAL;
 	}
 	rth->rt_type	= res.type;
-	hash = rt_hash(daddr, saddr, fl.iif);
+	hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
 	err = rt_intern_hash(hash, rth, &skb->rtable);
 	goto done;
 
@@ -2079,7 +2121,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	net = dev_net(dev);
 	tos &= IPTOS_RT_MASK;
-	hash = rt_hash(daddr, saddr, iif);
+	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2091,7 +2133,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		     (rth->fl.fl4_tos ^ tos)) == 0 &&
 		    rth->fl.mark == skb->mark &&
 		    net_eq(dev_net(rth->u.dst.dev), net) &&
-		    rth->rt_genid == atomic_read(&rt_genid)) {
+		    !rt_is_expired(rth)) {
 			dst_use(&rth->u.dst, jiffies);
 			RT_CACHE_STAT_INC(in_hit);
 			rcu_read_unlock();
@@ -2219,7 +2261,7 @@ static int __mkroute_output(struct rtable **result,
 	rth->rt_spec_dst= fl->fl4_src;
 
 	rth->u.dst.output=ip_output;
-	rth->rt_genid = atomic_read(&rt_genid);
+	rth->rt_genid = rt_genid(dev_net(dev_out));
 
 	RT_CACHE_STAT_INC(out_slow_tot);
 
@@ -2268,7 +2310,8 @@ static int ip_mkroute_output(struct rtable **rp,
 	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
 	unsigned hash;
 	if (err == 0) {
-		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
+		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+			       rt_genid(dev_net(dev_out)));
 		err = rt_intern_hash(hash, rth, rp);
 	}
 
@@ -2480,7 +2523,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 	unsigned hash;
 	struct rtable *rth;
 
-	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
+	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2493,7 +2536,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
 			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
 		    net_eq(dev_net(rth->u.dst.dev), net) &&
-		    rth->rt_genid == atomic_read(&rt_genid)) {
+		    !rt_is_expired(rth)) {
 			dst_use(&rth->u.dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();
@@ -2524,7 +2567,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 };
 
 
-static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
+static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
 {
 	struct rtable *ort = *rp;
 	struct rtable *rt = (struct rtable *)
@@ -2548,7 +2591,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
 	rt->idev = ort->idev;
 	if (rt->idev)
 		in_dev_hold(rt->idev);
-	rt->rt_genid = atomic_read(&rt_genid);
+	rt->rt_genid = rt_genid(net);
 	rt->rt_flags = ort->rt_flags;
 	rt->rt_type = ort->rt_type;
 	rt->rt_dst = ort->rt_dst;
@@ -2584,7 +2627,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
 		err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
 				    flags ? XFRM_LOOKUP_WAIT : 0);
 		if (err == -EREMOTE)
-			err = ipv4_dst_blackhole(rp, flp);
+			err = ipv4_dst_blackhole(net, rp, flp);
 
 		return err;
 	}
@@ -2803,7 +2846,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
 			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
 				continue;
-			if (rt->rt_genid != atomic_read(&rt_genid))
+			if (rt_is_expired(rt))
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);
 			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -2827,19 +2870,25 @@ done:
 
 void ip_rt_multicast_event(struct in_device *in_dev)
 {
-	rt_cache_flush(0);
+	rt_cache_flush(dev_net(in_dev->dev), 0);
 }
 
 #ifdef CONFIG_SYSCTL
-static int flush_delay;
-
-static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
+static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
 				     struct file *filp, void __user *buffer,
 				     size_t *lenp, loff_t *ppos)
 {
 	if (write) {
-		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-		rt_cache_flush(flush_delay);
+		int flush_delay;
+		ctl_table ctl;
+		struct net *net;
+
+		memcpy(&ctl, __ctl, sizeof(ctl));
+		ctl.data = &flush_delay;
+		proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);
+
+		net = (struct net *)__ctl->extra1;
+		rt_cache_flush(net, flush_delay);
 		return 0;
 	}
 
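
The rewritten flush handler above removes the last piece of global state (flush_delay) by copying the ctl_table onto the stack, pointing .data at a local, and recovering the owning namespace from ->extra1, so concurrent writers in different namespaces never race on one variable. A toy userspace analog of that reentrancy trick, with hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a sysctl table entry. */
struct ctl_entry {
	const char *name;
	void *data;	/* where the parser stores the parsed integer */
	void *extra1;	/* opaque per-instance context, e.g. a namespace */
};

static void parse_int(struct ctl_entry *e, const char *buf)
{
	*(int *)e->data = atoi(buf);
}

/* Never write through the shared entry's .data: copy the entry, aim
 * .data at a stack slot, and pull per-instance context out of extra1. */
static int flush_handler(struct ctl_entry *shared, const char *buf)
{
	int delay;
	struct ctl_entry tmp;

	memcpy(&tmp, shared, sizeof(tmp));
	tmp.data = &delay;
	parse_int(&tmp, buf);
	printf("flush(ns=%p, delay=%d)\n", shared->extra1, delay);
	return 0;
}

int main(void)
{
	int unused = 0;
	struct ctl_entry flush = { "flush", &unused, (void *)0x1 };
	return flush_handler(&flush, "5");
}
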
@@ -2855,25 +2904,18 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
 						size_t newlen)
 {
 	int delay;
+	struct net *net;
 	if (newlen != sizeof(int))
 		return -EINVAL;
 	if (get_user(delay, (int __user *)newval))
 		return -EFAULT;
-	rt_cache_flush(delay);
+	net = (struct net *)table->extra1;
+	rt_cache_flush(net, delay);
 	return 0;
 }
 
 ctl_table ipv4_route_table[] = {
 	{
-		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
-		.procname	= "flush",
-		.data		= &flush_delay,
-		.maxlen		= sizeof(int),
-		.mode		= 0200,
-		.proc_handler	= &ipv4_sysctl_rtcache_flush,
-		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
-	},
-	{
 		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
 		.procname	= "gc_thresh",
 		.data		= &ipv4_dst_ops.gc_thresh,
@@ -3011,8 +3053,97 @@ ctl_table ipv4_route_table[] = {
 	},
 	{ .ctl_name = 0 }
 };
+
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
+
+static struct ctl_table ipv4_route_flush_table[] = {
+	{
+		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
+		.procname	= "flush",
+		.maxlen		= sizeof(int),
+		.mode		= 0200,
+		.proc_handler	= &ipv4_sysctl_rtcache_flush,
+		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
+	},
+	{ .ctl_name = 0 },
+};
+
+static __net_init int sysctl_route_net_init(struct net *net)
+{
+	struct ctl_table *tbl;
+
+	tbl = ipv4_route_flush_table;
+	if (net != &init_net) {
+		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+		if (tbl == NULL)
+			goto err_dup;
+	}
+	tbl[0].extra1 = net;
+
+	net->ipv4.route_hdr =
+		register_net_sysctl_table(net, ipv4_route_path, tbl);
+	if (net->ipv4.route_hdr == NULL)
+		goto err_reg;
+	return 0;
+
+err_reg:
+	if (tbl != ipv4_route_flush_table)
+		kfree(tbl);
+err_dup:
+	return -ENOMEM;
+}
+
+static __net_exit void sysctl_route_net_exit(struct net *net)
+{
+	struct ctl_table *tbl;
+
+	tbl = net->ipv4.route_hdr->ctl_table_arg;
+	unregister_net_sysctl_table(net->ipv4.route_hdr);
+	BUG_ON(tbl == ipv4_route_flush_table);
+	kfree(tbl);
+}
+
+static __net_initdata struct pernet_operations sysctl_route_ops = {
+	.init = sysctl_route_net_init,
+	.exit = sysctl_route_net_exit,
+};
 #endif
 
+
+static __net_init int rt_secret_timer_init(struct net *net)
+{
+	atomic_set(&net->ipv4.rt_genid,
+			(int) ((num_physpages ^ (num_physpages>>8)) ^
+			(jiffies ^ (jiffies >> 7))));
+
+	net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
+	net->ipv4.rt_secret_timer.data = (unsigned long)net;
+	init_timer_deferrable(&net->ipv4.rt_secret_timer);
+
+	net->ipv4.rt_secret_timer.expires =
+		jiffies + net_random() % ip_rt_secret_interval +
+		ip_rt_secret_interval;
+	add_timer(&net->ipv4.rt_secret_timer);
+	return 0;
+}
+
+static __net_exit void rt_secret_timer_exit(struct net *net)
+{
+	del_timer_sync(&net->ipv4.rt_secret_timer);
+}
+
+static __net_initdata struct pernet_operations rt_secret_timer_ops = {
+	.init = rt_secret_timer_init,
+	.exit = rt_secret_timer_exit,
+};
+
+
 #ifdef CONFIG_NET_CLS_ROUTE
 struct ip_rt_acct *ip_rt_acct __read_mostly;
 #endif /* CONFIG_NET_CLS_ROUTE */
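
Both sysctl_route_ops and rt_secret_timer_ops above rely on the pernet_operations contract: .init runs for init_net at registration time and again for every namespace created afterwards, and .exit runs as each namespace is torn down. A hedged, module-style sketch of the shape such a subsystem takes; the foo names are made up and the printks stand in for real per-namespace setup:

#include <linux/module.h>
#include <net/net_namespace.h>

/* Hypothetical subsystem: one init/exit pair per network namespace. */
static __net_init int foo_net_init(struct net *net)
{
	printk(KERN_INFO "foo: namespace %p created\n", net);
	return 0;	/* a nonzero return unwinds namespace creation */
}

static __net_exit void foo_net_exit(struct net *net)
{
	printk(KERN_INFO "foo: namespace %p going away\n", net);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	/* .init is also invoked immediately for all existing namespaces */
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
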
@@ -3031,9 +3162,6 @@ int __init ip_rt_init(void)
 {
 	int rc = 0;
 
-	atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
-			     (jiffies ^ (jiffies >> 7))));
-
 #ifdef CONFIG_NET_CLS_ROUTE
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
 	if (!ip_rt_acct)
@@ -3065,19 +3193,14 @@ int __init ip_rt_init(void)
 	devinet_init();
 	ip_fib_init();
 
-	rt_secret_timer.function = rt_secret_rebuild;
-	rt_secret_timer.data = 0;
-	init_timer_deferrable(&rt_secret_timer);
-
 	/* All the timers, started at system startup tend
 	   to synchronize. Perturb it a bit.
 	 */
 	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
 
-	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
-		ip_rt_secret_interval;
-	add_timer(&rt_secret_timer);
+	if (register_pernet_subsys(&rt_secret_timer_ops))
+		printk(KERN_ERR "Unable to setup rt_secret_timer\n");
 
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
@@ -3087,6 +3210,9 @@ int __init ip_rt_init(void)
 #endif
 	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
 
+#ifdef CONFIG_SYSCTL
+	register_pernet_subsys(&sysctl_route_ops);
+#endif
 	return rc;
 }
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index d182a2a26291..51bc24d3b8a7 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -8,8 +8,6 @@
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
- *
- * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
  */
 
 #include <linux/tcp.h>
@@ -175,7 +173,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 		;
 	*mssp = msstab[mssind] + 1;
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 
 	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
 				     th->source, th->dest, ntohl(th->seq),
@@ -271,11 +269,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
 	    (mss = cookie_check(skb, cookie)) == 0) {
-		NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c437f804ee38..14ef202a2254 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1,8 +1,6 @@
 /*
  * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem.
  *
- * $Id: sysctl_net_ipv4.c,v 1.50 2001/10/20 00:00:11 davem Exp $
- *
  * Begun April 1, 1996, Mike Shaver.
  * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS]
  */
@@ -795,7 +793,8 @@ static struct ctl_table ipv4_net_table[] = {
 		.data		= &init_net.ipv4.sysctl_icmp_ratelimit,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec
+		.proc_handler	= &proc_dointvec_ms_jiffies,
+		.strategy	= &sysctl_ms_jiffies
 	},
 	{
 		.ctl_name	= NET_IPV4_ICMP_RATEMASK,
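
Switching icmp_ratelimit to proc_dointvec_ms_jiffies/sysctl_ms_jiffies means userspace reads and writes milliseconds while the kernel stores tick counts, so the limit keeps its meaning regardless of the HZ the kernel was built with; a plain proc_dointvec would have exposed raw jiffies. Roughly the conversion those handlers apply, as a standalone sketch (not the kernel's exact rounding code):

#include <stdio.h>

#define HZ 250	/* kernel tick rate; configuration-dependent */

/* Userspace always sees milliseconds; the kernel stores tick counts. */
static unsigned long ms_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* round up */
}

static unsigned int jiffies_to_ms(unsigned long j)
{
	return (unsigned int)(j * 1000 / HZ);
}

int main(void)
{
	/* e.g. the effect of: sysctl -w net.ipv4.icmp_ratelimit=1000 */
	unsigned long j = ms_to_jiffies(1000);
	printf("1000 ms -> %lu jiffies -> %u ms\n", j, jiffies_to_ms(j));
	return 0;
}
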
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d723de18686..eec8cf7c0247 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,8 +5,6 @@
 *
 * Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
- *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -318,10 +316,10 @@ int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
-void tcp_enter_memory_pressure(void)
+void tcp_enter_memory_pressure(struct sock *sk)
 {
 	if (!tcp_memory_pressure) {
-		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 		tcp_memory_pressure = 1;
 	}
 }
@@ -346,8 +344,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
-	   by poll logic and correct handling of state changes
-	   made by another threads is impossible in any case.
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
 	 */
 
 	mask = 0;
@@ -373,10 +371,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
 	 * if and only if shutdown has been made in both directions.
 	 * Actually, it is interesting to look how Solaris and DUX
-	 * solve this dilemma. I would prefer, if PULLHUP were maskable,
+	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
 	 * then we could set it on SND_SHUTDOWN. BTW examples given
 	 * in Stevens' books assume exactly this behaviour, it explains
-	 * why PULLHUP is incompatible with POLLOUT.	--ANK
+	 * why POLLHUP is incompatible with POLLOUT.	--ANK
 	 *
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
@@ -651,7 +649,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 		}
 		__kfree_skb(skb);
 	} else {
-		sk->sk_prot->enter_memory_pressure();
+		sk->sk_prot->enter_memory_pressure(sk);
 		sk_stream_moderate_sndbuf(sk);
 	}
 	return NULL;
@@ -1155,7 +1153,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1477,7 +1475,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Restore normal policy in scheduler __ */
 
 			if ((chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1488,7 +1486,7 @@ do_prequeue:
 				tcp_prequeue_process(sk);
 
 				if ((chunk = len - tp->ucopy.len) != 0) {
-					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1603,7 +1601,7 @@ skip_copy:
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1670,12 +1668,12 @@ void tcp_set_state(struct sock *sk, int state)
 	switch (state) {
 	case TCP_ESTABLISHED:
 		if (oldstate != TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_CURRESTAB);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 		break;
 
 	case TCP_CLOSE:
 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
 		if (inet_csk(sk)->icsk_bind_hash &&
@@ -1684,7 +1682,7 @@ void tcp_set_state(struct sock *sk, int state)
 		/* fall through */
 	default:
 		if (oldstate==TCP_ESTABLISHED)
-			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 	}
 
 	/* Change state AFTER socket is unhashed to avoid closed
@@ -1795,13 +1793,13 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_KERNEL);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -1873,7 +1871,8 @@ adjudge_to_death:
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONLINGER);
 		} else {
 			const int tmo = tcp_fin_time(sk);
 
@@ -1895,7 +1894,8 @@ adjudge_to_death:
 				       "sockets\n");
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
 
@@ -2467,6 +2467,76 @@ static unsigned long tcp_md5sig_users;
 static struct tcp_md5sig_pool **tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
+int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+		      int bplen,
+		      struct tcphdr *th, unsigned int tcplen,
+		      struct tcp_md5sig_pool *hp)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__sum16 cksum;
+	struct hash_desc *desc = &hp->md5_desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	sg_init_table(sg, 4);
+
+	/* 1. The TCP pseudo-header */
+	sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
+	nbytes += bplen;
+
+	/* 2. The TCP header, excluding options, and assuming a
+	 * checksum of zero
+	 */
+	cksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(*th));
+	nbytes += sizeof(*th);
+
+	/* 3. The TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		u8 *data = (u8 *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. an independently-specified key or password, known to both
+	 * TCPs and presumably connection-specific
+	 */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	sg_mark_end(&sg[block - 1]);
+
+	/* Now store the hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
+		return -1;
+	}
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
+		return -1;
+	}
+	err = crypto_hash_final(desc, md5_hash);
+	if (err) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
+		return -1;
+	}
+
+	/* Reset header */
+	th->check = cksum;
+
+	return 0;
+}
+EXPORT_SYMBOL(tcp_calc_md5_hash);
+
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 {
 	int cpu;
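
tcp_calc_md5_hash() above assumes the caller has already filled hp->md5_blk with the protocol's pseudo-header (bplen bytes) and holds a slot in the per-CPU MD5 pool. A hypothetical caller, sketched against the existing tcp_get_md5sig_pool()/tcp_put_md5sig_pool() helpers; it is an illustration, not part of the patch:

#include <net/tcp.h>

/* Hypothetical: sign an outgoing segment with the connection's key. */
static int example_sign_segment(struct tcphdr *th, unsigned int tcplen,
				struct tcp_md5sig_key *key, int bplen,
				char *md5_hash)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
	int err;

	if (!hp)
		return -1;	/* pool not allocated */
	/* ...fill hp->md5_blk with the pseudo-header first (bplen bytes)... */
	err = tcp_calc_md5_hash(md5_hash, key, bplen, th, tcplen, hp);
	tcp_put_md5sig_pool();
	return err;
}
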
@@ -2595,7 +2665,7 @@ EXPORT_SYMBOL(__tcp_put_md5sig_pool);
 void tcp_done(struct sock *sk)
 {
 	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 2fbcc7d1b1a0..838d491dfda7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -1,8 +1,6 @@
 /*
 * tcp_diag.c	Module for monitoring TCP transport protocols sockets.
 *
- * Version:	$Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
- *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cad73b7dfef0..fac49a6e1611 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,8 +5,6 @@
 *
 * Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
- *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -949,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
+		int mib_idx;
+
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
-			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+			mib_idx = LINUX_MIB_TCPTSREORDER;
 		else if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+			mib_idx = LINUX_MIB_TCPRENOREORDER;
 		else if (tcp_is_fack(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+			mib_idx = LINUX_MIB_TCPFACKREORDER;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+			mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
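
The reshuffle above is mechanical but is repeated throughout tcp_input.c below: each branch now only selects a MIB index, and a single NET_INC_STATS_BH(sock_net(sk), mib_idx) performs the namespace-aware increment, so the net argument appears at one call site instead of four. The same shape, reduced to a self-contained toy with invented counter names:

#include <stdio.h>

enum { MIB_TS, MIB_RENO, MIB_FACK, MIB_SACK, MIB_MAX };

struct netns { unsigned long mibs[MIB_MAX]; };	/* toy per-namespace counters */

static void net_inc_stats(struct netns *net, int mib_idx)
{
	net->mibs[mib_idx]++;	/* the single, namespace-aware call site */
}

static void update_reordering(struct netns *net, int ts, int reno, int fack)
{
	int mib_idx;

	/* classify first... */
	if (ts)
		mib_idx = MIB_TS;
	else if (reno)
		mib_idx = MIB_RENO;
	else if (fack)
		mib_idx = MIB_FACK;
	else
		mib_idx = MIB_SACK;

	/* ...then count once */
	net_inc_stats(net, mib_idx);
}

int main(void)
{
	struct netns ns = { {0} };
	update_reordering(&ns, 0, 1, 0);
	printf("reno reorder events: %lu\n", ns.mibs[MIB_RENO]);
	return 0;
}
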
@@ -1155,7 +1157,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 				tp->lost_out += tcp_skb_pcount(skb);
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			}
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
 			if (before(ack_seq, new_low_seq))
 				new_low_seq = ack_seq;
@@ -1167,10 +1169,11 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 		tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 			   struct tcp_sack_block_wire *sp, int num_sacks,
 			   u32 prior_snd_una)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
 	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
 	int dup_sack = 0;
@@ -1178,7 +1181,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1187,7 +1190,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+			NET_INC_STATS_BH(sock_net(sk),
+					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
 
@@ -1432,7 +1436,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		tcp_highest_sack_reset(sk);
 	}
 
-	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
+	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
 					 num_sacks, prior_snd_una);
 	if (found_dup_sack)
 		flag |= FLAG_DSACKING_ACK;
@@ -1458,18 +1462,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		if (!tcp_is_sackblock_valid(tp, dup_sack,
 					    sp[used_sacks].start_seq,
 					    sp[used_sacks].end_seq)) {
+			int mib_idx;
+
 			if (dup_sack) {
 				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
 			} else {
 				/* Don't count olds caused by ACK reordering */
 				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
 				    !after(sp[used_sacks].end_seq, tp->snd_una))
 					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
+
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1962,7 +1970,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
@@ -2382,15 +2390,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_may_undo(tp)) {
+		int mib_idx;
+
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+			mib_idx = LINUX_MIB_TCPLOSSUNDO;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+			mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2413,7 +2425,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
 }
 
@@ -2436,7 +2448,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
-		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
 		 * If the first packet was delayed, the rest
@@ -2465,7 +2477,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tcp_undo_cwr(sk, 1);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
@@ -2562,7 +2574,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0;
+	int fast_rexmit = 0, mib_idx;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -2584,7 +2596,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
 	}
 
 	/* D. Check consistency of the current state. */
@@ -2685,9 +2697,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		/* Otherwise enter Recovery state */
 
 		if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+			mib_idx = LINUX_MIB_TCPRENORECOVERY;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->high_seq = tp->snd_nxt;
 		tp->prior_ssthresh = 0;
@@ -3198,7 +3212,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		}
 		tp->frto_counter = 0;
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
 	return 0;
 }
@@ -3251,12 +3265,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3450,6 +3464,43 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 	return 1;
 }
 
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * Parse MD5 Signature option
+ */
+u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+{
+	int length = (th->doff << 2) - sizeof (*th);
+	u8 *ptr = (u8*)(th + 1);
+
+	/* If the TCP option is too short, we can short cut */
+	if (length < TCPOLEN_MD5SIG)
+		return NULL;
+
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch(opcode) {
+		case TCPOPT_EOL:
+			return NULL;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2 || opsize > length)
+				return NULL;
+			if (opcode == TCPOPT_MD5SIG)
+				return ptr;
+		}
+		ptr += opsize - 2;
+		length -= opsize;
+	}
+	return NULL;
+}
+#endif
+
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
 	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
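
tcp_parse_md5sig_option() walks the option bytes in standard TLV fashion and returns a pointer to the start of the 16-byte digest, or NULL if no TCPOPT_MD5SIG is present. A hypothetical receive-side caller (not from this patch) might use it like this:

#include <net/tcp.h>

/* Hypothetical: decide whether an incoming header carries a signature. */
static int example_check_md5(struct tcphdr *th)
{
	u8 *hash_location = tcp_parse_md5sig_option(th);

	if (!hash_location)
		return 0;	/* no TCPOPT_MD5SIG present */
	/* hash_location points at the digest inside the options; a real
	 * caller would now recompute the hash and memcmp() the two. */
	return 1;
}
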
@@ -3662,13 +3713,19 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
3662 return 0; 3713 return 0;
3663} 3714}
3664 3715
3665static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) 3716static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
3666{ 3717{
3718 struct tcp_sock *tp = tcp_sk(sk);
3719
3667 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3720 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3721 int mib_idx;
3722
3668 if (before(seq, tp->rcv_nxt)) 3723 if (before(seq, tp->rcv_nxt))
3669 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); 3724 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
3670 else 3725 else
3671 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); 3726 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
3727
3728 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3672 3729
3673 tp->rx_opt.dsack = 1; 3730 tp->rx_opt.dsack = 1;
3674 tp->duplicate_sack[0].start_seq = seq; 3731 tp->duplicate_sack[0].start_seq = seq;
@@ -3678,10 +3735,12 @@ static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
3678 } 3735 }
3679} 3736}
3680 3737
3681static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) 3738static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
3682{ 3739{
3740 struct tcp_sock *tp = tcp_sk(sk);
3741
3683 if (!tp->rx_opt.dsack) 3742 if (!tp->rx_opt.dsack)
3684 tcp_dsack_set(tp, seq, end_seq); 3743 tcp_dsack_set(sk, seq, end_seq);
3685 else 3744 else
3686 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 3745 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
3687} 3746}
@@ -3692,7 +3751,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
3692 3751
3693 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 3752 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
3694 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 3753 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3695 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 3754 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
3696 tcp_enter_quickack_mode(sk); 3755 tcp_enter_quickack_mode(sk);
3697 3756
3698 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3757 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -3700,7 +3759,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
3700 3759
3701 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 3760 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
3702 end_seq = tp->rcv_nxt; 3761 end_seq = tp->rcv_nxt;
3703 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq); 3762 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
3704 } 3763 }
3705 } 3764 }
3706 3765
@@ -3853,7 +3912,7 @@ static void tcp_ofo_queue(struct sock *sk)
3853 __u32 dsack = dsack_high; 3912 __u32 dsack = dsack_high;
3854 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 3913 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
3855 dsack_high = TCP_SKB_CB(skb)->end_seq; 3914 dsack_high = TCP_SKB_CB(skb)->end_seq;
3856 tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack); 3915 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
3857 } 3916 }
3858 3917
3859 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 3918 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
@@ -3981,8 +4040,8 @@ queue_and_out:
3981 4040
3982 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4041 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
3983 /* A retransmit, 2nd most common case. Force an immediate ack. */ 4042 /* A retransmit, 2nd most common case. Force an immediate ack. */
3984 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 4043 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
3985 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4044 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
3986 4045
3987out_of_window: 4046out_of_window:
3988 tcp_enter_quickack_mode(sk); 4047 tcp_enter_quickack_mode(sk);
@@ -4004,7 +4063,7 @@ drop:
4004 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4063 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4005 TCP_SKB_CB(skb)->end_seq); 4064 TCP_SKB_CB(skb)->end_seq);
4006 4065
4007 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4066 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
4008 4067
4009 /* If window is closed, drop tail of packet. But after 4068 /* If window is closed, drop tail of packet. But after
4010 * remembering D-SACK for its head made in previous line. 4069 * remembering D-SACK for its head made in previous line.
@@ -4069,12 +4128,12 @@ drop:
4069 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4128 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4070 /* All the bits are present. Drop. */ 4129 /* All the bits are present. Drop. */
4071 __kfree_skb(skb); 4130 __kfree_skb(skb);
4072 tcp_dsack_set(tp, seq, end_seq); 4131 tcp_dsack_set(sk, seq, end_seq);
4073 goto add_sack; 4132 goto add_sack;
4074 } 4133 }
4075 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4134 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4076 /* Partial overlap. */ 4135 /* Partial overlap. */
4077 tcp_dsack_set(tp, seq, 4136 tcp_dsack_set(sk, seq,
4078 TCP_SKB_CB(skb1)->end_seq); 4137 TCP_SKB_CB(skb1)->end_seq);
4079 } else { 4138 } else {
4080 skb1 = skb1->prev; 4139 skb1 = skb1->prev;
@@ -4087,12 +4146,12 @@ drop:
4087 (struct sk_buff *)&tp->out_of_order_queue && 4146 (struct sk_buff *)&tp->out_of_order_queue &&
4088 after(end_seq, TCP_SKB_CB(skb1)->seq)) { 4147 after(end_seq, TCP_SKB_CB(skb1)->seq)) {
4089 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4148 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4090 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, 4149 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4091 end_seq); 4150 end_seq);
4092 break; 4151 break;
4093 } 4152 }
4094 __skb_unlink(skb1, &tp->out_of_order_queue); 4153 __skb_unlink(skb1, &tp->out_of_order_queue);
4095 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, 4154 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4096 TCP_SKB_CB(skb1)->end_seq); 4155 TCP_SKB_CB(skb1)->end_seq);
4097 __kfree_skb(skb1); 4156 __kfree_skb(skb1);
4098 } 4157 }
@@ -4123,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4123 struct sk_buff *next = skb->next; 4182 struct sk_buff *next = skb->next;
4124 __skb_unlink(skb, list); 4183 __skb_unlink(skb, list);
4125 __kfree_skb(skb); 4184 __kfree_skb(skb);
4126 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 4185 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4127 skb = next; 4186 skb = next;
4128 continue; 4187 continue;
4129 } 4188 }
@@ -4191,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4191 struct sk_buff *next = skb->next; 4250 struct sk_buff *next = skb->next;
4192 __skb_unlink(skb, list); 4251 __skb_unlink(skb, list);
4193 __kfree_skb(skb); 4252 __kfree_skb(skb);
4194 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 4253 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4195 skb = next; 4254 skb = next;
4196 if (skb == tail || 4255 if (skb == tail ||
4197 tcp_hdr(skb)->syn || 4256 tcp_hdr(skb)->syn ||
@@ -4254,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
4254 int res = 0; 4313 int res = 0;
4255 4314
4256 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4315 if (!skb_queue_empty(&tp->out_of_order_queue)) {
4257 NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); 4316 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
4258 __skb_queue_purge(&tp->out_of_order_queue); 4317 __skb_queue_purge(&tp->out_of_order_queue);
4259 4318
4260 /* Reset SACK state. A conforming SACK implementation will 4319 /* Reset SACK state. A conforming SACK implementation will
@@ -4283,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk)
4283 4342
4284 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 4343 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
4285 4344
4286 NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); 4345 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
4287 4346
4288 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 4347 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
4289 tcp_clamp_window(sk); 4348 tcp_clamp_window(sk);
@@ -4312,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk)
4312 * drop receive data on the floor. It will get retransmitted 4371 * drop receive data on the floor. It will get retransmitted
4313 * and hopefully then we'll have sufficient space. 4372 * and hopefully then we'll have sufficient space.
4314 */ 4373 */
4315 NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED); 4374 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
4316 4375
4317 /* Massive buffer overcommit. */ 4376 /* Massive buffer overcommit. */
4318 tp->pred_flags = 0; 4377 tp->pred_flags = 0;
@@ -4742,7 +4801,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4742 tcp_data_snd_check(sk); 4801 tcp_data_snd_check(sk);
4743 return 0; 4802 return 0;
4744 } else { /* Header too small */ 4803 } else { /* Header too small */
4745 TCP_INC_STATS_BH(TCP_MIB_INERRS); 4804 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4746 goto discard; 4805 goto discard;
4747 } 4806 }
4748 } else { 4807 } else {
@@ -4779,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4779 4838
4780 __skb_pull(skb, tcp_header_len); 4839 __skb_pull(skb, tcp_header_len);
4781 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4840 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4782 NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER); 4841 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
4783 } 4842 }
4784 if (copied_early) 4843 if (copied_early)
4785 tcp_cleanup_rbuf(sk, skb->len); 4844 tcp_cleanup_rbuf(sk, skb->len);
@@ -4802,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4802 if ((int)skb->truesize > sk->sk_forward_alloc) 4861 if ((int)skb->truesize > sk->sk_forward_alloc)
4803 goto step5; 4862 goto step5;
4804 4863
4805 NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); 4864 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
4806 4865
4807 /* Bulk data transfer: receiver */ 4866 /* Bulk data transfer: receiver */
4808 __skb_pull(skb, tcp_header_len); 4867 __skb_pull(skb, tcp_header_len);
@@ -4846,7 +4905,7 @@ slow_path:
4846 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4905 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4847 tcp_paws_discard(sk, skb)) { 4906 tcp_paws_discard(sk, skb)) {
4848 if (!th->rst) { 4907 if (!th->rst) {
4849 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 4908 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4850 tcp_send_dupack(sk, skb); 4909 tcp_send_dupack(sk, skb);
4851 goto discard; 4910 goto discard;
4852 } 4911 }
@@ -4881,8 +4940,8 @@ slow_path:
4881 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 4940 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4882 4941
4883 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4942 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4884 TCP_INC_STATS_BH(TCP_MIB_INERRS); 4943 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4885 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 4944 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4886 tcp_reset(sk); 4945 tcp_reset(sk);
4887 return 1; 4946 return 1;
4888 } 4947 }
@@ -4904,7 +4963,7 @@ step5:
4904 return 0; 4963 return 0;
4905 4964
4906csum_error: 4965csum_error:
4907 TCP_INC_STATS_BH(TCP_MIB_INERRS); 4966 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4908 4967
4909discard: 4968discard:
4910 __kfree_skb(skb); 4969 __kfree_skb(skb);
@@ -4938,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
4938 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 4997 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4939 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 4998 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
4940 tcp_time_stamp)) { 4999 tcp_time_stamp)) {
4941 NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED); 5000 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
4942 goto reset_and_undo; 5001 goto reset_and_undo;
4943 } 5002 }
4944 5003
@@ -5222,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5222 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5281 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
5223 tcp_paws_discard(sk, skb)) { 5282 tcp_paws_discard(sk, skb)) {
5224 if (!th->rst) { 5283 if (!th->rst) {
5225 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 5284 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5226 tcp_send_dupack(sk, skb); 5285 tcp_send_dupack(sk, skb);
5227 goto discard; 5286 goto discard;
5228 } 5287 }
@@ -5251,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5251 * Check for a SYN in window. 5310 * Check for a SYN in window.
5252 */ 5311 */
5253 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5312 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5254 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 5313 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5255 tcp_reset(sk); 5314 tcp_reset(sk);
5256 return 1; 5315 return 1;
5257 } 5316 }
@@ -5333,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5333 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5392 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5334 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 5393 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
5335 tcp_done(sk); 5394 tcp_done(sk);
5336 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 5395 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5337 return 1; 5396 return 1;
5338 } 5397 }
5339 5398
@@ -5393,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5393 if (sk->sk_shutdown & RCV_SHUTDOWN) { 5452 if (sk->sk_shutdown & RCV_SHUTDOWN) {
5394 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5453 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5395 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 5454 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
5396 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 5455 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5397 tcp_reset(sk); 5456 tcp_reset(sk);
5398 return 1; 5457 return 1;
5399 } 5458 }
@@ -5422,6 +5481,9 @@ EXPORT_SYMBOL(sysctl_tcp_ecn);
5422EXPORT_SYMBOL(sysctl_tcp_reordering); 5481EXPORT_SYMBOL(sysctl_tcp_reordering);
5423EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 5482EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5424EXPORT_SYMBOL(tcp_parse_options); 5483EXPORT_SYMBOL(tcp_parse_options);
5484#ifdef CONFIG_TCP_MD5SIG
5485EXPORT_SYMBOL(tcp_parse_md5sig_option);
5486#endif
5425EXPORT_SYMBOL(tcp_rcv_established); 5487EXPORT_SYMBOL(tcp_rcv_established);
5426EXPORT_SYMBOL(tcp_rcv_state_process); 5488EXPORT_SYMBOL(tcp_rcv_state_process);
5427EXPORT_SYMBOL(tcp_initialize_rcv_mss); 5489EXPORT_SYMBOL(tcp_initialize_rcv_mss);
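The tcp_input.c hunks above are one instance of the pattern that runs through this whole diff: every SNMP counter macro (NET_INC_STATS_BH, TCP_INC_STATS_BH and friends) grows a struct net argument, so the increment lands in the statistics of the namespace that owns the traffic instead of in a single global table. Callers pick the namespace with whatever handle they have: sock_net(sk) from a socket, twsk_net(tw) from a timewait socket (see tcp_minisocks.c below), or dev_net(skb->dev) when only the receiving device is known. A minimal userspace model of the idea follows; the names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative model only: each namespace owns a counter table, so the
 * increment macro must be told which namespace to charge. */
enum { MIB_INERRS, MIB_OUTSEGS, MIB_MAX };

struct net { unsigned long mibs[MIB_MAX]; };
struct sock { struct net *sk_net; };

static struct net *sock_net(const struct sock *sk)
{
    return sk->sk_net;
}

#define NET_INC_STATS(net, field) ((net)->mibs[field]++)

int main(void)
{
    struct net init_net = { { 0 } }, other_net = { { 0 } };
    struct sock a = { &init_net }, b = { &other_net };

    NET_INC_STATS(sock_net(&a), MIB_INERRS); /* charged to init_net */
    NET_INC_STATS(sock_net(&b), MIB_INERRS); /* charged to other_net */

    printf("init_net=%lu other_net=%lu\n",
           init_net.mibs[MIB_INERRS], other_net.mibs[MIB_INERRS]);
    return 0;
}

Threading the namespace through every call site is noisier than a global counter, but it is what makes per-namespace statistics possible at all.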
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ffe869ac1bcf..29adc668ad51 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9 *
10 * IPv4 specific functions 8 * IPv4 specific functions
11 * 9 *
12 * 10 *
@@ -91,8 +89,13 @@ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
91 __be32 addr); 89 __be32 addr);
92static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 90static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
93 __be32 saddr, __be32 daddr, 91 __be32 saddr, __be32 daddr,
94 struct tcphdr *th, int protocol, 92 struct tcphdr *th, unsigned int tcplen);
95 unsigned int tcplen); 93#else
94static inline
95struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
96{
97 return NULL;
98}
96#endif 99#endif
97 100
98struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 101struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
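The #else branch added above gives tcp_v4_md5_do_lookup() an inline stub that simply returns NULL when CONFIG_TCP_MD5SIG is off, so later call sites (tcp_v4_reqsk_send_ack() further down) can call the lookup unconditionally instead of wrapping each call in #ifdef. A sketch of the idiom under an assumed CONFIG_FEATURE option; all names here are invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct key { int id; };

#ifdef CONFIG_FEATURE
struct key *feature_lookup(int id);          /* real version elsewhere */
#else
static inline struct key *feature_lookup(int id)
{
    (void)id;
    return NULL;                             /* feature compiled out */
}
#endif

/* The caller stays free of conditional compilation: */
int main(void)
{
    struct key *k = feature_lookup(42);

    printf("key %sfound\n", k ? "" : "not ");
    return 0;
}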
@@ -172,7 +175,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
172 inet->sport, usin->sin_port, sk, 1); 175 inet->sport, usin->sin_port, sk, 1);
173 if (tmp < 0) { 176 if (tmp < 0) {
174 if (tmp == -ENETUNREACH) 177 if (tmp == -ENETUNREACH)
175 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 178 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
176 return tmp; 179 return tmp;
177 } 180 }
178 181
@@ -340,16 +343,17 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
340 struct sock *sk; 343 struct sock *sk;
341 __u32 seq; 344 __u32 seq;
342 int err; 345 int err;
346 struct net *net = dev_net(skb->dev);
343 347
344 if (skb->len < (iph->ihl << 2) + 8) { 348 if (skb->len < (iph->ihl << 2) + 8) {
345 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 349 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
346 return; 350 return;
347 } 351 }
348 352
349 sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest, 353 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(skb)); 354 iph->saddr, th->source, inet_iif(skb));
351 if (!sk) { 355 if (!sk) {
352 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 356 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
353 return; 357 return;
354 } 358 }
355 if (sk->sk_state == TCP_TIME_WAIT) { 359 if (sk->sk_state == TCP_TIME_WAIT) {
@@ -362,7 +366,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
362 * servers this needs to be solved differently. 366 * servers this needs to be solved differently.
363 */ 367 */
364 if (sock_owned_by_user(sk)) 368 if (sock_owned_by_user(sk))
365 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 369 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
366 370
367 if (sk->sk_state == TCP_CLOSE) 371 if (sk->sk_state == TCP_CLOSE)
368 goto out; 372 goto out;
@@ -371,7 +375,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
371 seq = ntohl(th->seq); 375 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN && 376 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) { 377 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 378 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 goto out; 379 goto out;
376 } 380 }
377 381
@@ -418,7 +422,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
418 BUG_TRAP(!req->sk); 422 BUG_TRAP(!req->sk);
419 423
420 if (seq != tcp_rsk(req)->snt_isn) { 424 if (seq != tcp_rsk(req)->snt_isn) {
421 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 425 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
422 goto out; 426 goto out;
423 } 427 }
424 428
@@ -540,6 +544,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
540#ifdef CONFIG_TCP_MD5SIG 544#ifdef CONFIG_TCP_MD5SIG
541 struct tcp_md5sig_key *key; 545 struct tcp_md5sig_key *key;
542#endif 546#endif
547 struct net *net;
543 548
544 /* Never send a reset in response to a reset. */ 549 /* Never send a reset in response to a reset. */
545 if (th->rst) 550 if (th->rst)
@@ -582,8 +587,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
582 key, 587 key,
583 ip_hdr(skb)->daddr, 588 ip_hdr(skb)->daddr,
584 ip_hdr(skb)->saddr, 589 ip_hdr(skb)->saddr,
585 &rep.th, IPPROTO_TCP, 590 &rep.th, arg.iov[0].iov_len);
586 arg.iov[0].iov_len);
587 } 591 }
588#endif 592#endif
589 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 593 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -591,20 +595,21 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
591 sizeof(struct tcphdr), IPPROTO_TCP, 0); 595 sizeof(struct tcphdr), IPPROTO_TCP, 0);
592 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 596 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
593 597
594 ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb, 598 net = dev_net(skb->dst->dev);
599 ip_send_reply(net->ipv4.tcp_sock, skb,
595 &arg, arg.iov[0].iov_len); 600 &arg, arg.iov[0].iov_len);
596 601
597 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 602 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
598 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 603 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
599} 604}
600 605
601/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states 606/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
602 outside socket context is ugly, certainly. What can I do? 607 outside socket context is ugly, certainly. What can I do?
603 */ 608 */
604 609
605static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, 610static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
606 struct sk_buff *skb, u32 seq, u32 ack, 611 u32 win, u32 ts, int oif,
607 u32 win, u32 ts) 612 struct tcp_md5sig_key *key)
608{ 613{
609 struct tcphdr *th = tcp_hdr(skb); 614 struct tcphdr *th = tcp_hdr(skb);
610 struct { 615 struct {
@@ -616,10 +621,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
616 ]; 621 ];
617 } rep; 622 } rep;
618 struct ip_reply_arg arg; 623 struct ip_reply_arg arg;
619#ifdef CONFIG_TCP_MD5SIG 624 struct net *net = dev_net(skb->dev);
620 struct tcp_md5sig_key *key;
621 struct tcp_md5sig_key tw_key;
622#endif
623 625
624 memset(&rep.th, 0, sizeof(struct tcphdr)); 626 memset(&rep.th, 0, sizeof(struct tcphdr));
625 memset(&arg, 0, sizeof(arg)); 627 memset(&arg, 0, sizeof(arg));
@@ -645,23 +647,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
645 rep.th.window = htons(win); 647 rep.th.window = htons(win);
646 648
647#ifdef CONFIG_TCP_MD5SIG 649#ifdef CONFIG_TCP_MD5SIG
648 /*
648 	 * The SKB holds an incoming packet, but may not have a valid ->sk
650 * pointer. This is especially the case when we're dealing with a
651 * TIME_WAIT ack, because the sk structure is long gone, and only
652 * the tcp_timewait_sock remains. So the md5 key is stashed in that
653 * structure, and we use it in preference. I believe that (twsk ||
654 * skb->sk) holds true, but we program defensively.
655 */
656 if (!twsk && skb->sk) {
657 key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
658 } else if (twsk && twsk->tw_md5_keylen) {
659 tw_key.key = twsk->tw_md5_key;
660 tw_key.keylen = twsk->tw_md5_keylen;
661 key = &tw_key;
662 } else
663 key = NULL;
664
665 if (key) { 650 if (key) {
666 int offset = (ts) ? 3 : 0; 651 int offset = (ts) ? 3 : 0;
667 652
@@ -676,21 +661,20 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
676 key, 661 key,
677 ip_hdr(skb)->daddr, 662 ip_hdr(skb)->daddr,
678 ip_hdr(skb)->saddr, 663 ip_hdr(skb)->saddr,
679 &rep.th, IPPROTO_TCP, 664 &rep.th, arg.iov[0].iov_len);
680 arg.iov[0].iov_len);
681 } 665 }
682#endif 666#endif
683 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 667 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
684 ip_hdr(skb)->saddr, /* XXX */ 668 ip_hdr(skb)->saddr, /* XXX */
685 arg.iov[0].iov_len, IPPROTO_TCP, 0); 669 arg.iov[0].iov_len, IPPROTO_TCP, 0);
686 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 670 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
687 if (twsk) 671 if (oif)
688 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if; 672 arg.bound_dev_if = oif;
689 673
690 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb, 674 ip_send_reply(net->ipv4.tcp_sock, skb,
691 &arg, arg.iov[0].iov_len); 675 &arg, arg.iov[0].iov_len);
692 676
693 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 677 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
694} 678}
695 679
696static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) 680static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -698,9 +682,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
698 struct inet_timewait_sock *tw = inet_twsk(sk); 682 struct inet_timewait_sock *tw = inet_twsk(sk);
699 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 683 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
700 684
701 tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 685 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
702 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 686 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
703 tcptw->tw_ts_recent); 687 tcptw->tw_ts_recent,
688 tw->tw_bound_dev_if,
689 tcp_twsk_md5_key(tcptw)
690 );
704 691
705 inet_twsk_put(tw); 692 inet_twsk_put(tw);
706} 693}
@@ -708,9 +695,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
708static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, 695static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
709 struct request_sock *req) 696 struct request_sock *req)
710{ 697{
711 tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, 698 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
712 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 699 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
713 req->ts_recent); 700 req->ts_recent,
701 0,
702 tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
714} 703}
715 704
716/* 705/*
@@ -1002,18 +991,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1002 991
1003static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 992static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1004 __be32 saddr, __be32 daddr, 993 __be32 saddr, __be32 daddr,
1005 struct tcphdr *th, int protocol, 994 struct tcphdr *th,
1006 unsigned int tcplen) 995 unsigned int tcplen)
1007{ 996{
1008 struct scatterlist sg[4];
1009 __u16 data_len;
1010 int block = 0;
1011 __sum16 old_checksum;
1012 struct tcp_md5sig_pool *hp; 997 struct tcp_md5sig_pool *hp;
1013 struct tcp4_pseudohdr *bp; 998 struct tcp4_pseudohdr *bp;
1014 struct hash_desc *desc;
1015 int err; 999 int err;
1016 unsigned int nbytes = 0;
1017 1000
1018 /* 1001 /*
1019 * Okay, so RFC2385 is turned on for this connection, 1002 * Okay, so RFC2385 is turned on for this connection,
@@ -1025,63 +1008,25 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1025 goto clear_hash_noput; 1008 goto clear_hash_noput;
1026 1009
1027 bp = &hp->md5_blk.ip4; 1010 bp = &hp->md5_blk.ip4;
1028 desc = &hp->md5_desc;
1029 1011
1030 /* 1012 /*
1031 * 1. the TCP pseudo-header (in the order: source IP address, 1013 * The TCP pseudo-header (in the order: source IP address,
1032 * destination IP address, zero-padded protocol number, and 1014 * destination IP address, zero-padded protocol number, and
1033 * segment length) 1015 * segment length)
1034 */ 1016 */
1035 bp->saddr = saddr; 1017 bp->saddr = saddr;
1036 bp->daddr = daddr; 1018 bp->daddr = daddr;
1037 bp->pad = 0; 1019 bp->pad = 0;
1038 bp->protocol = protocol; 1020 bp->protocol = IPPROTO_TCP;
1039 bp->len = htons(tcplen); 1021 bp->len = htons(tcplen);
1040 1022
1041 sg_init_table(sg, 4); 1023 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
1042 1024 th, tcplen, hp);
1043 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1044 nbytes += sizeof(*bp);
1045
1046 /* 2. the TCP header, excluding options, and assuming a
1047 * checksum of zero/
1048 */
1049 old_checksum = th->check;
1050 th->check = 0;
1051 sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
1052 nbytes += sizeof(struct tcphdr);
1053
1054 /* 3. the TCP segment data (if any) */
1055 data_len = tcplen - (th->doff << 2);
1056 if (data_len > 0) {
1057 unsigned char *data = (unsigned char *)th + (th->doff << 2);
1058 sg_set_buf(&sg[block++], data, data_len);
1059 nbytes += data_len;
1060 }
1061
1062 /* 4. an independently-specified key or password, known to both
1063 * TCPs and presumably connection-specific
1064 */
1065 sg_set_buf(&sg[block++], key->key, key->keylen);
1066 nbytes += key->keylen;
1067
1068 sg_mark_end(&sg[block - 1]);
1069
1070 /* Now store the Hash into the packet */
1071 err = crypto_hash_init(desc);
1072 if (err)
1073 goto clear_hash;
1074 err = crypto_hash_update(desc, sg, nbytes);
1075 if (err)
1076 goto clear_hash;
1077 err = crypto_hash_final(desc, md5_hash);
1078 if (err) 1025 if (err)
1079 goto clear_hash; 1026 goto clear_hash;
1080 1027
1081 /* Reset header, and free up the crypto */ 1028 /* Free up the crypto pool */
1082 tcp_put_md5sig_pool(); 1029 tcp_put_md5sig_pool();
1083 th->check = old_checksum;
1084
1085out: 1030out:
1086 return 0; 1031 return 0;
1087clear_hash: 1032clear_hash:
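After this hunk, tcp_v4_do_calc_md5_hash() keeps only the IPv4 pseudo-header setup; the four-part scatterlist walk deleted above (pseudo-header, TCP header with zeroed checksum, payload, key) moves into the shared tcp_calc_md5_hash() helper, which presumably also takes over the save/zero/restore of th->check that disappears here. The digest input order comes from RFC 2385 and was spelled out in the deleted comments; below is a dependency-free userspace model that just concatenates the pieces a real MD5 would consume.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Model of the RFC 2385 digest input order: pseudo-header, TCP header
 * (checksum zeroed), payload, then the shared key. We only concatenate
 * into a buffer; the real code feeds the same pieces to MD5. */
struct pseudohdr {                /* source, dest, zero, proto, length */
    uint32_t saddr, daddr;
    uint8_t  pad, protocol;
    uint16_t len;                 /* network order in the real code */
};

static size_t md5_input(uint8_t *out, const struct pseudohdr *bp,
                        const uint8_t *th, size_t thlen,
                        const uint8_t *data, size_t dlen,
                        const uint8_t *key, size_t klen)
{
    size_t off = 0;

    memcpy(out + off, bp, sizeof(*bp));  off += sizeof(*bp);
    memcpy(out + off, th, thlen);        off += thlen; /* check == 0 */
    memcpy(out + off, data, dlen);       off += dlen;
    memcpy(out + off, key, klen);        off += klen;
    return off;                          /* bytes to hash */
}

int main(void)
{
    struct pseudohdr bp = { 0x01020304, 0x05060708, 0, 6 /* TCP */, 0 };
    uint8_t th[20] = { 0 }, data[4] = { 1, 2, 3, 4 }, key[] = "secret";
    uint8_t buf[256];

    printf("%zu bytes of digest input\n",
           md5_input(buf, &bp, th, sizeof(th), data, sizeof(data),
                     key, sizeof(key) - 1));
    return 0;
}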
@@ -1095,7 +1040,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1095 struct sock *sk, 1040 struct sock *sk,
1096 struct dst_entry *dst, 1041 struct dst_entry *dst,
1097 struct request_sock *req, 1042 struct request_sock *req,
1098 struct tcphdr *th, int protocol, 1043 struct tcphdr *th,
1099 unsigned int tcplen) 1044 unsigned int tcplen)
1100{ 1045{
1101 __be32 saddr, daddr; 1046 __be32 saddr, daddr;
@@ -1111,7 +1056,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1111 } 1056 }
1112 return tcp_v4_do_calc_md5_hash(md5_hash, key, 1057 return tcp_v4_do_calc_md5_hash(md5_hash, key,
1113 saddr, daddr, 1058 saddr, daddr,
1114 th, protocol, tcplen); 1059 th, tcplen);
1115} 1060}
1116 1061
1117EXPORT_SYMBOL(tcp_v4_calc_md5_hash); 1062EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
@@ -1130,52 +1075,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1130 struct tcp_md5sig_key *hash_expected; 1075 struct tcp_md5sig_key *hash_expected;
1131 const struct iphdr *iph = ip_hdr(skb); 1076 const struct iphdr *iph = ip_hdr(skb);
1132 struct tcphdr *th = tcp_hdr(skb); 1077 struct tcphdr *th = tcp_hdr(skb);
1133 int length = (th->doff << 2) - sizeof(struct tcphdr);
1134 int genhash; 1078 int genhash;
1135 unsigned char *ptr;
1136 unsigned char newhash[16]; 1079 unsigned char newhash[16];
1137 1080
1138 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1081 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1082 hash_location = tcp_parse_md5sig_option(th);
1139 1083
1140 /*
1141 * If the TCP option length is less than the TCP_MD5SIG
1142 * option length, then we can shortcut
1143 */
1144 if (length < TCPOLEN_MD5SIG) {
1145 if (hash_expected)
1146 return 1;
1147 else
1148 return 0;
1149 }
1150
1151 /* Okay, we can't shortcut - we have to grub through the options */
1152 ptr = (unsigned char *)(th + 1);
1153 while (length > 0) {
1154 int opcode = *ptr++;
1155 int opsize;
1156
1157 switch (opcode) {
1158 case TCPOPT_EOL:
1159 goto done_opts;
1160 case TCPOPT_NOP:
1161 length--;
1162 continue;
1163 default:
1164 opsize = *ptr++;
1165 if (opsize < 2)
1166 goto done_opts;
1167 if (opsize > length)
1168 goto done_opts;
1169
1170 if (opcode == TCPOPT_MD5SIG) {
1171 hash_location = ptr;
1172 goto done_opts;
1173 }
1174 }
1175 ptr += opsize-2;
1176 length -= opsize;
1177 }
1178done_opts:
1179 /* We've parsed the options - do we have a hash? */ 1084 /* We've parsed the options - do we have a hash? */
1180 if (!hash_expected && !hash_location) 1085 if (!hash_expected && !hash_location)
1181 return 0; 1086 return 0;
@@ -1202,8 +1107,7 @@ done_opts:
1202 genhash = tcp_v4_do_calc_md5_hash(newhash, 1107 genhash = tcp_v4_do_calc_md5_hash(newhash,
1203 hash_expected, 1108 hash_expected,
1204 iph->saddr, iph->daddr, 1109 iph->saddr, iph->daddr,
1205 th, sk->sk_protocol, 1110 th, skb->len);
1206 skb->len);
1207 1111
1208 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1112 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1209 if (net_ratelimit()) { 1113 if (net_ratelimit()) {
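The open-coded option walk deleted above is replaced by one call to the new tcp_parse_md5sig_option() (exported from tcp_input.c earlier in this diff), leaving a single TCP-options parser to keep correct. For reference, a standalone sketch of the loop being centralized — same structure, not the kernel's code:

#include <stdio.h>
#include <stddef.h>

#define TCPOPT_EOL     0
#define TCPOPT_NOP     1
#define TCPOPT_MD5SIG  19
#define TCPOLEN_MD5SIG 18

/* Walk the TCP options area (opt..opt+len) and return a pointer to the
 * MD5 digest, or NULL if the option is absent or malformed. */
static const unsigned char *find_md5sig(const unsigned char *opt, int len)
{
    while (len > 0) {
        int opcode = *opt++;
        int opsize;

        switch (opcode) {
        case TCPOPT_EOL:
            return NULL;
        case TCPOPT_NOP:
            len--;
            continue;
        default:
            if (len < 2)
                return NULL;
            opsize = *opt++;
            if (opsize < 2 || opsize > len)
                return NULL;          /* malformed: bail out */
            if (opcode == TCPOPT_MD5SIG && opsize == TCPOLEN_MD5SIG)
                return opt;           /* digest follows kind+len */
            opt += opsize - 2;
            len -= opsize;
        }
    }
    return NULL;
}

int main(void)
{
    unsigned char opts[] = { TCPOPT_NOP, TCPOPT_NOP,
                             TCPOPT_MD5SIG, TCPOLEN_MD5SIG,
                             0, 1, 2, 3, 4, 5, 6, 7,
                             8, 9, 10, 11, 12, 13, 14, 15 };

    printf("md5 option %sfound\n",
           find_md5sig(opts, sizeof(opts)) ? "" : "not ");
    return 0;
}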
@@ -1347,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1347 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && 1251 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1348 (s32)(peer->tcp_ts - req->ts_recent) > 1252 (s32)(peer->tcp_ts - req->ts_recent) >
1349 TCP_PAWS_WINDOW) { 1253 TCP_PAWS_WINDOW) {
1350 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); 1254 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1351 goto drop_and_release; 1255 goto drop_and_release;
1352 } 1256 }
1353 } 1257 }
@@ -1461,9 +1365,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1461 return newsk; 1365 return newsk;
1462 1366
1463exit_overflow: 1367exit_overflow:
1464 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 1368 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1465exit: 1369exit:
1466 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 1370 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1467 dst_release(dst); 1371 dst_release(dst);
1468 return NULL; 1372 return NULL;
1469} 1373}
@@ -1590,7 +1494,7 @@ discard:
1590 return 0; 1494 return 0;
1591 1495
1592csum_err: 1496csum_err:
1593 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1497 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1594 goto discard; 1498 goto discard;
1595} 1499}
1596 1500
@@ -1604,12 +1508,13 @@ int tcp_v4_rcv(struct sk_buff *skb)
1604 struct tcphdr *th; 1508 struct tcphdr *th;
1605 struct sock *sk; 1509 struct sock *sk;
1606 int ret; 1510 int ret;
1511 struct net *net = dev_net(skb->dev);
1607 1512
1608 if (skb->pkt_type != PACKET_HOST) 1513 if (skb->pkt_type != PACKET_HOST)
1609 goto discard_it; 1514 goto discard_it;
1610 1515
1611 /* Count it even if it's bad */ 1516 /* Count it even if it's bad */
1612 TCP_INC_STATS_BH(TCP_MIB_INSEGS); 1517 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1613 1518
1614 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 1519 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1615 goto discard_it; 1520 goto discard_it;
@@ -1638,7 +1543,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
1638 TCP_SKB_CB(skb)->flags = iph->tos; 1543 TCP_SKB_CB(skb)->flags = iph->tos;
1639 TCP_SKB_CB(skb)->sacked = 0; 1544 TCP_SKB_CB(skb)->sacked = 0;
1640 1545
1641 sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, 1546 sk = __inet_lookup(net, &tcp_hashinfo, iph->saddr,
1642 th->source, iph->daddr, th->dest, inet_iif(skb)); 1547 th->source, iph->daddr, th->dest, inet_iif(skb));
1643 if (!sk) 1548 if (!sk)
1644 goto no_tcp_socket; 1549 goto no_tcp_socket;
@@ -1685,7 +1590,7 @@ no_tcp_socket:
1685 1590
1686 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1591 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1687bad_packet: 1592bad_packet:
1688 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1593 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1689 } else { 1594 } else {
1690 tcp_v4_send_reset(NULL, skb); 1595 tcp_v4_send_reset(NULL, skb);
1691 } 1596 }
@@ -1706,7 +1611,7 @@ do_time_wait:
1706 } 1611 }
1707 1612
1708 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1613 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1709 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1614 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1710 inet_twsk_put(inet_twsk(sk)); 1615 inet_twsk_put(inet_twsk(sk));
1711 goto discard_it; 1616 goto discard_it;
1712 } 1617 }
@@ -1871,7 +1776,7 @@ static int tcp_v4_init_sock(struct sock *sk)
1871 return 0; 1776 return 0;
1872} 1777}
1873 1778
1874int tcp_v4_destroy_sock(struct sock *sk) 1779void tcp_v4_destroy_sock(struct sock *sk)
1875{ 1780{
1876 struct tcp_sock *tp = tcp_sk(sk); 1781 struct tcp_sock *tp = tcp_sk(sk);
1877 1782
@@ -1915,8 +1820,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
1915 } 1820 }
1916 1821
1917 atomic_dec(&tcp_sockets_allocated); 1822 atomic_dec(&tcp_sockets_allocated);
1918
1919 return 0;
1920} 1823}
1921 1824
1922EXPORT_SYMBOL(tcp_v4_destroy_sock); 1825EXPORT_SYMBOL(tcp_v4_destroy_sock);
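tcp_v4_destroy_sock() also changes from int to void in the hunks above: it always returned 0, and with udp_destroy_sock() getting the same treatment later in this diff, no user of the destroy callback ever looked at the result. The API tightening in miniature, with invented names:

#include <stdio.h>

/* When every implementation returns 0 and no caller inspects the
 * result, make the callback void so the compiler enforces the simpler
 * contract. */
struct proto {
    void (*destroy)(int id);      /* was: int (*destroy)(int id); */
};

static void my_destroy(int id)
{
    printf("destroying %d\n", id);
    /* no more dead "return 0;" */
}

int main(void)
{
    struct proto p = { my_destroy };

    p.destroy(42);
    return 0;
}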
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8245247a6ceb..204c42162660 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -246,7 +244,7 @@ kill:
246 } 244 }
247 245
248 if (paws_reject) 246 if (paws_reject)
249 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 247 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
250 248
251 if (!th->rst) { 249 if (!th->rst) {
252 /* In this case we must reset the TIMEWAIT timer. 250 /* In this case we must reset the TIMEWAIT timer.
@@ -482,7 +480,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
482 newtp->rx_opt.mss_clamp = req->mss; 480 newtp->rx_opt.mss_clamp = req->mss;
483 TCP_ECN_openreq_child(newtp, req); 481 TCP_ECN_openreq_child(newtp, req);
484 482
485 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); 483 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
486 } 484 }
487 return newsk; 485 return newsk;
488} 486}
@@ -613,7 +611,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
613 if (!(flg & TCP_FLAG_RST)) 611 if (!(flg & TCP_FLAG_RST))
614 req->rsk_ops->send_ack(skb, req); 612 req->rsk_ops->send_ack(skb, req);
615 if (paws_reject) 613 if (paws_reject)
616 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 614 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
617 return NULL; 615 return NULL;
618 } 616 }
619 617
@@ -632,7 +630,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
632 * "fourth, check the SYN bit" 630 * "fourth, check the SYN bit"
633 */ 631 */
634 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { 632 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
635 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 633 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
636 goto embryonic_reset; 634 goto embryonic_reset;
637 } 635 }
638 636
@@ -697,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
697 } 695 }
698 696
699 embryonic_reset: 697 embryonic_reset:
700 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); 698 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
701 if (!(flg & TCP_FLAG_RST)) 699 if (!(flg & TCP_FLAG_RST))
702 req->rsk_ops->send_reset(sk, skb); 700 req->rsk_ops->send_reset(sk, skb);
703 701
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ad993ecb4810..36a19707f67f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -607,7 +605,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
607 md5, 605 md5,
608 sk, NULL, NULL, 606 sk, NULL, NULL,
609 tcp_hdr(skb), 607 tcp_hdr(skb),
610 sk->sk_protocol,
611 skb->len); 608 skb->len);
612 } 609 }
613#endif 610#endif
@@ -621,7 +618,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
621 tcp_event_data_sent(tp, skb, sk); 618 tcp_event_data_sent(tp, skb, sk);
622 619
623 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 620 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
624 TCP_INC_STATS(TCP_MIB_OUTSEGS); 621 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
625 622
626 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 623 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
627 if (likely(err <= 0)) 624 if (likely(err <= 0))
@@ -1913,7 +1910,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1913 1910
1914 if (err == 0) { 1911 if (err == 0) {
1915 /* Update global TCP statistics. */ 1912 /* Update global TCP statistics. */
1916 TCP_INC_STATS(TCP_MIB_RETRANSSEGS); 1913 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
1917 1914
1918 tp->total_retrans++; 1915 tp->total_retrans++;
1919 1916
@@ -1988,14 +1985,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1988 1985
1989 if (sacked & TCPCB_LOST) { 1986 if (sacked & TCPCB_LOST) {
1990 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 1987 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1988 int mib_idx;
1989
1991 if (tcp_retransmit_skb(sk, skb)) { 1990 if (tcp_retransmit_skb(sk, skb)) {
1992 tp->retransmit_skb_hint = NULL; 1991 tp->retransmit_skb_hint = NULL;
1993 return; 1992 return;
1994 } 1993 }
1995 if (icsk->icsk_ca_state != TCP_CA_Loss) 1994 if (icsk->icsk_ca_state != TCP_CA_Loss)
1996 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 1995 mib_idx = LINUX_MIB_TCPFASTRETRANS;
1997 else 1996 else
1998 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 1997 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
1998 NET_INC_STATS_BH(sock_net(sk), mib_idx);
1999 1999
2000 if (skb == tcp_write_queue_head(sk)) 2000 if (skb == tcp_write_queue_head(sk))
2001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
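With sock_net(sk) added to every increment, repeating the full NET_INC_STATS_BH() call in each branch of the retransmit accounting would get bulky, so the branches now select a mib_idx and the counter is bumped once afterwards; tcp_timer.c below restructures its retransmit-failure accounting the same way. The shape of it, as a tiny standalone program:

#include <stdio.h>

enum { MIB_FASTRETRANS, MIB_SLOWSTARTRETRANS, MIB_MAX };

static unsigned long mibs[MIB_MAX];

/* Pick the counter in the branches, bump it once: the increment with
 * its longer argument list is no longer duplicated per branch. */
static void account_retransmit(int in_loss_state)
{
    int mib_idx;

    if (!in_loss_state)
        mib_idx = MIB_FASTRETRANS;
    else
        mib_idx = MIB_SLOWSTARTRETRANS;

    mibs[mib_idx]++;
}

int main(void)
{
    account_retransmit(0);
    account_retransmit(1);
    printf("fast=%lu slow=%lu\n",
           mibs[MIB_FASTRETRANS], mibs[MIB_SLOWSTARTRETRANS]);
    return 0;
}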
@@ -2065,7 +2065,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2065 inet_csk(sk)->icsk_rto, 2065 inet_csk(sk)->icsk_rto,
2066 TCP_RTO_MAX); 2066 TCP_RTO_MAX);
2067 2067
2068 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 2068 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
2069 } 2069 }
2070} 2070}
2071 2071
@@ -2119,7 +2119,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2119 /* NOTE: No TCP options attached and we never retransmit this. */ 2119 /* NOTE: No TCP options attached and we never retransmit this. */
2120 skb = alloc_skb(MAX_TCP_HEADER, priority); 2120 skb = alloc_skb(MAX_TCP_HEADER, priority);
2121 if (!skb) { 2121 if (!skb) {
2122 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2122 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2123 return; 2123 return;
2124 } 2124 }
2125 2125
@@ -2130,9 +2130,9 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2130 /* Send it off. */ 2130 /* Send it off. */
2131 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2131 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2132 if (tcp_transmit_skb(sk, skb, 0, priority)) 2132 if (tcp_transmit_skb(sk, skb, 0, priority))
2133 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2133 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2134 2134
2135 TCP_INC_STATS(TCP_MIB_OUTRSTS); 2135 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2136} 2136}
2137 2137
2138/* WARNING: This routine must only be called when we have already sent 2138/* WARNING: This routine must only be called when we have already sent
@@ -2258,7 +2258,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2258 ); 2258 );
2259 2259
2260 th->doff = (tcp_header_size >> 2); 2260 th->doff = (tcp_header_size >> 2);
2261 TCP_INC_STATS(TCP_MIB_OUTSEGS); 2261 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
2262 2262
2263#ifdef CONFIG_TCP_MD5SIG 2263#ifdef CONFIG_TCP_MD5SIG
2264 /* Okay, we have all we need - do the md5 hash if needed */ 2264 /* Okay, we have all we need - do the md5 hash if needed */
@@ -2266,7 +2266,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2266 tp->af_specific->calc_md5_hash(md5_hash_location, 2266 tp->af_specific->calc_md5_hash(md5_hash_location,
2267 md5, 2267 md5,
2268 NULL, dst, req, 2268 NULL, dst, req,
2269 tcp_hdr(skb), sk->sk_protocol, 2269 tcp_hdr(skb),
2270 skb->len); 2270 skb->len);
2271 } 2271 }
2272#endif 2272#endif
@@ -2367,7 +2367,7 @@ int tcp_connect(struct sock *sk)
2367 */ 2367 */
2368 tp->snd_nxt = tp->write_seq; 2368 tp->snd_nxt = tp->write_seq;
2369 tp->pushed_seq = tp->write_seq; 2369 tp->pushed_seq = tp->write_seq;
2370 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 2370 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2371 2371
2372 /* Timer for repeating the SYN until an answer. */ 2372 /* Timer for repeating the SYN until an answer. */
2373 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2373 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 63ed9d6830e7..328e0cf42b3c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -50,7 +48,7 @@ static void tcp_write_err(struct sock *sk)
50 sk->sk_error_report(sk); 48 sk->sk_error_report(sk);
51 49
52 tcp_done(sk); 50 tcp_done(sk);
53 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT); 51 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
54} 52}
55 53
56/* Do not allow orphaned sockets to eat all our resources. 54/* Do not allow orphaned sockets to eat all our resources.
@@ -91,7 +89,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
91 if (do_reset) 89 if (do_reset)
92 tcp_send_active_reset(sk, GFP_ATOMIC); 90 tcp_send_active_reset(sk, GFP_ATOMIC);
93 tcp_done(sk); 91 tcp_done(sk);
94 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 92 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
95 return 1; 93 return 1;
96 } 94 }
97 return 0; 95 return 0;
@@ -181,7 +179,7 @@ static void tcp_delack_timer(unsigned long data)
181 if (sock_owned_by_user(sk)) { 179 if (sock_owned_by_user(sk)) {
182 /* Try again later. */ 180 /* Try again later. */
183 icsk->icsk_ack.blocked = 1; 181 icsk->icsk_ack.blocked = 1;
184 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); 182 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
185 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); 183 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
186 goto out_unlock; 184 goto out_unlock;
187 } 185 }
@@ -200,7 +198,7 @@ static void tcp_delack_timer(unsigned long data)
200 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 198 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
201 struct sk_buff *skb; 199 struct sk_buff *skb;
202 200
203 NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED); 201 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
204 202
205 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 203 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
206 sk->sk_backlog_rcv(sk, skb); 204 sk->sk_backlog_rcv(sk, skb);
@@ -220,7 +218,7 @@ static void tcp_delack_timer(unsigned long data)
220 icsk->icsk_ack.ato = TCP_ATO_MIN; 218 icsk->icsk_ack.ato = TCP_ATO_MIN;
221 } 219 }
222 tcp_send_ack(sk); 220 tcp_send_ack(sk);
223 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); 221 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
224 } 222 }
225 TCP_CHECK_TIMER(sk); 223 TCP_CHECK_TIMER(sk);
226 224
@@ -328,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
328 goto out; 326 goto out;
329 327
330 if (icsk->icsk_retransmits == 0) { 328 if (icsk->icsk_retransmits == 0) {
329 int mib_idx;
330
331 if (icsk->icsk_ca_state == TCP_CA_Disorder || 331 if (icsk->icsk_ca_state == TCP_CA_Disorder ||
332 icsk->icsk_ca_state == TCP_CA_Recovery) { 332 icsk->icsk_ca_state == TCP_CA_Recovery) {
333 if (tcp_is_sack(tp)) { 333 if (tcp_is_sack(tp)) {
334 if (icsk->icsk_ca_state == TCP_CA_Recovery) 334 if (icsk->icsk_ca_state == TCP_CA_Recovery)
335 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 335 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
336 else 336 else
337 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 337 mib_idx = LINUX_MIB_TCPSACKFAILURES;
338 } else { 338 } else {
339 if (icsk->icsk_ca_state == TCP_CA_Recovery) 339 if (icsk->icsk_ca_state == TCP_CA_Recovery)
340 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 340 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
341 else 341 else
342 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 342 mib_idx = LINUX_MIB_TCPRENOFAILURES;
343 } 343 }
344 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { 344 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
345 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 345 mib_idx = LINUX_MIB_TCPLOSSFAILURES;
346 } else { 346 } else {
347 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 347 mib_idx = LINUX_MIB_TCPTIMEOUTS;
348 } 348 }
349 NET_INC_STATS_BH(sock_net(sk), mib_idx);
349 } 350 }
350 351
351 if (tcp_use_frto(sk)) { 352 if (tcp_use_frto(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 56fcda3694ba..1560d11ba6ef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The User Datagram Protocol (UDP). 6 * The User Datagram Protocol (UDP).
7 * 7 *
8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
@@ -136,7 +134,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
136 struct sock *sk; 134 struct sock *sk;
137 struct hlist_node *node; 135 struct hlist_node *node;
138 136
139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) 137 sk_for_each(sk, node, &udptable[udp_hashfn(net, num)])
140 if (net_eq(sock_net(sk), net) && sk->sk_hash == num) 138 if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
141 return 1; 139 return 1;
142 return 0; 140 return 0;
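udp.c replaces every open-coded num & (UDP_HTABLE_SIZE - 1) bucket computation with udp_hashfn(net, num). Besides centralizing the mask, passing the namespace lets the bucket choice mix in a per-net value, so identical ports bound in different namespaces do not all pile into one hash chain. A sketch of that idea follows; the actual kernel formula and the hash_mix field are assumptions here.

#include <stdio.h>

#define UDP_HTABLE_SIZE 128   /* must stay a power of two for the mask */

struct net { unsigned hash_mix; };

/* Sketch: fold a per-namespace value into the bucket choice so the
 * same port hashes to different chains in different namespaces. */
static unsigned udp_hashfn(const struct net *net, unsigned num)
{
    return (num + net->hash_mix) & (UDP_HTABLE_SIZE - 1);
}

int main(void)
{
    struct net a = { 0 }, b = { 17 };

    printf("port 53 -> bucket %u in a, %u in b\n",
           udp_hashfn(&a, 53), udp_hashfn(&b, 53));
    return 0;
}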
@@ -176,7 +174,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
176 for (i = 0; i < UDP_HTABLE_SIZE; i++) { 174 for (i = 0; i < UDP_HTABLE_SIZE; i++) {
177 int size = 0; 175 int size = 0;
178 176
179 head = &udptable[rover & (UDP_HTABLE_SIZE - 1)]; 177 head = &udptable[udp_hashfn(net, rover)];
180 if (hlist_empty(head)) 178 if (hlist_empty(head))
181 goto gotit; 179 goto gotit;
182 180
@@ -213,7 +211,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
213gotit: 211gotit:
214 snum = rover; 212 snum = rover;
215 } else { 213 } else {
216 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 214 head = &udptable[udp_hashfn(net, snum)];
217 215
218 sk_for_each(sk2, node, head) 216 sk_for_each(sk2, node, head)
219 if (sk2->sk_hash == snum && 217 if (sk2->sk_hash == snum &&
@@ -229,7 +227,7 @@ gotit:
229 inet_sk(sk)->num = snum; 227 inet_sk(sk)->num = snum;
230 sk->sk_hash = snum; 228 sk->sk_hash = snum;
231 if (sk_unhashed(sk)) { 229 if (sk_unhashed(sk)) {
232 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 230 head = &udptable[udp_hashfn(net, snum)];
233 sk_add_node(sk, head); 231 sk_add_node(sk, head);
234 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 232 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
235 } 233 }
@@ -266,7 +264,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
266 int badness = -1; 264 int badness = -1;
267 265
268 read_lock(&udp_hash_lock); 266 read_lock(&udp_hash_lock);
269 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 267 sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
270 struct inet_sock *inet = inet_sk(sk); 268 struct inet_sock *inet = inet_sk(sk);
271 269
272 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 270 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
@@ -356,11 +354,12 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
356 struct sock *sk; 354 struct sock *sk;
357 int harderr; 355 int harderr;
358 int err; 356 int err;
357 struct net *net = dev_net(skb->dev);
359 358
360 sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest, 359 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
361 iph->saddr, uh->source, skb->dev->ifindex, udptable); 360 iph->saddr, uh->source, skb->dev->ifindex, udptable);
362 if (sk == NULL) { 361 if (sk == NULL) {
363 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 362 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
364 return; /* No socket for error */ 363 return; /* No socket for error */
365 } 364 }
366 365
@@ -528,7 +527,8 @@ out:
528 up->len = 0; 527 up->len = 0;
529 up->pending = 0; 528 up->pending = 0;
530 if (!err) 529 if (!err)
531 UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); 530 UDP_INC_STATS_USER(sock_net(sk),
531 UDP_MIB_OUTDATAGRAMS, is_udplite);
532 return err; 532 return err;
533} 533}
534 534
@@ -656,11 +656,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
656 .uli_u = { .ports = 656 .uli_u = { .ports =
657 { .sport = inet->sport, 657 { .sport = inet->sport,
658 .dport = dport } } }; 658 .dport = dport } } };
659 struct net *net = sock_net(sk);
660
659 security_sk_classify_flow(sk, &fl); 661 security_sk_classify_flow(sk, &fl);
660 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1); 662 err = ip_route_output_flow(net, &rt, &fl, sk, 1);
661 if (err) { 663 if (err) {
662 if (err == -ENETUNREACH) 664 if (err == -ENETUNREACH)
663 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 665 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
664 goto out; 666 goto out;
665 } 667 }
666 668
@@ -727,7 +729,8 @@ out:
727 * seems like overkill. 729 * seems like overkill.
728 */ 730 */
729 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 731 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
730 UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); 732 UDP_INC_STATS_USER(sock_net(sk),
733 UDP_MIB_SNDBUFERRORS, is_udplite);
731 } 734 }
732 return err; 735 return err;
733 736
@@ -890,7 +893,8 @@ try_again:
890 goto out_free; 893 goto out_free;
891 894
892 if (!peeked) 895 if (!peeked)
893 UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); 896 UDP_INC_STATS_USER(sock_net(sk),
897 UDP_MIB_INDATAGRAMS, is_udplite);
894 898
895 sock_recv_timestamp(msg, sk, skb); 899 sock_recv_timestamp(msg, sk, skb);
896 900
@@ -919,7 +923,7 @@ out:
919csum_copy_err: 923csum_copy_err:
920 lock_sock(sk); 924 lock_sock(sk);
921 if (!skb_kill_datagram(sk, skb, flags)) 925 if (!skb_kill_datagram(sk, skb, flags))
922 UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); 926 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
923 release_sock(sk); 927 release_sock(sk);
924 928
925 if (noblock) 929 if (noblock)
@@ -990,7 +994,8 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
990 994
991 ret = (*up->encap_rcv)(sk, skb); 995 ret = (*up->encap_rcv)(sk, skb);
992 if (ret <= 0) { 996 if (ret <= 0) {
993 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 997 UDP_INC_STATS_BH(sock_net(sk),
998 UDP_MIB_INDATAGRAMS,
994 is_udplite); 999 is_udplite);
995 return -ret; 1000 return -ret;
996 } 1001 }
@@ -1042,15 +1047,18 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
1042 1047
1043 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 1048 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
1044 /* Note that an ENOMEM error is charged twice */ 1049 /* Note that an ENOMEM error is charged twice */
1045 if (rc == -ENOMEM) 1050 if (rc == -ENOMEM) {
1046 UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); 1051 UDP_INC_STATS_BH(sock_net(sk),
1052 UDP_MIB_RCVBUFERRORS, is_udplite);
1053 atomic_inc(&sk->sk_drops);
1054 }
1047 goto drop; 1055 goto drop;
1048 } 1056 }
1049 1057
1050 return 0; 1058 return 0;
1051 1059
1052drop: 1060drop:
1053 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 1061 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1054 kfree_skb(skb); 1062 kfree_skb(skb);
1055 return -1; 1063 return -1;
1056} 1064}
@@ -1061,7 +1069,7 @@ drop:
1061 * Note: called only from the BH handler context, 1069 * Note: called only from the BH handler context,
1062 * so we don't need to lock the hashes. 1070 * so we don't need to lock the hashes.
1063 */ 1071 */
1064static int __udp4_lib_mcast_deliver(struct sk_buff *skb, 1072static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1065 struct udphdr *uh, 1073 struct udphdr *uh,
1066 __be32 saddr, __be32 daddr, 1074 __be32 saddr, __be32 daddr,
1067 struct hlist_head udptable[]) 1075 struct hlist_head udptable[])
@@ -1070,7 +1078,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
1070 int dif; 1078 int dif;
1071 1079
1072 read_lock(&udp_hash_lock); 1080 read_lock(&udp_hash_lock);
1073 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); 1081 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
1074 dif = skb->dev->ifindex; 1082 dif = skb->dev->ifindex;
1075 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 1083 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
1076 if (sk) { 1084 if (sk) {
@@ -1158,6 +1166,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1158 struct rtable *rt = (struct rtable*)skb->dst; 1166 struct rtable *rt = (struct rtable*)skb->dst;
1159 __be32 saddr = ip_hdr(skb)->saddr; 1167 __be32 saddr = ip_hdr(skb)->saddr;
1160 __be32 daddr = ip_hdr(skb)->daddr; 1168 __be32 daddr = ip_hdr(skb)->daddr;
1169 struct net *net = dev_net(skb->dev);
1161 1170
1162 /* 1171 /*
1163 * Validate the packet. 1172 * Validate the packet.
@@ -1180,9 +1189,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1180 goto csum_error; 1189 goto csum_error;
1181 1190
1182 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1191 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1183 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1192 return __udp4_lib_mcast_deliver(net, skb, uh,
1193 saddr, daddr, udptable);
1184 1194
1185 sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, 1195 sk = __udp4_lib_lookup(net, saddr, uh->source, daddr,
1186 uh->dest, inet_iif(skb), udptable); 1196 uh->dest, inet_iif(skb), udptable);
1187 1197
1188 if (sk != NULL) { 1198 if (sk != NULL) {
@@ -1211,7 +1221,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1211 if (udp_lib_checksum_complete(skb)) 1221 if (udp_lib_checksum_complete(skb))
1212 goto csum_error; 1222 goto csum_error;
1213 1223
1214 UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1224 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1215 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 1225 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1216 1226
1217 /* 1227 /*
@@ -1245,7 +1255,7 @@ csum_error:
1245 ntohs(uh->dest), 1255 ntohs(uh->dest),
1246 ulen); 1256 ulen);
1247drop: 1257drop:
1248 UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1258 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1249 kfree_skb(skb); 1259 kfree_skb(skb);
1250 return 0; 1260 return 0;
1251} 1261}
@@ -1255,12 +1265,11 @@ int udp_rcv(struct sk_buff *skb)
1255 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); 1265 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
1256} 1266}
1257 1267
1258int udp_destroy_sock(struct sock *sk) 1268void udp_destroy_sock(struct sock *sk)
1259{ 1269{
1260 lock_sock(sk); 1270 lock_sock(sk);
1261 udp_flush_pending_frames(sk); 1271 udp_flush_pending_frames(sk);
1262 release_sock(sk); 1272 release_sock(sk);
1263 return 0;
1264} 1273}
1265 1274
1266/* 1275/*
@@ -1453,7 +1462,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1453 spin_lock_bh(&rcvq->lock); 1462 spin_lock_bh(&rcvq->lock);
1454 while ((skb = skb_peek(rcvq)) != NULL && 1463 while ((skb = skb_peek(rcvq)) != NULL &&
1455 udp_lib_checksum_complete(skb)) { 1464 udp_lib_checksum_complete(skb)) {
1456 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); 1465 UDP_INC_STATS_BH(sock_net(sk),
1466 UDP_MIB_INERRORS, is_lite);
1457 __skb_unlink(skb, rcvq); 1467 __skb_unlink(skb, rcvq);
1458 kfree_skb(skb); 1468 kfree_skb(skb);
1459 } 1469 }
@@ -1629,12 +1639,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1629 __u16 srcp = ntohs(inet->sport); 1639 __u16 srcp = ntohs(inet->sport);
1630 1640
1631 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 1641 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
1632 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p%n", 1642 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
1633 bucket, src, srcp, dest, destp, sp->sk_state, 1643 bucket, src, srcp, dest, destp, sp->sk_state,
1634 atomic_read(&sp->sk_wmem_alloc), 1644 atomic_read(&sp->sk_wmem_alloc),
1635 atomic_read(&sp->sk_rmem_alloc), 1645 atomic_read(&sp->sk_rmem_alloc),
1636 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 1646 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1637 atomic_read(&sp->sk_refcnt), sp, len); 1647 atomic_read(&sp->sk_refcnt), sp,
1648 atomic_read(&sp->sk_drops), len);
1638} 1649}
1639 1650
1640int udp4_seq_show(struct seq_file *seq, void *v) 1651int udp4_seq_show(struct seq_file *seq, void *v)
@@ -1643,7 +1654,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
1643 seq_printf(seq, "%-127s\n", 1654 seq_printf(seq, "%-127s\n",
1644 " sl local_address rem_address st tx_queue " 1655 " sl local_address rem_address st tx_queue "
1645 "rx_queue tr tm->when retrnsmt uid timeout " 1656 "rx_queue tr tm->when retrnsmt uid timeout "
1646 "inode"); 1657 "inode ref pointer drops");
1647 else { 1658 else {
1648 struct udp_iter_state *state = seq->private; 1659 struct udp_iter_state *state = seq->private;
1649 int len; 1660 int len;
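udp.c also gains drop accounting: an -ENOMEM from sock_queue_rcv_skb() now increments sk->sk_drops, and udp4_format_sock() plus the header line grow a trailing drops column, so /proc/net/udp reports per-socket drops. A minimal model of the counter side, using C11 atomics in place of the kernel's atomic_t:

#include <stdio.h>
#include <stdatomic.h>

struct sock {
    atomic_long sk_drops;
};

/* Try to queue a datagram; on failure, charge the drop to the socket
 * so it shows up in the /proc formatter's last column. */
static int queue_rcv(struct sock *sk, int room)
{
    if (!room) {
        atomic_fetch_add(&sk->sk_drops, 1);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct sock sk = { 0 };

    queue_rcv(&sk, 0);
    queue_rcv(&sk, 1);
    printf("... drops %ld\n", atomic_load(&sk.sk_drops));
    return 0;
}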
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 7288bf7977fb..2e9bad2fa1bc 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -26,7 +26,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
26extern int udp_sendpage(struct sock *sk, struct page *page, int offset, 26extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
27 size_t size, int flags); 27 size_t size, int flags);
28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
29extern int udp_destroy_sock(struct sock *sk); 29extern void udp_destroy_sock(struct sock *sk);
30 30
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
32extern int udp4_seq_show(struct seq_file *seq, void *v); 32extern int udp4_seq_show(struct seq_file *seq, void *v);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 72ce26b6c4d3..4ad16b6d5138 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). 2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828).
3 * 3 *
4 * Version: $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $
5 *
6 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 4 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ff61a5cdb0b3..30184e0dd74c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -121,6 +119,7 @@ static void ipv6_regen_rndid(unsigned long data);
121static int desync_factor = MAX_DESYNC_FACTOR * HZ; 119static int desync_factor = MAX_DESYNC_FACTOR * HZ;
122#endif 120#endif
123 121
122static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
124static int ipv6_count_addresses(struct inet6_dev *idev); 123static int ipv6_count_addresses(struct inet6_dev *idev);
125 124
126/* 125/*
@@ -185,6 +184,8 @@ struct ipv6_devconf ipv6_devconf __read_mostly = {
185#endif 184#endif
186 .proxy_ndp = 0, 185 .proxy_ndp = 0,
187 .accept_source_route = 0, /* we do not accept RH0 by default. */ 186 .accept_source_route = 0, /* we do not accept RH0 by default. */
187 .disable_ipv6 = 0,
188 .accept_dad = 1,
188}; 189};
189 190
190static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { 191static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -217,6 +218,8 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
217#endif 218#endif
218 .proxy_ndp = 0, 219 .proxy_ndp = 0,
219 .accept_source_route = 0, /* we do not accept RH0 by default. */ 220 .accept_source_route = 0, /* we do not accept RH0 by default. */
221 .disable_ipv6 = 0,
222 .accept_dad = 1,
220}; 223};
221 224
222/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ 225/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
@@ -226,9 +229,15 @@ const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_IN
226const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; 229const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
227 230
228/* Check if a valid qdisc is available */ 231/* Check if a valid qdisc is available */
229static inline int addrconf_qdisc_ok(struct net_device *dev) 232static inline bool addrconf_qdisc_ok(const struct net_device *dev)
233{
234 return !qdisc_tx_is_noop(dev);
235}
236
237/* Check if a route is valid prefix route */
238static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
230{ 239{
231 return (dev->qdisc != &noop_qdisc); 240 return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
232} 241}
233 242
234static void addrconf_del_timer(struct inet6_ifaddr *ifp) 243static void addrconf_del_timer(struct inet6_ifaddr *ifp)
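The new addrconf_is_prefix_route() above gives a name to the (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0 test that was previously repeated at two call sites, both converted further down (in ipv6_del_addr() and addrconf_prefix_rcv()). The same predicate in self-contained form; the flag values below are illustrative, not the kernel's.

#include <stdio.h>

#define RTF_GATEWAY 0x0002    /* illustrative values */
#define RTF_DEFAULT 0x0001

struct rt6_info { unsigned rt6i_flags; };

/* Name the flag test once instead of repeating the mask expression
 * at every call site. */
static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
{
    return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
}

int main(void)
{
    struct rt6_info prefix = { 0 }, dflt = { RTF_DEFAULT };

    printf("prefix=%d default=%d\n",
           addrconf_is_prefix_route(&prefix),
           addrconf_is_prefix_route(&dflt));
    return 0;
}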
@@ -344,6 +353,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
344 kfree(ndev); 353 kfree(ndev);
345 return NULL; 354 return NULL;
346 } 355 }
356 if (ndev->cnf.forwarding)
357 dev_disable_lro(dev);
347 /* We refer to the device */ 358 /* We refer to the device */
348 dev_hold(dev); 359 dev_hold(dev);
349 360
@@ -372,6 +383,9 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
372 */ 383 */
373 in6_dev_hold(ndev); 384 in6_dev_hold(ndev);
374 385
386 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
387 ndev->cnf.accept_dad = -1;
388
375#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 389#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
376 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { 390 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
377 printk(KERN_INFO 391 printk(KERN_INFO
@@ -438,6 +452,8 @@ static void dev_forward_change(struct inet6_dev *idev)
438 if (!idev) 452 if (!idev)
439 return; 453 return;
440 dev = idev->dev; 454 dev = idev->dev;
455 if (idev->cnf.forwarding)
456 dev_disable_lro(dev);
441 if (dev && (dev->flags & IFF_MULTICAST)) { 457 if (dev && (dev->flags & IFF_MULTICAST)) {
442 if (idev->cnf.forwarding) 458 if (idev->cnf.forwarding)
443 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 459 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
@@ -483,12 +499,14 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
483 if (p == &net->ipv6.devconf_dflt->forwarding) 499 if (p == &net->ipv6.devconf_dflt->forwarding)
484 return; 500 return;
485 501
502 rtnl_lock();
486 if (p == &net->ipv6.devconf_all->forwarding) { 503 if (p == &net->ipv6.devconf_all->forwarding) {
487 __s32 newf = net->ipv6.devconf_all->forwarding; 504 __s32 newf = net->ipv6.devconf_all->forwarding;
488 net->ipv6.devconf_dflt->forwarding = newf; 505 net->ipv6.devconf_dflt->forwarding = newf;
489 addrconf_forward_change(net, newf); 506 addrconf_forward_change(net, newf);
490 } else if ((!*p) ^ (!old)) 507 } else if ((!*p) ^ (!old))
491 dev_forward_change((struct inet6_dev *)table->extra1); 508 dev_forward_change((struct inet6_dev *)table->extra1);
509 rtnl_unlock();
492 510
493 if (*p) 511 if (*p)
494 rt6_purge_dflt_routers(net); 512 rt6_purge_dflt_routers(net);
@@ -568,6 +586,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
568 struct rt6_info *rt; 586 struct rt6_info *rt;
569 int hash; 587 int hash;
570 int err = 0; 588 int err = 0;
589 int addr_type = ipv6_addr_type(addr);
590
591 if (addr_type == IPV6_ADDR_ANY ||
592 addr_type & IPV6_ADDR_MULTICAST ||
593 (!(idev->dev->flags & IFF_LOOPBACK) &&
594 addr_type & IPV6_ADDR_LOOPBACK))
595 return ERR_PTR(-EADDRNOTAVAIL);
571 596
572 rcu_read_lock_bh(); 597 rcu_read_lock_bh();
573 if (idev->dead) { 598 if (idev->dead) {
@@ -777,7 +802,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
777 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 802 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
778 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); 803 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
779 804
780 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 805 if (rt && addrconf_is_prefix_route(rt)) {
781 if (onlink == 0) { 806 if (onlink == 0) {
782 ip6_del_rt(rt); 807 ip6_del_rt(rt);
783 rt = NULL; 808 rt = NULL;
@@ -958,7 +983,8 @@ static inline int ipv6_saddr_preferred(int type)
958 return 0; 983 return 0;
959} 984}
960 985
961static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score, 986static int ipv6_get_saddr_eval(struct net *net,
987 struct ipv6_saddr_score *score,
962 struct ipv6_saddr_dst *dst, 988 struct ipv6_saddr_dst *dst,
963 int i) 989 int i)
964{ 990{
@@ -1037,7 +1063,8 @@ static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score,
1037 break; 1063 break;
1038 case IPV6_SADDR_RULE_LABEL: 1064 case IPV6_SADDR_RULE_LABEL:
1039 /* Rule 6: Prefer matching label */ 1065 /* Rule 6: Prefer matching label */
1040 ret = ipv6_addr_label(&score->ifa->addr, score->addr_type, 1066 ret = ipv6_addr_label(net,
1067 &score->ifa->addr, score->addr_type,
1041 score->ifa->idev->dev->ifindex) == dst->label; 1068 score->ifa->idev->dev->ifindex) == dst->label;
1042 break; 1069 break;
1043#ifdef CONFIG_IPV6_PRIVACY 1070#ifdef CONFIG_IPV6_PRIVACY
@@ -1091,7 +1118,7 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1091 dst.addr = daddr; 1118 dst.addr = daddr;
1092 dst.ifindex = dst_dev ? dst_dev->ifindex : 0; 1119 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1093 dst.scope = __ipv6_addr_src_scope(dst_type); 1120 dst.scope = __ipv6_addr_src_scope(dst_type);
1094 dst.label = ipv6_addr_label(daddr, dst_type, dst.ifindex); 1121 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1095 dst.prefs = prefs; 1122 dst.prefs = prefs;
1096 1123
1097 hiscore->rule = -1; 1124 hiscore->rule = -1;
@@ -1159,8 +1186,8 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1159 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { 1186 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1160 int minihiscore, miniscore; 1187 int minihiscore, miniscore;
1161 1188
1162 minihiscore = ipv6_get_saddr_eval(hiscore, &dst, i); 1189 minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
1163 miniscore = ipv6_get_saddr_eval(score, &dst, i); 1190 miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
1164 1191
1165 if (minihiscore > miniscore) { 1192 if (minihiscore > miniscore) {
1166 if (i == IPV6_SADDR_RULE_SCOPE && 1193 if (i == IPV6_SADDR_RULE_SCOPE &&
@@ -1400,6 +1427,20 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp)
1400 1427
1401void addrconf_dad_failure(struct inet6_ifaddr *ifp) 1428void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1402{ 1429{
1430 struct inet6_dev *idev = ifp->idev;
1431 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
1432 struct in6_addr addr;
1433
1434 addr.s6_addr32[0] = htonl(0xfe800000);
1435 addr.s6_addr32[1] = 0;
1436
1437 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
1438 ipv6_addr_equal(&ifp->addr, &addr)) {
1439 /* DAD failed for link-local based on MAC address */
1440 idev->cnf.disable_ipv6 = 1;
1441 }
1442 }
1443
1403 if (net_ratelimit()) 1444 if (net_ratelimit())
1404 printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name); 1445 printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name);
1405 addrconf_dad_stop(ifp); 1446 addrconf_dad_stop(ifp);
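
The new check only disables IPv6 when the duplicate is the link-local address derived from the interface's MAC, since that indicates the hardware address itself clashes. A sketch of how such an fe80:: address is formed from a 48-bit MAC (the EUI-64 construction ipv6_generate_eui64() performs for Ethernet-type devices): flip the universal/local bit of the first octet and splice ff:fe into the middle:

#include <stdio.h>
#include <stdint.h>

static void mac_to_linklocal(const uint8_t mac[6], uint8_t addr[16])
{
	addr[0] = 0xfe; addr[1] = 0x80;      /* fe80::/64 prefix */
	for (int i = 2; i < 8; i++)
		addr[i] = 0;
	addr[8]  = mac[0] ^ 0x02;            /* flip U/L bit */
	addr[9]  = mac[1];
	addr[10] = mac[2];
	addr[11] = 0xff;                     /* EUI-64 filler */
	addr[12] = 0xfe;
	addr[13] = mac[3];
	addr[14] = mac[4];
	addr[15] = mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t ll[16];

	mac_to_linklocal(mac, ll);
	for (int i = 0; i < 16; i += 2)
		printf("%02x%02x%s", ll[i], ll[i + 1], i < 14 ? ":" : "\n");
	return 0; /* fe80:0000:0000:0000:0211:22ff:fe33:4455 */
}
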
@@ -1788,7 +1829,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1788 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1829 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
1789 dev->ifindex, 1); 1830 dev->ifindex, 1);
1790 1831
1791 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1832 if (rt && addrconf_is_prefix_route(rt)) {
1792 /* Autoconf prefix route */ 1833 /* Autoconf prefix route */
1793 if (valid_lft == 0) { 1834 if (valid_lft == 0) {
1794 ip6_del_rt(rt); 1835 ip6_del_rt(rt);
@@ -2732,6 +2773,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2732 spin_lock_bh(&ifp->lock); 2773 spin_lock_bh(&ifp->lock);
2733 2774
2734 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 2775 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
2776 idev->cnf.accept_dad < 1 ||
2735 !(ifp->flags&IFA_F_TENTATIVE) || 2777 !(ifp->flags&IFA_F_TENTATIVE) ||
2736 ifp->flags & IFA_F_NODAD) { 2778 ifp->flags & IFA_F_NODAD) {
2737 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); 2779 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
@@ -2779,6 +2821,11 @@ static void addrconf_dad_timer(unsigned long data)
2779 read_unlock_bh(&idev->lock); 2821 read_unlock_bh(&idev->lock);
2780 goto out; 2822 goto out;
2781 } 2823 }
2824 if (idev->cnf.accept_dad > 1 && idev->cnf.disable_ipv6) {
2825 read_unlock_bh(&idev->lock);
2826 addrconf_dad_failure(ifp);
2827 return;
2828 }
2782 spin_lock_bh(&ifp->lock); 2829 spin_lock_bh(&ifp->lock);
2783 if (ifp->probes == 0) { 2830 if (ifp->probes == 0) {
2784 /* 2831 /*
@@ -3638,6 +3685,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3638#ifdef CONFIG_IPV6_MROUTE 3685#ifdef CONFIG_IPV6_MROUTE
3639 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; 3686 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
3640#endif 3687#endif
3688 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
3689 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
3641} 3690}
3642 3691
3643static inline size_t inet6_if_nlmsg_size(void) 3692static inline size_t inet6_if_nlmsg_size(void)
@@ -4197,6 +4246,22 @@ static struct addrconf_sysctl_table
4197 }, 4246 },
4198#endif 4247#endif
4199 { 4248 {
4249 .ctl_name = CTL_UNNUMBERED,
4250 .procname = "disable_ipv6",
4251 .data = &ipv6_devconf.disable_ipv6,
4252 .maxlen = sizeof(int),
4253 .mode = 0644,
4254 .proc_handler = &proc_dointvec,
4255 },
4256 {
4257 .ctl_name = CTL_UNNUMBERED,
4258 .procname = "accept_dad",
4259 .data = &ipv6_devconf.accept_dad,
4260 .maxlen = sizeof(int),
4261 .mode = 0644,
4262 .proc_handler = &proc_dointvec,
4263 },
4264 {
4200 .ctl_name = 0, /* sentinel */ 4265 .ctl_name = 0, /* sentinel */
4201 } 4266 }
4202 }, 4267 },
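
Both new entries follow the file's existing pattern: CTL_UNNUMBERED, an int-sized data field, mode 0644, and the generic proc_dointvec handler. A hedged userspace analogue of that name-to-int binding — the knob names match the sysctls above, but the lookup machinery is invented for illustration:

#include <stdio.h>
#include <string.h>

struct knob {
	const char *name;
	int *data;
};

static int disable_ipv6;
static int accept_dad = 1;

static struct knob knobs[] = {
	{ "disable_ipv6", &disable_ipv6 },
	{ "accept_dad",   &accept_dad   },
	{ NULL, NULL }  /* sentinel, like .ctl_name = 0 */
};

/* One generic writer serves every entry, as proc_dointvec does. */
static int knob_write(const char *name, int value)
{
	for (struct knob *k = knobs; k->name; k++)
		if (strcmp(k->name, name) == 0) {
			*k->data = value;
			return 0;
		}
	return -1;
}

int main(void)
{
	knob_write("accept_dad", 2);
	printf("accept_dad=%d disable_ipv6=%d\n", accept_dad, disable_ipv6);
	return 0;
}
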
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 9bfa8846f262..08909039d87b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -29,6 +29,9 @@
29 */ 29 */
30struct ip6addrlbl_entry 30struct ip6addrlbl_entry
31{ 31{
32#ifdef CONFIG_NET_NS
33 struct net *lbl_net;
34#endif
32 struct in6_addr prefix; 35 struct in6_addr prefix;
33 int prefixlen; 36 int prefixlen;
34 int ifindex; 37 int ifindex;
@@ -46,6 +49,16 @@ static struct ip6addrlbl_table
46 u32 seq; 49 u32 seq;
47} ip6addrlbl_table; 50} ip6addrlbl_table;
48 51
52static inline
53struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
54{
55#ifdef CONFIG_NET_NS
56 return lbl->lbl_net;
57#else
58 return &init_net;
59#endif
60}
61
49/* 62/*
50 * Default policy table (RFC3484 + extensions) 63 * Default policy table (RFC3484 + extensions)
51 * 64 *
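
The lbl_net member exists only on CONFIG_NET_NS builds, and the ip6addrlbl_net() accessor hides the #ifdef from every caller by falling back to init_net. A self-contained sketch of the same pattern, where CONFIG_NET_NS is just a local compile switch:

#include <stdio.h>

struct net { int id; };
static struct net init_net = { 0 };

/* Define CONFIG_NET_NS to model a namespace-enabled build. */
struct entry {
#ifdef CONFIG_NET_NS
	struct net *owner;
#endif
	int label;
};

/* Mirrors ip6addrlbl_net(): callers never see the #ifdef. */
static inline struct net *entry_net(const struct entry *e)
{
#ifdef CONFIG_NET_NS
	return e->owner;
#else
	(void)e;
	return &init_net;
#endif
}

int main(void)
{
	struct entry e = {
#ifdef CONFIG_NET_NS
		.owner = &init_net,
#endif
		.label = 42,
	};
	printf("net id %d\n", entry_net(&e)->id);
	return 0;
}
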
@@ -65,7 +78,7 @@ static struct ip6addrlbl_table
65 78
66#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL 79#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL
67 80
68static const __initdata struct ip6addrlbl_init_table 81static const __net_initdata struct ip6addrlbl_init_table
69{ 82{
70 const struct in6_addr *prefix; 83 const struct in6_addr *prefix;
71 int prefixlen; 84 int prefixlen;
@@ -108,6 +121,9 @@ static const __initdata struct ip6addrlbl_init_table
108/* Object management */ 121/* Object management */
109static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) 122static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p)
110{ 123{
124#ifdef CONFIG_NET_NS
125 release_net(p->lbl_net);
126#endif
111 kfree(p); 127 kfree(p);
112} 128}
113 129
@@ -128,10 +144,13 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
128} 144}
129 145
130/* Find label */ 146/* Find label */
131static int __ip6addrlbl_match(struct ip6addrlbl_entry *p, 147static int __ip6addrlbl_match(struct net *net,
148 struct ip6addrlbl_entry *p,
132 const struct in6_addr *addr, 149 const struct in6_addr *addr,
133 int addrtype, int ifindex) 150 int addrtype, int ifindex)
134{ 151{
152 if (!net_eq(ip6addrlbl_net(p), net))
153 return 0;
135 if (p->ifindex && p->ifindex != ifindex) 154 if (p->ifindex && p->ifindex != ifindex)
136 return 0; 155 return 0;
137 if (p->addrtype && p->addrtype != addrtype) 156 if (p->addrtype && p->addrtype != addrtype)
@@ -141,19 +160,21 @@ static int __ip6addrlbl_match(struct ip6addrlbl_entry *p,
141 return 1; 160 return 1;
142} 161}
143 162
144static struct ip6addrlbl_entry *__ipv6_addr_label(const struct in6_addr *addr, 163static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
164 const struct in6_addr *addr,
145 int type, int ifindex) 165 int type, int ifindex)
146{ 166{
147 struct hlist_node *pos; 167 struct hlist_node *pos;
148 struct ip6addrlbl_entry *p; 168 struct ip6addrlbl_entry *p;
149 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 169 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
150 if (__ip6addrlbl_match(p, addr, type, ifindex)) 170 if (__ip6addrlbl_match(net, p, addr, type, ifindex))
151 return p; 171 return p;
152 } 172 }
153 return NULL; 173 return NULL;
154} 174}
155 175
156u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) 176u32 ipv6_addr_label(struct net *net,
177 const struct in6_addr *addr, int type, int ifindex)
157{ 178{
158 u32 label; 179 u32 label;
159 struct ip6addrlbl_entry *p; 180 struct ip6addrlbl_entry *p;
@@ -161,7 +182,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
161 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; 182 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK;
162 183
163 rcu_read_lock(); 184 rcu_read_lock();
164 p = __ipv6_addr_label(addr, type, ifindex); 185 p = __ipv6_addr_label(net, addr, type, ifindex);
165 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; 186 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT;
166 rcu_read_unlock(); 187 rcu_read_unlock();
167 188
@@ -174,7 +195,8 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
174} 195}
175 196
176/* allocate one entry */ 197/* allocate one entry */
177static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, 198static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
199 const struct in6_addr *prefix,
178 int prefixlen, int ifindex, 200 int prefixlen, int ifindex,
179 u32 label) 201 u32 label)
180{ 202{
@@ -216,6 +238,9 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
216 newp->addrtype = addrtype; 238 newp->addrtype = addrtype;
217 newp->label = label; 239 newp->label = label;
218 INIT_HLIST_NODE(&newp->list); 240 INIT_HLIST_NODE(&newp->list);
241#ifdef CONFIG_NET_NS
242 newp->lbl_net = hold_net(net);
243#endif
219 atomic_set(&newp->refcnt, 1); 244 atomic_set(&newp->refcnt, 1);
220 return newp; 245 return newp;
221} 246}
@@ -237,6 +262,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
237 hlist_for_each_entry_safe(p, pos, n, 262 hlist_for_each_entry_safe(p, pos, n,
238 &ip6addrlbl_table.head, list) { 263 &ip6addrlbl_table.head, list) {
239 if (p->prefixlen == newp->prefixlen && 264 if (p->prefixlen == newp->prefixlen &&
265 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
240 p->ifindex == newp->ifindex && 266 p->ifindex == newp->ifindex &&
241 ipv6_addr_equal(&p->prefix, &newp->prefix)) { 267 ipv6_addr_equal(&p->prefix, &newp->prefix)) {
242 if (!replace) { 268 if (!replace) {
@@ -261,7 +287,8 @@ out:
261} 287}
262 288
263/* add a label */ 289/* add a label */
264static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, 290static int ip6addrlbl_add(struct net *net,
291 const struct in6_addr *prefix, int prefixlen,
265 int ifindex, u32 label, int replace) 292 int ifindex, u32 label, int replace)
266{ 293{
267 struct ip6addrlbl_entry *newp; 294 struct ip6addrlbl_entry *newp;
@@ -274,7 +301,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
274 (unsigned int)label, 301 (unsigned int)label,
275 replace); 302 replace);
276 303
277 newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label); 304 newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label);
278 if (IS_ERR(newp)) 305 if (IS_ERR(newp))
279 return PTR_ERR(newp); 306 return PTR_ERR(newp);
280 spin_lock(&ip6addrlbl_table.lock); 307 spin_lock(&ip6addrlbl_table.lock);
@@ -286,7 +313,8 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
286} 313}
287 314
288/* remove a label */ 315/* remove a label */
289static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 316static int __ip6addrlbl_del(struct net *net,
317 const struct in6_addr *prefix, int prefixlen,
290 int ifindex) 318 int ifindex)
291{ 319{
292 struct ip6addrlbl_entry *p = NULL; 320 struct ip6addrlbl_entry *p = NULL;
@@ -300,6 +328,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
300 328
301 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { 329 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
302 if (p->prefixlen == prefixlen && 330 if (p->prefixlen == prefixlen &&
331 net_eq(ip6addrlbl_net(p), net) &&
303 p->ifindex == ifindex && 332 p->ifindex == ifindex &&
304 ipv6_addr_equal(&p->prefix, prefix)) { 333 ipv6_addr_equal(&p->prefix, prefix)) {
305 hlist_del_rcu(&p->list); 334 hlist_del_rcu(&p->list);
@@ -311,7 +340,8 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
311 return ret; 340 return ret;
312} 341}
313 342
314static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 343static int ip6addrlbl_del(struct net *net,
344 const struct in6_addr *prefix, int prefixlen,
315 int ifindex) 345 int ifindex)
316{ 346{
317 struct in6_addr prefix_buf; 347 struct in6_addr prefix_buf;
@@ -324,13 +354,13 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
324 354
325 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); 355 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen);
326 spin_lock(&ip6addrlbl_table.lock); 356 spin_lock(&ip6addrlbl_table.lock);
327 ret = __ip6addrlbl_del(&prefix_buf, prefixlen, ifindex); 357 ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex);
328 spin_unlock(&ip6addrlbl_table.lock); 358 spin_unlock(&ip6addrlbl_table.lock);
329 return ret; 359 return ret;
330} 360}
331 361
332/* add default label */ 362/* add default label */
333static __init int ip6addrlbl_init(void) 363static int __net_init ip6addrlbl_net_init(struct net *net)
334{ 364{
335 int err = 0; 365 int err = 0;
336 int i; 366 int i;
@@ -338,7 +368,8 @@ static __init int ip6addrlbl_init(void)
338 ADDRLABEL(KERN_DEBUG "%s()\n", __func__); 368 ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
339 369
340 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { 370 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
341 int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, 371 int ret = ip6addrlbl_add(net,
372 ip6addrlbl_init_table[i].prefix,
342 ip6addrlbl_init_table[i].prefixlen, 373 ip6addrlbl_init_table[i].prefixlen,
343 0, 374 0,
344 ip6addrlbl_init_table[i].label, 0); 375 ip6addrlbl_init_table[i].label, 0);
@@ -349,11 +380,32 @@ static __init int ip6addrlbl_init(void)
349 return err; 380 return err;
350} 381}
351 382
383static void __net_exit ip6addrlbl_net_exit(struct net *net)
384{
385 struct ip6addrlbl_entry *p = NULL;
386 struct hlist_node *pos, *n;
387
388 /* Remove all labels belonging to the exiting net */
389 spin_lock(&ip6addrlbl_table.lock);
390 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
391 if (net_eq(ip6addrlbl_net(p), net)) {
392 hlist_del_rcu(&p->list);
393 ip6addrlbl_put(p);
394 }
395 }
396 spin_unlock(&ip6addrlbl_table.lock);
397}
398
399static struct pernet_operations ipv6_addr_label_ops = {
400 .init = ip6addrlbl_net_init,
401 .exit = ip6addrlbl_net_exit,
402};
403
352int __init ipv6_addr_label_init(void) 404int __init ipv6_addr_label_init(void)
353{ 405{
354 spin_lock_init(&ip6addrlbl_table.lock); 406 spin_lock_init(&ip6addrlbl_table.lock);
355 407
356 return ip6addrlbl_init(); 408 return register_pernet_subsys(&ipv6_addr_label_ops);
357} 409}
358 410
359static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 411static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
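
All namespaces share one label table, so the per-net exit hook has to sweep it and remove only the dying namespace's entries — the same ownership filter __ip6addrlbl_match() now applies on lookup. A sketch under that assumption, with a fixed-size array standing in for the RCU hlist:

#include <stdio.h>

struct net { int id; };

struct label {
	struct net *owner;  /* owning namespace, like lbl_net */
	int value;
	int live;
};

#define NLABELS 3
static struct label table[NLABELS];

/* Mirrors ip6addrlbl_net_exit(): drop only entries owned by 'net'. */
static void net_exit(struct net *net)
{
	for (int i = 0; i < NLABELS; i++)
		if (table[i].live && table[i].owner == net)
			table[i].live = 0;
}

int main(void)
{
	struct net a = { 1 }, b = { 2 };

	table[0] = (struct label){ &a, 10, 1 };
	table[1] = (struct label){ &b, 20, 1 };
	table[2] = (struct label){ &a, 30, 1 };

	net_exit(&a);
	for (int i = 0; i < NLABELS; i++)
		printf("label %d live=%d\n", table[i].value, table[i].live);
	return 0; /* only label 20, owned by b, survives */
}
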
@@ -371,9 +423,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
371 u32 label; 423 u32 label;
372 int err = 0; 424 int err = 0;
373 425
374 if (net != &init_net)
375 return 0;
376
377 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 426 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
378 if (err < 0) 427 if (err < 0)
379 return err; 428 return err;
@@ -385,7 +434,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
385 return -EINVAL; 434 return -EINVAL;
386 435
387 if (ifal->ifal_index && 436 if (ifal->ifal_index &&
388 !__dev_get_by_index(&init_net, ifal->ifal_index)) 437 !__dev_get_by_index(net, ifal->ifal_index))
389 return -EINVAL; 438 return -EINVAL;
390 439
391 if (!tb[IFAL_ADDRESS]) 440 if (!tb[IFAL_ADDRESS])
@@ -403,12 +452,12 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
403 452
404 switch(nlh->nlmsg_type) { 453 switch(nlh->nlmsg_type) {
405 case RTM_NEWADDRLABEL: 454 case RTM_NEWADDRLABEL:
406 err = ip6addrlbl_add(pfx, ifal->ifal_prefixlen, 455 err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen,
407 ifal->ifal_index, label, 456 ifal->ifal_index, label,
408 nlh->nlmsg_flags & NLM_F_REPLACE); 457 nlh->nlmsg_flags & NLM_F_REPLACE);
409 break; 458 break;
410 case RTM_DELADDRLABEL: 459 case RTM_DELADDRLABEL:
411 err = ip6addrlbl_del(pfx, ifal->ifal_prefixlen, 460 err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen,
412 ifal->ifal_index); 461 ifal->ifal_index);
413 break; 462 break;
414 default: 463 default:
@@ -458,12 +507,10 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
458 int idx = 0, s_idx = cb->args[0]; 507 int idx = 0, s_idx = cb->args[0];
459 int err; 508 int err;
460 509
461 if (net != &init_net)
462 return 0;
463
464 rcu_read_lock(); 510 rcu_read_lock();
465 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 511 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
466 if (idx >= s_idx) { 512 if (idx >= s_idx &&
513 net_eq(ip6addrlbl_net(p), net)) {
467 if ((err = ip6addrlbl_fill(skb, p, 514 if ((err = ip6addrlbl_fill(skb, p,
468 ip6addrlbl_table.seq, 515 ip6addrlbl_table.seq,
469 NETLINK_CB(cb->skb).pid, 516 NETLINK_CB(cb->skb).pid,
@@ -499,9 +546,6 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
499 struct ip6addrlbl_entry *p; 546 struct ip6addrlbl_entry *p;
500 struct sk_buff *skb; 547 struct sk_buff *skb;
501 548
502 if (net != &init_net)
503 return 0;
504
505 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 549 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
506 if (err < 0) 550 if (err < 0)
507 return err; 551 return err;
@@ -513,7 +557,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
513 return -EINVAL; 557 return -EINVAL;
514 558
515 if (ifal->ifal_index && 559 if (ifal->ifal_index &&
516 !__dev_get_by_index(&init_net, ifal->ifal_index)) 560 !__dev_get_by_index(net, ifal->ifal_index))
517 return -EINVAL; 561 return -EINVAL;
518 562
519 if (!tb[IFAL_ADDRESS]) 563 if (!tb[IFAL_ADDRESS])
@@ -524,7 +568,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
524 return -EINVAL; 568 return -EINVAL;
525 569
526 rcu_read_lock(); 570 rcu_read_lock();
527 p = __ipv6_addr_label(addr, ipv6_addr_type(addr), ifal->ifal_index); 571 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
528 if (p && ip6addrlbl_hold(p)) 572 if (p && ip6addrlbl_hold(p))
529 p = NULL; 573 p = NULL;
530 lseq = ip6addrlbl_table.seq; 574 lseq = ip6addrlbl_table.seq;
@@ -552,7 +596,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
552 goto out; 596 goto out;
553 } 597 }
554 598
555 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 599 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
556out: 600out:
557 return err; 601 return err;
558} 602}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e84b3fd17fb4..3d828bc4b1cf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/af_inet.c 8 * Adapted from linux/net/ipv4/af_inet.c
9 * 9 *
10 * $Id: af_inet6.c,v 1.66 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * piggy, Karl Knutson : Socket protocol table 11 * piggy, Karl Knutson : Socket protocol table
14 * Hideaki YOSHIFUJI : sin6_scope_id support 12 * Hideaki YOSHIFUJI : sin6_scope_id support
@@ -61,9 +59,7 @@
61 59
62#include <asm/uaccess.h> 60#include <asm/uaccess.h>
63#include <asm/system.h> 61#include <asm/system.h>
64#ifdef CONFIG_IPV6_MROUTE
65#include <linux/mroute6.h> 62#include <linux/mroute6.h>
66#endif
67 63
68MODULE_AUTHOR("Cast of dozens"); 64MODULE_AUTHOR("Cast of dozens");
69MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 65MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
@@ -373,7 +369,7 @@ int inet6_release(struct socket *sock)
373 369
374EXPORT_SYMBOL(inet6_release); 370EXPORT_SYMBOL(inet6_release);
375 371
376int inet6_destroy_sock(struct sock *sk) 372void inet6_destroy_sock(struct sock *sk)
377{ 373{
378 struct ipv6_pinfo *np = inet6_sk(sk); 374 struct ipv6_pinfo *np = inet6_sk(sk);
379 struct sk_buff *skb; 375 struct sk_buff *skb;
@@ -391,8 +387,6 @@ int inet6_destroy_sock(struct sock *sk)
391 387
392 if ((opt = xchg(&np->opt, NULL)) != NULL) 388 if ((opt = xchg(&np->opt, NULL)) != NULL)
393 sock_kfree_s(sk, opt, opt->tot_len); 389 sock_kfree_s(sk, opt, opt->tot_len);
394
395 return 0;
396} 390}
397 391
398EXPORT_SYMBOL_GPL(inet6_destroy_sock); 392EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -956,9 +950,9 @@ static int __init inet6_init(void)
956 err = icmpv6_init(); 950 err = icmpv6_init();
957 if (err) 951 if (err)
958 goto icmp_fail; 952 goto icmp_fail;
959#ifdef CONFIG_IPV6_MROUTE 953 err = ip6_mr_init();
960 ip6_mr_init(); 954 if (err)
961#endif 955 goto ipmr_fail;
962 err = ndisc_init(); 956 err = ndisc_init();
963 if (err) 957 if (err)
964 goto ndisc_fail; 958 goto ndisc_fail;
@@ -1061,6 +1055,8 @@ netfilter_fail:
1061igmp_fail: 1055igmp_fail:
1062 ndisc_cleanup(); 1056 ndisc_cleanup();
1063ndisc_fail: 1057ndisc_fail:
1058 ip6_mr_cleanup();
1059ipmr_fail:
1064 icmpv6_cleanup(); 1060 icmpv6_cleanup();
1065icmp_fail: 1061icmp_fail:
1066 unregister_pernet_subsys(&inet6_net_ops); 1062 unregister_pernet_subsys(&inet6_net_ops);
@@ -1115,6 +1111,7 @@ static void __exit inet6_exit(void)
1115 ipv6_netfilter_fini(); 1111 ipv6_netfilter_fini();
1116 igmp6_cleanup(); 1112 igmp6_cleanup();
1117 ndisc_cleanup(); 1113 ndisc_cleanup();
1114 ip6_mr_cleanup();
1118 icmpv6_cleanup(); 1115 icmpv6_cleanup();
1119 rawv6_exit(); 1116 rawv6_exit();
1120 1117
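
Since ip6_mr_init() can now fail, inet6_init() gains an ipmr_fail label between ndisc_fail and icmp_fail, preserving the invariant that each label undoes exactly what had succeeded before the failure, in reverse order. The shape of that goto-unwind idiom, reduced to three toy subsystems:

#include <stdio.h>

/* Toy subsystems; flip fail_b to 0 to see the success path. */
static int fail_b = 1;

static int init_a(void) { puts("a up"); return 0; }
static int init_b(void) { return fail_b ? -1 : 0; }
static int init_c(void) { puts("c up"); return 0; }
static void cleanup_a(void) { puts("a down"); }
static void cleanup_b(void) { puts("b down"); }

/* Same shape as inet6_init(): one label per initialized subsystem,
 * unwinding in reverse registration order. */
static int subsys_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto a_fail;
	err = init_b();
	if (err)
		goto b_fail;
	err = init_c();
	if (err)
		goto c_fail;
	return 0;

c_fail:
	cleanup_b();
b_fail:
	cleanup_a();
a_fail:
	return err;
}

int main(void)
{
	printf("init: %d\n", subsys_init());
	return 0;
}
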
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0f0f94a40335..f7b535dec860 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index dcf94fdfb863..9f1084b4c0e8 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -7,8 +7,6 @@
7 * Andi Kleen <ak@muc.de> 7 * Andi Kleen <ak@muc.de>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 * 9 *
10 * $Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index d42dd16d3487..abedf95fdf2d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on net/ipv4/icmp.c 8 * Based on net/ipv4/icmp.c
11 * 9 *
12 * RFC 1885 10 * RFC 1885
@@ -956,7 +954,8 @@ ctl_table ipv6_icmp_table_template[] = {
956 .data = &init_net.ipv6.sysctl.icmpv6_time, 954 .data = &init_net.ipv6.sysctl.icmpv6_time,
957 .maxlen = sizeof(int), 955 .maxlen = sizeof(int),
958 .mode = 0644, 956 .mode = 0644,
959 .proc_handler = &proc_dointvec 957 .proc_handler = &proc_dointvec_ms_jiffies,
958 .strategy = &sysctl_ms_jiffies
960 }, 959 },
961 { .ctl_name = 0 }, 960 { .ctl_name = 0 },
962}; 961};
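
Switching the handler to proc_dointvec_ms_jiffies means user space keeps reading and writing icmpv6_time in milliseconds while the stored value stays in jiffies. Conceptually the handler scales by HZ/1000 in each direction; the kernel helpers also round, and HZ below is only an illustrative value:

#include <stdio.h>

#define HZ 250  /* illustrative; the kernel tick rate is config-dependent */

/* What proc_dointvec_ms_jiffies does conceptually on write and read. */
static long ms_to_jiffies(long ms)      { return ms * HZ / 1000; }
static long jiffies_to_ms(long jiffies) { return jiffies * 1000 / HZ; }

int main(void)
{
	long stored = ms_to_jiffies(1000); /* user writes 1000 ms */
	printf("stored %ld jiffies, reads back %ld ms\n",
	       stored, jiffies_to_ms(stored));
	return 0;
}
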
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 580014aea4d6..00a8a5f9380c 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -68,7 +68,7 @@ struct sock *__inet6_lookup_established(struct net *net,
68 /* Optimize here for direct hit, only listening connections can 68 /* Optimize here for direct hit, only listening connections can
69 * have wildcards anyways. 69 * have wildcards anyways.
70 */ 70 */
71 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); 71 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); 72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
73 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); 73 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
74 74
@@ -104,7 +104,8 @@ struct sock *inet6_lookup_listener(struct net *net,
104 int score, hiscore = 0; 104 int score, hiscore = 0;
105 105
106 read_lock(&hashinfo->lhash_lock); 106 read_lock(&hashinfo->lhash_lock);
107 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { 107 sk_for_each(sk, node,
108 &hashinfo->listening_hash[inet_lhashfn(net, hnum)]) {
108 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && 109 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum &&
109 sk->sk_family == PF_INET6) { 110 sk->sk_family == PF_INET6) {
110 const struct ipv6_pinfo *np = inet6_sk(sk); 111 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -165,14 +166,14 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
165 const struct in6_addr *saddr = &np->daddr; 166 const struct in6_addr *saddr = &np->daddr;
166 const int dif = sk->sk_bound_dev_if; 167 const int dif = sk->sk_bound_dev_if;
167 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 168 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
168 const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, 169 struct net *net = sock_net(sk);
170 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
169 inet->dport); 171 inet->dport);
170 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 172 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
171 rwlock_t *lock = inet_ehash_lockp(hinfo, hash); 173 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
172 struct sock *sk2; 174 struct sock *sk2;
173 const struct hlist_node *node; 175 const struct hlist_node *node;
174 struct inet_timewait_sock *tw; 176 struct inet_timewait_sock *tw;
175 struct net *net = sock_net(sk);
176 177
177 prefetch(head->chain.first); 178 prefetch(head->chain.first);
178 write_lock(lock); 179 write_lock(lock);
@@ -209,11 +210,11 @@ unique:
209 210
210 if (twp != NULL) { 211 if (twp != NULL) {
211 *twp = tw; 212 *twp = tw;
212 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 213 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
213 } else if (tw != NULL) { 214 } else if (tw != NULL) {
214 /* Silly. Should hash-dance instead... */ 215 /* Silly. Should hash-dance instead... */
215 inet_twsk_deschedule(tw, death_row); 216 inet_twsk_deschedule(tw, death_row);
216 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 217 NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
217 218
218 inet_twsk_put(tw); 219 inet_twsk_put(tw);
219 } 220 }
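
Threading net into inet6_ehashfn() lets two sockets with identical 4-tuples in different namespaces hash into different chains. A hedged model of the idea — fold a per-namespace salt into the hash — not the kernel's actual mixing function:

#include <stdio.h>
#include <stdint.h>

struct net { uint32_t hash_mix; };  /* per-namespace salt (illustrative) */

/* Same 4-tuple, different namespace, different bucket. */
static uint32_t ehashfn(const struct net *net,
			uint32_t daddr, uint16_t dport,
			uint32_t saddr, uint16_t sport)
{
	uint32_t h = daddr ^ saddr ^ ((uint32_t)dport << 16 | sport);
	return h ^ net->hash_mix;
}

int main(void)
{
	struct net a = { 0x1234 }, b = { 0xabcd };
	uint32_t ha = ehashfn(&a, 0x0a000001, 80, 0x0a000002, 40000);
	uint32_t hb = ehashfn(&b, 0x0a000001, 80, 0x0a000002, 40000);

	printf("%08x %08x\n", ha, hb); /* differ despite identical tuples */
	return 0;
}
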
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1ee4fa17c129..4de2b9efcacb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 17eb48b8e329..ea81c614dde2 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Ian P. Morris <I.P.Morris@soton.ac.uk> 7 * Ian P. Morris <I.P.Morris@soton.ac.uk>
8 * 8 *
9 * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $
10 *
11 * Based in linux/net/ipv4/ip_input.c 9 * Based in linux/net/ipv4/ip_input.c
12 * 10 *
13 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
@@ -73,7 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
73 71
74 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); 72 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES);
75 73
76 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 74 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
75 !idev || unlikely(idev->cnf.disable_ipv6)) {
77 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); 76 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
78 rcu_read_unlock(); 77 rcu_read_unlock();
79 goto out; 78 goto out;
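
The receive path now drops a packet before any header processing when the ingress device either has no IPv6 state or has the new disable_ipv6 knob set, accounting it as an input discard. A toy model of that early gate:

#include <stdio.h>

struct inet6_dev { int disable_ipv6; };
struct skb { const char *data; };

static unsigned long in_discards;

/* Hedged model of the new check in ipv6_rcv(): no IPv6 state on the
 * device, or IPv6 administratively disabled, means drop and count. */
static int rcv(struct skb *skb, struct inet6_dev *idev)
{
	if (!idev || idev->disable_ipv6) {
		in_discards++;
		return -1; /* drop */
	}
	printf("accepted: %s\n", skb->data);
	return 0;
}

int main(void)
{
	struct inet6_dev up = { 0 }, down = { 1 };
	struct skb pkt = { "demo packet" };

	rcv(&pkt, &up);
	rcv(&pkt, &down);
	rcv(&pkt, NULL);
	printf("discards: %lu\n", in_discards); /* 2 */
	return 0;
}
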
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 48cdce9c696c..0981c1ef3057 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on linux/net/ipv4/ip_output.c 8 * Based on linux/net/ipv4/ip_output.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -175,6 +173,13 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
175 173
176int ip6_output(struct sk_buff *skb) 174int ip6_output(struct sk_buff *skb)
177{ 175{
176 struct inet6_dev *idev = ip6_dst_idev(skb->dst);
177 if (unlikely(idev->cnf.disable_ipv6)) {
178 IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
179 kfree_skb(skb);
180 return 0;
181 }
182
178 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 183 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
179 dst_allfrag(skb->dst)) 184 dst_allfrag(skb->dst))
180 return ip6_fragment(skb, ip6_output2); 185 return ip6_fragment(skb, ip6_output2);
@@ -409,6 +414,9 @@ int ip6_forward(struct sk_buff *skb)
409 if (ipv6_devconf.forwarding == 0) 414 if (ipv6_devconf.forwarding == 0)
410 goto error; 415 goto error;
411 416
417 if (skb_warn_if_lro(skb))
418 goto drop;
419
412 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { 420 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
413 IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); 421 IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
414 goto drop; 422 goto drop;
@@ -497,7 +505,8 @@ int ip6_forward(struct sk_buff *skb)
497 int addrtype = ipv6_addr_type(&hdr->saddr); 505 int addrtype = ipv6_addr_type(&hdr->saddr);
498 506
499 /* This check is security critical. */ 507 /* This check is security critical. */
500 if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK)) 508 if (addrtype == IPV6_ADDR_ANY ||
509 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
501 goto error; 510 goto error;
502 if (addrtype & IPV6_ADDR_LINKLOCAL) { 511 if (addrtype & IPV6_ADDR_LINKLOCAL) {
503 icmpv6_send(skb, ICMPV6_DEST_UNREACH, 512 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
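
The hardened test also refuses the unspecified source (::), which the old multicast/loopback mask missed: a packet sourced from :: must never be forwarded. A standalone classifier covering just the cases this check cares about (the kernel's ipv6_addr_type() distinguishes many more):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ADDR_ANY       0x01
#define ADDR_MULTICAST 0x02
#define ADDR_LOOPBACK  0x04

static int addr_type(const uint8_t a[16])
{
	static const uint8_t zero[16];
	static const uint8_t lo[16] = { [15] = 1 };

	if (memcmp(a, zero, 16) == 0) return ADDR_ANY;
	if (a[0] == 0xff)             return ADDR_MULTICAST;
	if (memcmp(a, lo, 16) == 0)   return ADDR_LOOPBACK;
	return 0;
}

/* The security-critical test, with the new ADDR_ANY case included. */
static int may_forward_from(const uint8_t saddr[16])
{
	int t = addr_type(saddr);

	return !(t == ADDR_ANY || (t & (ADDR_MULTICAST | ADDR_LOOPBACK)));
}

int main(void)
{
	uint8_t any[16] = { 0 };
	uint8_t ucast[16] = { 0x20, 0x01, [15] = 2 };

	printf("::     -> %d\n", may_forward_from(any));   /* 0 */
	printf("2001:: -> %d\n", may_forward_from(ucast)); /* 1 */
	return 0;
}
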
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2bda3ba100b1..17c7b098cdb0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -6,8 +6,6 @@
6 * Ville Nuorvala <vnuorval@tcs.hut.fi> 6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org> 7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 * 8 *
9 * $Id$
10 *
11 * Based on: 9 * Based on:
12 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c 10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
13 * 11 *
@@ -711,7 +709,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
711 } 709 }
712 710
713 if (!ip6_tnl_rcv_ctl(t)) { 711 if (!ip6_tnl_rcv_ctl(t)) {
714 t->stat.rx_dropped++; 712 t->dev->stats.rx_dropped++;
715 read_unlock(&ip6_tnl_lock); 713 read_unlock(&ip6_tnl_lock);
716 goto discard; 714 goto discard;
717 } 715 }
@@ -728,8 +726,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 726
729 dscp_ecn_decapsulate(t, ipv6h, skb); 727 dscp_ecn_decapsulate(t, ipv6h, skb);
730 728
731 t->stat.rx_packets++; 729 t->dev->stats.rx_packets++;
732 t->stat.rx_bytes += skb->len; 730 t->dev->stats.rx_bytes += skb->len;
733 netif_rx(skb); 731 netif_rx(skb);
734 read_unlock(&ip6_tnl_lock); 732 read_unlock(&ip6_tnl_lock);
735 return 0; 733 return 0;
@@ -849,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
849 __u32 *pmtu) 847 __u32 *pmtu)
850{ 848{
851 struct ip6_tnl *t = netdev_priv(dev); 849 struct ip6_tnl *t = netdev_priv(dev);
852 struct net_device_stats *stats = &t->stat; 850 struct net_device_stats *stats = &t->dev->stats;
853 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 851 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
854 struct ipv6_tel_txoption opt; 852 struct ipv6_tel_txoption opt;
855 struct dst_entry *dst; 853 struct dst_entry *dst;
@@ -1043,11 +1041,11 @@ static int
1043ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1041ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1044{ 1042{
1045 struct ip6_tnl *t = netdev_priv(dev); 1043 struct ip6_tnl *t = netdev_priv(dev);
1046 struct net_device_stats *stats = &t->stat; 1044 struct net_device_stats *stats = &t->dev->stats;
1047 int ret; 1045 int ret;
1048 1046
1049 if (t->recursion++) { 1047 if (t->recursion++) {
1050 t->stat.collisions++; 1048 stats->collisions++;
1051 goto tx_err; 1049 goto tx_err;
1052 } 1050 }
1053 1051
@@ -1289,19 +1287,6 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1289} 1287}
1290 1288
1291/** 1289/**
1292 * ip6_tnl_get_stats - return the stats for tunnel device
1293 * @dev: virtual device associated with tunnel
1294 *
1295 * Return: stats for device
1296 **/
1297
1298static struct net_device_stats *
1299ip6_tnl_get_stats(struct net_device *dev)
1300{
1301 return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
1302}
1303
1304/**
1305 * ip6_tnl_change_mtu - change mtu manually for tunnel device 1290 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1306 * @dev: virtual device associated with tunnel 1291 * @dev: virtual device associated with tunnel
1307 * @new_mtu: the new mtu 1292 * @new_mtu: the new mtu
@@ -1334,7 +1319,6 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
1334 dev->uninit = ip6_tnl_dev_uninit; 1319 dev->uninit = ip6_tnl_dev_uninit;
1335 dev->destructor = free_netdev; 1320 dev->destructor = free_netdev;
1336 dev->hard_start_xmit = ip6_tnl_xmit; 1321 dev->hard_start_xmit = ip6_tnl_xmit;
1337 dev->get_stats = ip6_tnl_get_stats;
1338 dev->do_ioctl = ip6_tnl_ioctl; 1322 dev->do_ioctl = ip6_tnl_ioctl;
1339 dev->change_mtu = ip6_tnl_change_mtu; 1323 dev->change_mtu = ip6_tnl_change_mtu;
1340 1324
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 14796181e8b5..0b41aa2675f5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -388,8 +388,8 @@ static int pim6_rcv(struct sk_buff *skb)
388 skb->ip_summed = 0; 388 skb->ip_summed = 0;
389 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
390 dst_release(skb->dst); 390 dst_release(skb->dst);
391 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len; 391 reg_dev->stats.rx_bytes += skb->len;
392 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++; 392 reg_dev->stats.rx_packets++;
393 skb->dst = NULL; 393 skb->dst = NULL;
394 nf_reset(skb); 394 nf_reset(skb);
395 netif_rx(skb); 395 netif_rx(skb);
@@ -409,26 +409,20 @@ static struct inet6_protocol pim6_protocol = {
409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
410{ 410{
411 read_lock(&mrt_lock); 411 read_lock(&mrt_lock);
412 ((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len; 412 dev->stats.tx_bytes += skb->len;
413 ((struct net_device_stats *)netdev_priv(dev))->tx_packets++; 413 dev->stats.tx_packets++;
414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT); 414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
415 read_unlock(&mrt_lock); 415 read_unlock(&mrt_lock);
416 kfree_skb(skb); 416 kfree_skb(skb);
417 return 0; 417 return 0;
418} 418}
419 419
420static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
421{
422 return (struct net_device_stats *)netdev_priv(dev);
423}
424
425static void reg_vif_setup(struct net_device *dev) 420static void reg_vif_setup(struct net_device *dev)
426{ 421{
427 dev->type = ARPHRD_PIMREG; 422 dev->type = ARPHRD_PIMREG;
428 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; 423 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
429 dev->flags = IFF_NOARP; 424 dev->flags = IFF_NOARP;
430 dev->hard_start_xmit = reg_vif_xmit; 425 dev->hard_start_xmit = reg_vif_xmit;
431 dev->get_stats = reg_vif_get_stats;
432 dev->destructor = free_netdev; 426 dev->destructor = free_netdev;
433} 427}
434 428
@@ -436,9 +430,7 @@ static struct net_device *ip6mr_reg_vif(void)
436{ 430{
437 struct net_device *dev; 431 struct net_device *dev;
438 432
439 dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg", 433 dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
440 reg_vif_setup);
441
442 if (dev == NULL) 434 if (dev == NULL)
443 return NULL; 435 return NULL;
444 436
@@ -451,6 +443,7 @@ static struct net_device *ip6mr_reg_vif(void)
451 if (dev_open(dev)) 443 if (dev_open(dev))
452 goto failure; 444 goto failure;
453 445
446 dev_hold(dev);
454 return dev; 447 return dev;
455 448
456failure: 449failure:
@@ -603,6 +596,7 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock)
603 int vifi = vifc->mif6c_mifi; 596 int vifi = vifc->mif6c_mifi;
604 struct mif_device *v = &vif6_table[vifi]; 597 struct mif_device *v = &vif6_table[vifi];
605 struct net_device *dev; 598 struct net_device *dev;
599 int err;
606 600
607 /* Is vif busy ? */ 601 /* Is vif busy ? */
608 if (MIF_EXISTS(vifi)) 602 if (MIF_EXISTS(vifi))
@@ -620,20 +614,28 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock)
620 dev = ip6mr_reg_vif(); 614 dev = ip6mr_reg_vif();
621 if (!dev) 615 if (!dev)
622 return -ENOBUFS; 616 return -ENOBUFS;
617 err = dev_set_allmulti(dev, 1);
618 if (err) {
619 unregister_netdevice(dev);
620 dev_put(dev);
621 return err;
622 }
623 break; 623 break;
624#endif 624#endif
625 case 0: 625 case 0:
626 dev = dev_get_by_index(&init_net, vifc->mif6c_pifi); 626 dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
627 if (!dev) 627 if (!dev)
628 return -EADDRNOTAVAIL; 628 return -EADDRNOTAVAIL;
629 dev_put(dev); 629 err = dev_set_allmulti(dev, 1);
630 if (err) {
631 dev_put(dev);
632 return err;
633 }
630 break; 634 break;
631 default: 635 default:
632 return -EINVAL; 636 return -EINVAL;
633 } 637 }
634 638
635 dev_set_allmulti(dev, 1);
636
637 /* 639 /*
638 * Fill in the VIF structures 640 * Fill in the VIF structures
639 */ 641 */
@@ -652,7 +654,6 @@ static int mif6_add(struct mif6ctl *vifc, int mrtsock)
652 654
653 /* And finish update writing critical data */ 655 /* And finish update writing critical data */
654 write_lock_bh(&mrt_lock); 656 write_lock_bh(&mrt_lock);
655 dev_hold(dev);
656 v->dev = dev; 657 v->dev = dev;
657#ifdef CONFIG_IPV6_PIMSM_V2 658#ifdef CONFIG_IPV6_PIMSM_V2
658 if (v->flags & MIFF_REGISTER) 659 if (v->flags & MIFF_REGISTER)
@@ -956,23 +957,51 @@ static struct notifier_block ip6_mr_notifier = {
956 * Setup for IP multicast routing 957 * Setup for IP multicast routing
957 */ 958 */
958 959
959void __init ip6_mr_init(void) 960int __init ip6_mr_init(void)
960{ 961{
962 int err;
963
961 mrt_cachep = kmem_cache_create("ip6_mrt_cache", 964 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
962 sizeof(struct mfc6_cache), 965 sizeof(struct mfc6_cache),
963 0, SLAB_HWCACHE_ALIGN, 966 0, SLAB_HWCACHE_ALIGN,
964 NULL); 967 NULL);
965 if (!mrt_cachep) 968 if (!mrt_cachep)
966 panic("cannot allocate ip6_mrt_cache"); 969 return -ENOMEM;
967 970
968 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); 971 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
969 register_netdevice_notifier(&ip6_mr_notifier); 972 err = register_netdevice_notifier(&ip6_mr_notifier);
973 if (err)
974 goto reg_notif_fail;
975#ifdef CONFIG_PROC_FS
976 err = -ENOMEM;
977 if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
978 goto proc_vif_fail;
979 if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
980 0, &ip6mr_mfc_fops))
981 goto proc_cache_fail;
982#endif
983 return 0;
984reg_notif_fail:
985 kmem_cache_destroy(mrt_cachep);
970#ifdef CONFIG_PROC_FS 986#ifdef CONFIG_PROC_FS
971 proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops); 987proc_vif_fail:
972 proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops); 988 unregister_netdevice_notifier(&ip6_mr_notifier);
989proc_cache_fail:
990 proc_net_remove(&init_net, "ip6_mr_vif");
973#endif 991#endif
992 return err;
974} 993}
975 994
995void ip6_mr_cleanup(void)
996{
997#ifdef CONFIG_PROC_FS
998 proc_net_remove(&init_net, "ip6_mr_cache");
999 proc_net_remove(&init_net, "ip6_mr_vif");
1000#endif
1001 unregister_netdevice_notifier(&ip6_mr_notifier);
1002 del_timer(&ipmr_expire_timer);
1003 kmem_cache_destroy(mrt_cachep);
1004}
976 1005
977static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock) 1006static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
978{ 1007{
@@ -1248,7 +1277,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1248 1277
1249#endif 1278#endif
1250 /* 1279 /*
1251 * Spurious command, or MRT_VERSION which you cannot 1280 * Spurious command, or MRT6_VERSION which you cannot
1252 * set. 1281 * set.
1253 */ 1282 */
1254 default: 1283 default:
@@ -1377,8 +1406,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1377 if (vif->flags & MIFF_REGISTER) { 1406 if (vif->flags & MIFF_REGISTER) {
1378 vif->pkt_out++; 1407 vif->pkt_out++;
1379 vif->bytes_out += skb->len; 1408 vif->bytes_out += skb->len;
1380 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len; 1409 vif->dev->stats.tx_bytes += skb->len;
1381 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++; 1410 vif->dev->stats.tx_packets++;
1382 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT); 1411 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
1383 kfree_skb(skb); 1412 kfree_skb(skb);
1384 return 0; 1413 return 0;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 86e28a75267f..030c0c956f9d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/net/ipv4/ip_sockglue.c 8 * Based on linux/net/ipv4/ip_sockglue.c
9 * 9 *
10 * $Id: ipv6_sockglue.c,v 1.41 2002/02/01 22:01:04 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fd632dd7f98d..bd2fe4cfafa7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: mcast.c,v 1.40 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c 8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -164,7 +162,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
164 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ 162 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
165 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) 163 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
166 164
167#define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value)
168#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) 165#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
169 166
170#define IPV6_MLD_MAX_MSF 64 167#define IPV6_MLD_MAX_MSF 64
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6cae5475737e..689dec899c57 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -208,5 +208,17 @@ config IP6_NF_RAW
208 If you want to compile it as a module, say M here and read 208 If you want to compile it as a module, say M here and read
209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
210 210
211# security table for MAC policy
212config IP6_NF_SECURITY
213 tristate "Security table"
214 depends on IP6_NF_IPTABLES
215 depends on SECURITY
216 default m if NETFILTER_ADVANCED=n
217 help
 218 This option adds a `security' table to ip6tables, for use
219 with Mandatory Access Control (MAC) policy.
220
221 If unsure, say N.
222
211endmenu 223endmenu
212 224
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index fbf2c14ed887..3f17c948eefb 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
11 12
12# objects for l3 independent conntrack 13# objects for l3 independent conntrack
13nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 14nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 2eff3ae8977d..1b8815f6153d 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -159,7 +159,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
159 case IPQ_COPY_META: 159 case IPQ_COPY_META:
160 case IPQ_COPY_NONE: 160 case IPQ_COPY_NONE:
161 size = NLMSG_SPACE(sizeof(*pmsg)); 161 size = NLMSG_SPACE(sizeof(*pmsg));
162 data_len = 0;
163 break; 162 break;
164 163
165 case IPQ_COPY_PACKET: 164 case IPQ_COPY_PACKET:
@@ -226,8 +225,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
226 return skb; 225 return skb;
227 226
228nlmsg_failure: 227nlmsg_failure:
229 if (skb)
230 kfree_skb(skb);
231 *errp = -EINVAL; 228 *errp = -EINVAL;
232 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 229 printk(KERN_ERR "ip6_queue: error creating packet message\n");
233 return NULL; 230 return NULL;
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index f979e48b469b..55a2c290bad4 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -61,13 +61,25 @@ static struct xt_table packet_filter = {
61 61
62/* The work comes in here from netfilter.c. */ 62/* The work comes in here from netfilter.c. */
63static unsigned int 63static unsigned int
64ip6t_hook(unsigned int hook, 64ip6t_local_in_hook(unsigned int hook,
65 struct sk_buff *skb, 65 struct sk_buff *skb,
66 const struct net_device *in, 66 const struct net_device *in,
67 const struct net_device *out, 67 const struct net_device *out,
68 int (*okfn)(struct sk_buff *)) 68 int (*okfn)(struct sk_buff *))
69{
70 return ip6t_do_table(skb, hook, in, out,
71 nf_local_in_net(in, out)->ipv6.ip6table_filter);
72}
73
74static unsigned int
75ip6t_forward_hook(unsigned int hook,
76 struct sk_buff *skb,
77 const struct net_device *in,
78 const struct net_device *out,
79 int (*okfn)(struct sk_buff *))
69{ 80{
70 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter); 81 return ip6t_do_table(skb, hook, in, out,
82 nf_forward_net(in, out)->ipv6.ip6table_filter);
71} 83}
72 84
73static unsigned int 85static unsigned int
@@ -87,19 +99,20 @@ ip6t_local_out_hook(unsigned int hook,
87 } 99 }
88#endif 100#endif
89 101
90 return ip6t_do_table(skb, hook, in, out, init_net.ipv6.ip6table_filter); 102 return ip6t_do_table(skb, hook, in, out,
103 nf_local_out_net(in, out)->ipv6.ip6table_filter);
91} 104}
92 105
93static struct nf_hook_ops ip6t_ops[] __read_mostly = { 106static struct nf_hook_ops ip6t_ops[] __read_mostly = {
94 { 107 {
95 .hook = ip6t_hook, 108 .hook = ip6t_local_in_hook,
96 .owner = THIS_MODULE, 109 .owner = THIS_MODULE,
97 .pf = PF_INET6, 110 .pf = PF_INET6,
98 .hooknum = NF_INET_LOCAL_IN, 111 .hooknum = NF_INET_LOCAL_IN,
99 .priority = NF_IP6_PRI_FILTER, 112 .priority = NF_IP6_PRI_FILTER,
100 }, 113 },
101 { 114 {
102 .hook = ip6t_hook, 115 .hook = ip6t_forward_hook,
103 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
104 .pf = PF_INET6, 117 .pf = PF_INET6,
105 .hooknum = NF_INET_FORWARD, 118 .hooknum = NF_INET_FORWARD,
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
new file mode 100644
index 000000000000..a07abee30497
--- /dev/null
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -0,0 +1,172 @@
1/*
2 * "security" table for IPv6
3 *
4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context
6 * to DAC.
7 *
8 * Based on iptable_mangle.c
9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
 12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18#include <linux/module.h>
19#include <linux/netfilter_ipv6/ip6_tables.h>
20
21MODULE_LICENSE("GPL");
 22MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
23MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
24
25#define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT)
28
29static struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static struct xt_table security_table = {
60 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS,
62 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
63 .me = THIS_MODULE,
64 .af = AF_INET6,
65};
66
67static unsigned int
68ip6t_local_in_hook(unsigned int hook,
69 struct sk_buff *skb,
70 const struct net_device *in,
71 const struct net_device *out,
72 int (*okfn)(struct sk_buff *))
73{
74 return ip6t_do_table(skb, hook, in, out,
75 nf_local_in_net(in, out)->ipv6.ip6table_security);
76}
77
78static unsigned int
79ip6t_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{
85 return ip6t_do_table(skb, hook, in, out,
86 nf_forward_net(in, out)->ipv6.ip6table_security);
87}
88
89static unsigned int
90ip6t_local_out_hook(unsigned int hook,
91 struct sk_buff *skb,
92 const struct net_device *in,
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* TBD: handle short packets via raw socket */
97 return ip6t_do_table(skb, hook, in, out,
98 nf_local_out_net(in, out)->ipv6.ip6table_security);
99}
100
101static struct nf_hook_ops ip6t_ops[] __read_mostly = {
102 {
103 .hook = ip6t_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = PF_INET6,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP6_PRI_SECURITY,
108 },
109 {
110 .hook = ip6t_forward_hook,
111 .owner = THIS_MODULE,
112 .pf = PF_INET6,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP6_PRI_SECURITY,
115 },
116 {
117 .hook = ip6t_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = PF_INET6,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP6_PRI_SECURITY,
122 },
123};
124
125static int __net_init ip6table_security_net_init(struct net *net)
126{
127 net->ipv6.ip6table_security =
128 ip6t_register_table(net, &security_table, &initial_table.repl);
129
130 if (IS_ERR(net->ipv6.ip6table_security))
131 return PTR_ERR(net->ipv6.ip6table_security);
132
133 return 0;
134}
135
136static void __net_exit ip6table_security_net_exit(struct net *net)
137{
138 ip6t_unregister_table(net->ipv6.ip6table_security);
139}
140
141static struct pernet_operations ip6table_security_net_ops = {
142 .init = ip6table_security_net_init,
143 .exit = ip6table_security_net_exit,
144};
145
146static int __init ip6table_security_init(void)
147{
148 int ret;
149
150 ret = register_pernet_subsys(&ip6table_security_net_ops);
151 if (ret < 0)
152 return ret;
153
154 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
155 if (ret < 0)
156 goto cleanup_table;
157
158 return ret;
159
160cleanup_table:
161 unregister_pernet_subsys(&ip6table_security_net_ops);
162 return ret;
163}
164
165static void __exit ip6table_security_fini(void)
166{
167 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
168 unregister_pernet_subsys(&ip6table_security_net_ops);
169}
170
171module_init(ip6table_security_init);
172module_exit(ip6table_security_fini);
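
The table hooks into LOCAL_IN, FORWARD and LOCAL_OUT only, and valid_hooks is the bitmask gating those entry points. A sketch of building and testing such a mask, with the whole macro parenthesized so it composes safely inside larger expressions:

#include <stdio.h>

enum { HOOK_PRE, HOOK_LOCAL_IN, HOOK_FORWARD, HOOK_LOCAL_OUT, HOOK_POST };

/* Same idea as SECURITY_VALID_HOOKS above. */
#define VALID_HOOKS ((1 << HOOK_LOCAL_IN) | \
		     (1 << HOOK_FORWARD)  | \
		     (1 << HOOK_LOCAL_OUT))

static int hook_is_valid(int hook)
{
	return (VALID_HOOKS >> hook) & 1;
}

int main(void)
{
	for (int h = HOOK_PRE; h <= HOOK_POST; h++)
		printf("hook %d valid=%d\n", h, hook_is_valid(h));
	return 0; /* only LOCAL_IN, FORWARD, LOCAL_OUT report valid */
}
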
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index ee713b03e9ec..14d47d833545 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -89,9 +89,8 @@ static int icmpv6_packet(struct nf_conn *ct,
89 means this will only run once even if count hits zero twice 89 means this will only run once even if count hits zero twice
90 (theoretically possible with SMP) */ 90 (theoretically possible with SMP) */
91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
92 if (atomic_dec_and_test(&ct->proto.icmp.count) 92 if (atomic_dec_and_test(&ct->proto.icmp.count))
93 && del_timer(&ct->timeout)) 93 nf_ct_kill_acct(ct, ctinfo, skb);
94 ct->timeout.function((unsigned long)ct);
95 } else { 94 } else {
96 atomic_inc(&ct->proto.icmp.count); 95 atomic_inc(&ct->proto.icmp.count);
97 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); 96 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index df0736a4cafa..cbc7e514d3ec 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -7,8 +7,6 @@
7 * PROC file system. This is very similar to the IPv4 version, 7 * PROC file system. This is very similar to the IPv4 version,
8 * except it reports the sockets in the INET6 address family. 8 * except it reports the sockets in the INET6 address family.
9 * 9 *
10 * Version: $Id: proc.c,v 1.17 2002/02/01 22:01:04 davem Exp $
11 *
12 * Authors: David S. Miller (davem@caip.rutgers.edu) 10 * Authors: David S. Miller (davem@caip.rutgers.edu)
13 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 11 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
14 * 12 *
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index f929f47b925e..9ab789159913 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET6 protocol dispatch tables. 6 * PF_INET6 protocol dispatch tables.
7 * 7 *
8 * Version: $Id: protocol.c,v 1.10 2001/05/18 02:25:49 davem Exp $
9 *
10 * Authors: Pedro Roque <roque@di.fc.ul.pt> 8 * Authors: Pedro Roque <roque@di.fc.ul.pt>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3aee12310d94..34cfb3f41c2c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/raw.c 8 * Adapted from linux/net/ipv4/raw.c
9 * 9 *
10 * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) 12 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
@@ -1164,13 +1162,13 @@ static void rawv6_close(struct sock *sk, long timeout)
1164 sk_common_release(sk); 1162 sk_common_release(sk);
1165} 1163}
1166 1164
1167static int raw6_destroy(struct sock *sk) 1165static void raw6_destroy(struct sock *sk)
1168{ 1166{
1169 lock_sock(sk); 1167 lock_sock(sk);
1170 ip6_flush_pending_frames(sk); 1168 ip6_flush_pending_frames(sk);
1171 release_sock(sk); 1169 release_sock(sk);
1172 1170
1173 return inet6_destroy_sock(sk); 1171 inet6_destroy_sock(sk);
1174} 1172}
1175 1173
1176static int rawv6_init_sk(struct sock *sk) 1174static int rawv6_init_sk(struct sock *sk)
@@ -1253,7 +1251,7 @@ static int raw6_seq_show(struct seq_file *seq, void *v)
1253 "local_address " 1251 "local_address "
1254 "remote_address " 1252 "remote_address "
1255 "st tx_queue rx_queue tr tm->when retrnsmt" 1253 "st tx_queue rx_queue tr tm->when retrnsmt"
1256 " uid timeout inode drops\n"); 1254 " uid timeout inode ref pointer drops\n");
1257 else 1255 else
1258 raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); 1256 raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
1259 return 0; 1257 return 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a60d7d129713..6ab957ec2dd6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
9 *
10 * Based on: net/ipv4/ip_fragment.c 8 * Based on: net/ipv4/ip_fragment.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -634,7 +632,7 @@ static struct inet6_protocol frag_protocol =
634}; 632};
635 633
636#ifdef CONFIG_SYSCTL 634#ifdef CONFIG_SYSCTL
637static struct ctl_table ip6_frags_ctl_table[] = { 635static struct ctl_table ip6_frags_ns_ctl_table[] = {
638 { 636 {
639 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, 637 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH,
640 .procname = "ip6frag_high_thresh", 638 .procname = "ip6frag_high_thresh",
@@ -660,6 +658,10 @@ static struct ctl_table ip6_frags_ctl_table[] = {
660 .proc_handler = &proc_dointvec_jiffies, 658 .proc_handler = &proc_dointvec_jiffies,
661 .strategy = &sysctl_jiffies, 659 .strategy = &sysctl_jiffies,
662 }, 660 },
661 { }
662};
663
664static struct ctl_table ip6_frags_ctl_table[] = {
663 { 665 {
664 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, 666 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL,
665 .procname = "ip6frag_secret_interval", 667 .procname = "ip6frag_secret_interval",
@@ -672,21 +674,20 @@ static struct ctl_table ip6_frags_ctl_table[] = {
672 { } 674 { }
673}; 675};
674 676
675static int ip6_frags_sysctl_register(struct net *net) 677static int ip6_frags_ns_sysctl_register(struct net *net)
676{ 678{
677 struct ctl_table *table; 679 struct ctl_table *table;
678 struct ctl_table_header *hdr; 680 struct ctl_table_header *hdr;
679 681
680 table = ip6_frags_ctl_table; 682 table = ip6_frags_ns_ctl_table;
681 if (net != &init_net) { 683 if (net != &init_net) {
682 table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL); 684 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
683 if (table == NULL) 685 if (table == NULL)
684 goto err_alloc; 686 goto err_alloc;
685 687
686 table[0].data = &net->ipv6.frags.high_thresh; 688 table[0].data = &net->ipv6.frags.high_thresh;
687 table[1].data = &net->ipv6.frags.low_thresh; 689 table[1].data = &net->ipv6.frags.low_thresh;
688 table[2].data = &net->ipv6.frags.timeout; 690 table[2].data = &net->ipv6.frags.timeout;
689 table[3].mode &= ~0222;
690 } 691 }
691 692
692 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); 693 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
@@ -703,7 +704,7 @@ err_alloc:
703 return -ENOMEM; 704 return -ENOMEM;
704} 705}
705 706
706static void ip6_frags_sysctl_unregister(struct net *net) 707static void ip6_frags_ns_sysctl_unregister(struct net *net)
707{ 708{
708 struct ctl_table *table; 709 struct ctl_table *table;
709 710
@@ -711,13 +712,36 @@ static void ip6_frags_sysctl_unregister(struct net *net)
711 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); 712 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
712 kfree(table); 713 kfree(table);
713} 714}
715
716static struct ctl_table_header *ip6_ctl_header;
717
718static int ip6_frags_sysctl_register(void)
719{
720 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
721 ip6_frags_ctl_table);
722 return ip6_ctl_header == NULL ? -ENOMEM : 0;
723}
724
725static void ip6_frags_sysctl_unregister(void)
726{
727 unregister_net_sysctl_table(ip6_ctl_header);
728}
714#else 729#else
715static inline int ip6_frags_sysctl_register(struct net *net) 730static inline int ip6_frags_ns_sysctl_register(struct net *net)
716{ 731{
717 return 0; 732 return 0;
718} 733}
719 734
720static inline void ip6_frags_sysctl_unregister(struct net *net) 735static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
736{
737}
738
739static inline int ip6_frags_sysctl_register(void)
740{
741 return 0;
742}
743
744static inline void ip6_frags_sysctl_unregister(void)
721{ 745{
722} 746}
723#endif 747#endif
@@ -730,12 +754,12 @@ static int ipv6_frags_init_net(struct net *net)
730 754
731 inet_frags_init_net(&net->ipv6.frags); 755 inet_frags_init_net(&net->ipv6.frags);
732 756
733 return ip6_frags_sysctl_register(net); 757 return ip6_frags_ns_sysctl_register(net);
734} 758}
735 759
736static void ipv6_frags_exit_net(struct net *net) 760static void ipv6_frags_exit_net(struct net *net)
737{ 761{
738 ip6_frags_sysctl_unregister(net); 762 ip6_frags_ns_sysctl_unregister(net);
739 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 763 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
740} 764}
741 765
@@ -752,7 +776,13 @@ int __init ipv6_frag_init(void)
752 if (ret) 776 if (ret)
753 goto out; 777 goto out;
754 778
755 register_pernet_subsys(&ip6_frags_ops); 779 ret = ip6_frags_sysctl_register();
780 if (ret)
781 goto err_sysctl;
782
783 ret = register_pernet_subsys(&ip6_frags_ops);
784 if (ret)
785 goto err_pernet;
756 786
757 ip6_frags.hashfn = ip6_hashfn; 787 ip6_frags.hashfn = ip6_hashfn;
758 ip6_frags.constructor = ip6_frag_init; 788 ip6_frags.constructor = ip6_frag_init;
@@ -765,11 +795,18 @@ int __init ipv6_frag_init(void)
765 inet_frags_init(&ip6_frags); 795 inet_frags_init(&ip6_frags);
766out: 796out:
767 return ret; 797 return ret;
798
799err_pernet:
800 ip6_frags_sysctl_unregister();
801err_sysctl:
802 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
803 goto out;
768} 804}
769 805
770void ipv6_frag_exit(void) 806void ipv6_frag_exit(void)
771{ 807{
772 inet_frags_fini(&ip6_frags); 808 inet_frags_fini(&ip6_frags);
809 ip6_frags_sysctl_unregister();
773 unregister_pernet_subsys(&ip6_frags_ops); 810 unregister_pernet_subsys(&ip6_frags_ops);
774 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); 811 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
775} 812}
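
The reassembly hunks split what was a single sysctl table in two: the per-namespace knobs (ip6frag_high_thresh, ip6frag_low_thresh, ip6frag_time) stay in a table that is duplicated for every struct net, while ip6frag_secret_interval moves to a table registered once and exposed read-only to all namespaces via register_net_sysctl_rotable(), which is what lets the old "table[3].mode &= ~0222" workaround go away. Condensed from the hunks above (error unwinding of the kmemdup'd copy omitted), the resulting pattern:

/* Per-net table: one kmemdup'd copy per namespace, with .data repointed
 * at that namespace's fields.  The indices must match the table layout,
 * which is why the table was split at exactly the namespace boundary. */
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table = ip6_frags_ns_ctl_table;

	if (net != &init_net) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			return -ENOMEM;
		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}
	net->ipv6.sysctl.frags_hdr =
		register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	return net->ipv6.sysctl.frags_hdr ? 0 : -ENOMEM;
}

/* Global table: registered exactly once at subsystem init. */
static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}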
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7ff687020fa9..5d6c166dfbb6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -230,7 +228,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
230static inline int rt6_need_strict(struct in6_addr *daddr) 228static inline int rt6_need_strict(struct in6_addr *daddr)
231{ 229{
232 return (ipv6_addr_type(daddr) & 230 return (ipv6_addr_type(daddr) &
233 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); 231 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
234} 232}
235 233
236/* 234/*
@@ -239,15 +237,20 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
239 237
240static inline struct rt6_info *rt6_device_match(struct net *net, 238static inline struct rt6_info *rt6_device_match(struct net *net,
241 struct rt6_info *rt, 239 struct rt6_info *rt,
240 struct in6_addr *saddr,
242 int oif, 241 int oif,
243 int flags) 242 int flags)
244{ 243{
245 struct rt6_info *local = NULL; 244 struct rt6_info *local = NULL;
246 struct rt6_info *sprt; 245 struct rt6_info *sprt;
247 246
248 if (oif) { 247 if (!oif && ipv6_addr_any(saddr))
249 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { 248 goto out;
250 struct net_device *dev = sprt->rt6i_dev; 249
250 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
251 struct net_device *dev = sprt->rt6i_dev;
252
253 if (oif) {
251 if (dev->ifindex == oif) 254 if (dev->ifindex == oif)
252 return sprt; 255 return sprt;
253 if (dev->flags & IFF_LOOPBACK) { 256 if (dev->flags & IFF_LOOPBACK) {
@@ -261,14 +264,21 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
261 } 264 }
262 local = sprt; 265 local = sprt;
263 } 266 }
267 } else {
268 if (ipv6_chk_addr(net, saddr, dev,
269 flags & RT6_LOOKUP_F_IFACE))
270 return sprt;
264 } 271 }
272 }
265 273
274 if (oif) {
266 if (local) 275 if (local)
267 return local; 276 return local;
268 277
269 if (flags & RT6_LOOKUP_F_IFACE) 278 if (flags & RT6_LOOKUP_F_IFACE)
270 return net->ipv6.ip6_null_entry; 279 return net->ipv6.ip6_null_entry;
271 } 280 }
281out:
272 return rt; 282 return rt;
273} 283}
274 284
@@ -541,7 +551,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
541 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); 551 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
542restart: 552restart:
543 rt = fn->leaf; 553 rt = fn->leaf;
544 rt = rt6_device_match(net, rt, fl->oif, flags); 554 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
545 BACKTRACK(net, &fl->fl6_src); 555 BACKTRACK(net, &fl->fl6_src);
546out: 556out:
547 dst_use(&rt->u.dst, jiffies); 557 dst_use(&rt->u.dst, jiffies);
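
The rt6_device_match() change above adds a second selection rule: when the caller supplies no output interface but does supply a source address, the walk now prefers the route whose device owns that address (via ipv6_chk_addr()), instead of blindly taking the first leaf. A compilable userspace model of just that rule; every type and name below is an illustrative stand-in, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dev { const char *name; const char *addrs[4]; };
struct rt  { struct dev *dev; struct rt *next; };

/* stand-in for ipv6_chk_addr(): does this device own saddr? */
static bool dev_has_addr(const struct dev *d, const char *saddr)
{
	for (int i = 0; i < 4 && d->addrs[i]; i++)
		if (!strcmp(d->addrs[i], saddr))
			return true;
	return false;
}

static struct rt *rt_device_match(struct rt *head, const char *saddr)
{
	if (!saddr || !*saddr)           /* no hint: keep first candidate */
		return head;
	for (struct rt *r = head; r; r = r->next)
		if (dev_has_addr(r->dev, saddr))
			return r;        /* device owns the source address */
	return head;                     /* fall back, as the kernel does */
}

int main(void)
{
	struct dev e0 = { "eth0", { "2001:db8::1" } };
	struct dev e1 = { "eth1", { "2001:db8::2" } };
	struct rt r1 = { &e1, NULL }, r0 = { &e0, &r1 };

	printf("%s\n", rt_device_match(&r0, "2001:db8::2")->dev->name);
	return 0;
}

Compiled and run, this prints eth1: the second route wins because its device owns the requested source address.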
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 32e871a6c25a..b7a50e968506 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -493,13 +491,13 @@ static int ipip6_rcv(struct sk_buff *skb)
493 491
494 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 492 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
495 !isatap_chksrc(skb, iph, tunnel)) { 493 !isatap_chksrc(skb, iph, tunnel)) {
496 tunnel->stat.rx_errors++; 494 tunnel->dev->stats.rx_errors++;
497 read_unlock(&ipip6_lock); 495 read_unlock(&ipip6_lock);
498 kfree_skb(skb); 496 kfree_skb(skb);
499 return 0; 497 return 0;
500 } 498 }
501 tunnel->stat.rx_packets++; 499 tunnel->dev->stats.rx_packets++;
502 tunnel->stat.rx_bytes += skb->len; 500 tunnel->dev->stats.rx_bytes += skb->len;
503 skb->dev = tunnel->dev; 501 skb->dev = tunnel->dev;
504 dst_release(skb->dst); 502 dst_release(skb->dst);
505 skb->dst = NULL; 503 skb->dst = NULL;
@@ -539,7 +537,7 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
539static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 537static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
540{ 538{
541 struct ip_tunnel *tunnel = netdev_priv(dev); 539 struct ip_tunnel *tunnel = netdev_priv(dev);
542 struct net_device_stats *stats = &tunnel->stat; 540 struct net_device_stats *stats = &tunnel->dev->stats;
543 struct iphdr *tiph = &tunnel->parms.iph; 541 struct iphdr *tiph = &tunnel->parms.iph;
544 struct ipv6hdr *iph6 = ipv6_hdr(skb); 542 struct ipv6hdr *iph6 = ipv6_hdr(skb);
545 u8 tos = tunnel->parms.iph.tos; 543 u8 tos = tunnel->parms.iph.tos;
@@ -553,7 +551,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
553 int addr_type; 551 int addr_type;
554 552
555 if (tunnel->recursion++) { 553 if (tunnel->recursion++) {
556 tunnel->stat.collisions++; 554 stats->collisions++;
557 goto tx_error; 555 goto tx_error;
558 } 556 }
559 557
@@ -620,20 +618,20 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
620 .oif = tunnel->parms.link, 618 .oif = tunnel->parms.link,
621 .proto = IPPROTO_IPV6 }; 619 .proto = IPPROTO_IPV6 };
622 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 620 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
623 tunnel->stat.tx_carrier_errors++; 621 stats->tx_carrier_errors++;
624 goto tx_error_icmp; 622 goto tx_error_icmp;
625 } 623 }
626 } 624 }
627 if (rt->rt_type != RTN_UNICAST) { 625 if (rt->rt_type != RTN_UNICAST) {
628 ip_rt_put(rt); 626 ip_rt_put(rt);
629 tunnel->stat.tx_carrier_errors++; 627 stats->tx_carrier_errors++;
630 goto tx_error_icmp; 628 goto tx_error_icmp;
631 } 629 }
632 tdev = rt->u.dst.dev; 630 tdev = rt->u.dst.dev;
633 631
634 if (tdev == dev) { 632 if (tdev == dev) {
635 ip_rt_put(rt); 633 ip_rt_put(rt);
636 tunnel->stat.collisions++; 634 stats->collisions++;
637 goto tx_error; 635 goto tx_error;
638 } 636 }
639 637
@@ -643,7 +641,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
643 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 641 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
644 642
645 if (mtu < 68) { 643 if (mtu < 68) {
646 tunnel->stat.collisions++; 644 stats->collisions++;
647 ip_rt_put(rt); 645 ip_rt_put(rt);
648 goto tx_error; 646 goto tx_error;
649 } 647 }
@@ -920,11 +918,6 @@ done:
920 return err; 918 return err;
921} 919}
922 920
923static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
924{
925 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
926}
927
928static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) 921static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
929{ 922{
930 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 923 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -938,7 +931,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
938 dev->uninit = ipip6_tunnel_uninit; 931 dev->uninit = ipip6_tunnel_uninit;
939 dev->destructor = free_netdev; 932 dev->destructor = free_netdev;
940 dev->hard_start_xmit = ipip6_tunnel_xmit; 933 dev->hard_start_xmit = ipip6_tunnel_xmit;
941 dev->get_stats = ipip6_tunnel_get_stats;
942 dev->do_ioctl = ipip6_tunnel_ioctl; 934 dev->do_ioctl = ipip6_tunnel_ioctl;
943 dev->change_mtu = ipip6_tunnel_change_mtu; 935 dev->change_mtu = ipip6_tunnel_change_mtu;
944 936
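
The sit.c conversion works because struct net_device now embeds its own stats block: the driver updates dev->stats in place and deletes both its private net_device_stats copy and the get_stats hook, since the core falls back to the embedded block when no hook is installed. A minimal userspace model of that fallback (illustrative names, not the kernel's API):

#include <stdio.h>

struct stats { unsigned long rx_packets, rx_errors; };

struct netdev {
	struct stats stats;                          /* built in, always there */
	struct stats *(*get_stats)(struct netdev *); /* optional override      */
};

static struct stats *netdev_get_stats(struct netdev *dev)
{
	return dev->get_stats ? dev->get_stats(dev) : &dev->stats;
}

int main(void)
{
	struct netdev dev = { .stats = { .rx_packets = 3 } };

	/* No override installed: the embedded block is returned, which is
	 * why the hunk can delete ipip6_tunnel_get_stats() entirely. */
	printf("%lu\n", netdev_get_stats(&dev)->rx_packets);
	return 0;
}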
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 3ecc1157994e..6a68eeb7bbf8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
137 ; 137 ;
138 *mssp = msstab[mssind] + 1; 138 *mssp = msstab[mssind] + 1;
139 139
140 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT); 140 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
141 141
142 return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source, 142 return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
143 th->dest, ntohl(th->seq), 143 th->dest, ntohl(th->seq),
@@ -177,11 +177,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
177 177
178 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || 178 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
179 (mss = cookie_check(skb, cookie)) == 0) { 179 (mss = cookie_check(skb, cookie)) == 0) {
180 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); 180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
181 goto out; 181 goto out;
182 } 182 }
183 183
184 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV); 184 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
185 185
186 /* check for timestamp cookie support */ 186 /* check for timestamp cookie support */
187 memset(&tcp_opt, 0, sizeof(tcp_opt)); 187 memset(&tcp_opt, 0, sizeof(tcp_opt));
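
The NET_INC_STATS_BH() calls here grow a struct net argument because the SNMP mibs became per-namespace; every call site must now say which namespace it accounts against. Only two derivations appear throughout this series, modelled below with mock types (the real sock_net()/dev_net() are kernel inlines):

#include <stdio.h>

struct net        { unsigned long syncookies_sent; };
struct sock       { struct net *nd_net; };
struct net_device { struct net *nd_net; };
struct sk_buff    { struct net_device *dev; };

static struct net *sock_net(const struct sock *sk)       { return sk->nd_net; }
static struct net *dev_net(const struct net_device *dev) { return dev->nd_net; }

int main(void)
{
	struct net ns = { 0 };
	struct sock sk = { &ns };
	struct net_device dev = { &ns };
	struct sk_buff skb = { &dev };

	sock_net(&sk)->syncookies_sent++;    /* socket paths (process context) */
	dev_net(skb.dev)->syncookies_sent++; /* packet paths (ingress device)  */
	printf("%lu\n", ns.syncookies_sent); /* prints 2 */
	return 0;
}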
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 3804dcbbfab0..5c99274558bf 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -37,6 +37,10 @@ static ctl_table ipv6_table_template[] = {
37 .mode = 0644, 37 .mode = 0644,
38 .proc_handler = &proc_dointvec 38 .proc_handler = &proc_dointvec
39 }, 39 },
40 { .ctl_name = 0 }
41};
42
43static ctl_table ipv6_table[] = {
40 { 44 {
41 .ctl_name = NET_IPV6_MLD_MAX_MSF, 45 .ctl_name = NET_IPV6_MLD_MAX_MSF,
42 .procname = "mld_max_msf", 46 .procname = "mld_max_msf",
@@ -80,12 +84,6 @@ static int ipv6_sysctl_net_init(struct net *net)
80 84
81 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 85 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
82 86
83 /* We don't want this value to be per namespace, it should be global
84 to all namespaces, so make it read-only when we are not in the
85 init network namespace */
86 if (net != &init_net)
87 ipv6_table[3].mode = 0444;
88
89 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, 87 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
90 ipv6_table); 88 ipv6_table);
91 if (!net->ipv6.sysctl.table) 89 if (!net->ipv6.sysctl.table)
@@ -126,12 +124,29 @@ static struct pernet_operations ipv6_sysctl_net_ops = {
126 .exit = ipv6_sysctl_net_exit, 124 .exit = ipv6_sysctl_net_exit,
127}; 125};
128 126
127static struct ctl_table_header *ip6_header;
128
129int ipv6_sysctl_register(void) 129int ipv6_sysctl_register(void)
130{ 130{
131 return register_pernet_subsys(&ipv6_sysctl_net_ops); 131 int err = -ENOMEM;
132
133 ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
134 if (ip6_header == NULL)
135 goto out;
136
137 err = register_pernet_subsys(&ipv6_sysctl_net_ops);
138 if (err)
139 goto err_pernet;
140out:
141 return err;
142
143err_pernet:
144 unregister_net_sysctl_table(ip6_header);
145 goto out;
132} 146}
133 147
134void ipv6_sysctl_unregister(void) 148void ipv6_sysctl_unregister(void)
135{ 149{
150 unregister_net_sysctl_table(ip6_header);
136 unregister_pernet_subsys(&ipv6_sysctl_net_ops); 151 unregister_pernet_subsys(&ipv6_sysctl_net_ops);
137} 152}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 40ea9c36d24b..ca5b93a5c02a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on: 8 * Based on:
11 * linux/net/ipv4/tcp.c 9 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c 10 * linux/net/ipv4/tcp_input.c
@@ -72,8 +70,6 @@
72 70
73static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 71static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); 72static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
75static void tcp_v6_send_check(struct sock *sk, int len,
76 struct sk_buff *skb);
77 73
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 74static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 75
@@ -82,6 +78,12 @@ static struct inet_connection_sock_af_ops ipv6_specific;
82#ifdef CONFIG_TCP_MD5SIG 78#ifdef CONFIG_TCP_MD5SIG
83static struct tcp_sock_af_ops tcp_sock_ipv6_specific; 79static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 80static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
81#else
82static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
83 struct in6_addr *addr)
84{
85 return NULL;
86}
85#endif 87#endif
86 88
87static void tcp_v6_hash(struct sock *sk) 89static void tcp_v6_hash(struct sock *sk)
@@ -321,8 +323,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
321 int err; 323 int err;
322 struct tcp_sock *tp; 324 struct tcp_sock *tp;
323 __u32 seq; 325 __u32 seq;
326 struct net *net = dev_net(skb->dev);
324 327
325 sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr, 328 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
326 th->dest, &hdr->saddr, th->source, skb->dev->ifindex); 329 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
327 330
328 if (sk == NULL) { 331 if (sk == NULL) {
@@ -337,7 +340,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
337 340
338 bh_lock_sock(sk); 341 bh_lock_sock(sk);
339 if (sock_owned_by_user(sk)) 342 if (sock_owned_by_user(sk))
340 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 343 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
341 344
342 if (sk->sk_state == TCP_CLOSE) 345 if (sk->sk_state == TCP_CLOSE)
343 goto out; 346 goto out;
@@ -346,7 +349,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
346 seq = ntohl(th->seq); 349 seq = ntohl(th->seq);
347 if (sk->sk_state != TCP_LISTEN && 350 if (sk->sk_state != TCP_LISTEN &&
348 !between(seq, tp->snd_una, tp->snd_nxt)) { 351 !between(seq, tp->snd_una, tp->snd_nxt)) {
349 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 352 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
350 goto out; 353 goto out;
351 } 354 }
352 355
@@ -421,7 +424,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
421 BUG_TRAP(req->sk == NULL); 424 BUG_TRAP(req->sk == NULL);
422 425
423 if (seq != tcp_rsk(req)->snt_isn) { 426 if (seq != tcp_rsk(req)->snt_isn) {
424 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); 427 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
425 goto out; 428 goto out;
426 } 429 }
427 430
@@ -736,78 +739,34 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
736static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 739static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737 struct in6_addr *saddr, 740 struct in6_addr *saddr,
738 struct in6_addr *daddr, 741 struct in6_addr *daddr,
739 struct tcphdr *th, int protocol, 742 struct tcphdr *th, unsigned int tcplen)
740 unsigned int tcplen)
741{ 743{
742 struct scatterlist sg[4];
743 __u16 data_len;
744 int block = 0;
745 __sum16 cksum;
746 struct tcp_md5sig_pool *hp; 744 struct tcp_md5sig_pool *hp;
747 struct tcp6_pseudohdr *bp; 745 struct tcp6_pseudohdr *bp;
748 struct hash_desc *desc;
749 int err; 746 int err;
750 unsigned int nbytes = 0;
751 747
752 hp = tcp_get_md5sig_pool(); 748 hp = tcp_get_md5sig_pool();
753 if (!hp) { 749 if (!hp) {
754 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__); 750 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
755 goto clear_hash_noput; 751 goto clear_hash_noput;
756 } 752 }
753
757 bp = &hp->md5_blk.ip6; 754 bp = &hp->md5_blk.ip6;
758 desc = &hp->md5_desc;
759 755
760 /* 1. TCP pseudo-header (RFC2460) */ 756 /* 1. TCP pseudo-header (RFC2460) */
761 ipv6_addr_copy(&bp->saddr, saddr); 757 ipv6_addr_copy(&bp->saddr, saddr);
762 ipv6_addr_copy(&bp->daddr, daddr); 758 ipv6_addr_copy(&bp->daddr, daddr);
763 bp->len = htonl(tcplen); 759 bp->len = htonl(tcplen);
764 bp->protocol = htonl(protocol); 760 bp->protocol = htonl(IPPROTO_TCP);
765 761
766 sg_init_table(sg, 4); 762 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
763 th, tcplen, hp);
767 764
768 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 765 if (err)
769 nbytes += sizeof(*bp);
770
771 /* 2. TCP header, excluding options */
772 cksum = th->check;
773 th->check = 0;
774 sg_set_buf(&sg[block++], th, sizeof(*th));
775 nbytes += sizeof(*th);
776
777 /* 3. TCP segment data (if any) */
778 data_len = tcplen - (th->doff << 2);
779 if (data_len > 0) {
780 u8 *data = (u8 *)th + (th->doff << 2);
781 sg_set_buf(&sg[block++], data, data_len);
782 nbytes += data_len;
783 }
784
785 /* 4. shared key */
786 sg_set_buf(&sg[block++], key->key, key->keylen);
787 nbytes += key->keylen;
788
789 sg_mark_end(&sg[block - 1]);
790
791 /* Now store the hash into the packet */
792 err = crypto_hash_init(desc);
793 if (err) {
794 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
795 goto clear_hash;
796 }
797 err = crypto_hash_update(desc, sg, nbytes);
798 if (err) {
799 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
800 goto clear_hash;
801 }
802 err = crypto_hash_final(desc, md5_hash);
803 if (err) {
804 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
805 goto clear_hash; 766 goto clear_hash;
806 }
807 767
808 /* Reset header, and free up the crypto */ 768 /* Free up the crypto pool */
809 tcp_put_md5sig_pool(); 769 tcp_put_md5sig_pool();
810 th->check = cksum;
811out: 770out:
812 return 0; 771 return 0;
813clear_hash: 772clear_hash:
@@ -821,8 +780,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
821 struct sock *sk, 780 struct sock *sk,
822 struct dst_entry *dst, 781 struct dst_entry *dst,
823 struct request_sock *req, 782 struct request_sock *req,
824 struct tcphdr *th, int protocol, 783 struct tcphdr *th, unsigned int tcplen)
825 unsigned int tcplen)
826{ 784{
827 struct in6_addr *saddr, *daddr; 785 struct in6_addr *saddr, *daddr;
828 786
@@ -835,7 +793,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
835 } 793 }
836 return tcp_v6_do_calc_md5_hash(md5_hash, key, 794 return tcp_v6_do_calc_md5_hash(md5_hash, key,
837 saddr, daddr, 795 saddr, daddr,
838 th, protocol, tcplen); 796 th, tcplen);
839} 797}
840 798
841static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) 799static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
@@ -844,43 +802,12 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
844 struct tcp_md5sig_key *hash_expected; 802 struct tcp_md5sig_key *hash_expected;
845 struct ipv6hdr *ip6h = ipv6_hdr(skb); 803 struct ipv6hdr *ip6h = ipv6_hdr(skb);
846 struct tcphdr *th = tcp_hdr(skb); 804 struct tcphdr *th = tcp_hdr(skb);
847 int length = (th->doff << 2) - sizeof (*th);
848 int genhash; 805 int genhash;
849 u8 *ptr;
850 u8 newhash[16]; 806 u8 newhash[16];
851 807
852 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); 808 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
809 hash_location = tcp_parse_md5sig_option(th);
853 810
854 /* If the TCP option is too short, we can short cut */
855 if (length < TCPOLEN_MD5SIG)
856 return hash_expected ? 1 : 0;
857
858 /* parse options */
859 ptr = (u8*)(th + 1);
860 while (length > 0) {
861 int opcode = *ptr++;
862 int opsize;
863
864 switch(opcode) {
865 case TCPOPT_EOL:
866 goto done_opts;
867 case TCPOPT_NOP:
868 length--;
869 continue;
870 default:
871 opsize = *ptr++;
872 if (opsize < 2 || opsize > length)
873 goto done_opts;
874 if (opcode == TCPOPT_MD5SIG) {
875 hash_location = ptr;
876 goto done_opts;
877 }
878 }
879 ptr += opsize - 2;
880 length -= opsize;
881 }
882
883done_opts:
884 /* do we have a hash as expected? */ 811 /* do we have a hash as expected? */
885 if (!hash_expected) { 812 if (!hash_expected) {
886 if (!hash_location) 813 if (!hash_location)
@@ -910,8 +837,7 @@ done_opts:
910 genhash = tcp_v6_do_calc_md5_hash(newhash, 837 genhash = tcp_v6_do_calc_md5_hash(newhash,
911 hash_expected, 838 hash_expected,
912 &ip6h->saddr, &ip6h->daddr, 839 &ip6h->saddr, &ip6h->daddr,
913 th, sk->sk_protocol, 840 th, skb->len);
914 skb->len);
915 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 841 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
916 if (net_ratelimit()) { 842 if (net_ratelimit()) {
917 printk(KERN_INFO "MD5 Hash %s for " 843 printk(KERN_INFO "MD5 Hash %s for "
@@ -1051,7 +977,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1051 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, 977 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1052 &ipv6_hdr(skb)->daddr, 978 &ipv6_hdr(skb)->daddr,
1053 &ipv6_hdr(skb)->saddr, 979 &ipv6_hdr(skb)->saddr,
1054 t1, IPPROTO_TCP, tot_len); 980 t1, tot_len);
1055 } 981 }
1056#endif 982#endif
1057 983
@@ -1079,8 +1005,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1079 1005
1080 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1006 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1081 ip6_xmit(ctl_sk, buff, &fl, NULL, 0); 1007 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1082 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1008 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1083 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 1009 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1084 return; 1010 return;
1085 } 1011 }
1086 } 1012 }
@@ -1088,8 +1014,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1088 kfree_skb(buff); 1014 kfree_skb(buff);
1089} 1015}
1090 1016
1091static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, 1017static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1092 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) 1018 struct tcp_md5sig_key *key)
1093{ 1019{
1094 struct tcphdr *th = tcp_hdr(skb), *t1; 1020 struct tcphdr *th = tcp_hdr(skb), *t1;
1095 struct sk_buff *buff; 1021 struct sk_buff *buff;
@@ -1098,22 +1024,6 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1098 struct sock *ctl_sk = net->ipv6.tcp_sk; 1024 struct sock *ctl_sk = net->ipv6.tcp_sk;
1099 unsigned int tot_len = sizeof(struct tcphdr); 1025 unsigned int tot_len = sizeof(struct tcphdr);
1100 __be32 *topt; 1026 __be32 *topt;
1101#ifdef CONFIG_TCP_MD5SIG
1102 struct tcp_md5sig_key *key;
1103 struct tcp_md5sig_key tw_key;
1104#endif
1105
1106#ifdef CONFIG_TCP_MD5SIG
1107 if (!tw && skb->sk) {
1108 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1109 } else if (tw && tw->tw_md5_keylen) {
1110 tw_key.key = tw->tw_md5_key;
1111 tw_key.keylen = tw->tw_md5_keylen;
1112 key = &tw_key;
1113 } else {
1114 key = NULL;
1115 }
1116#endif
1117 1027
1118 if (ts) 1028 if (ts)
1119 tot_len += TCPOLEN_TSTAMP_ALIGNED; 1029 tot_len += TCPOLEN_TSTAMP_ALIGNED;
@@ -1157,7 +1067,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1157 tcp_v6_do_calc_md5_hash((__u8 *)topt, key, 1067 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1158 &ipv6_hdr(skb)->daddr, 1068 &ipv6_hdr(skb)->daddr,
1159 &ipv6_hdr(skb)->saddr, 1069 &ipv6_hdr(skb)->saddr,
1160 t1, IPPROTO_TCP, tot_len); 1070 t1, tot_len);
1161 } 1071 }
1162#endif 1072#endif
1163 1073
@@ -1180,7 +1090,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1180 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { 1090 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1181 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1091 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1182 ip6_xmit(ctl_sk, buff, &fl, NULL, 0); 1092 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1183 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1093 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1184 return; 1094 return;
1185 } 1095 }
1186 } 1096 }
@@ -1193,16 +1103,17 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1193 struct inet_timewait_sock *tw = inet_twsk(sk); 1103 struct inet_timewait_sock *tw = inet_twsk(sk);
1194 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 1104 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1195 1105
1196 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1106 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1197 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1107 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1198 tcptw->tw_ts_recent); 1108 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1199 1109
1200 inet_twsk_put(tw); 1110 inet_twsk_put(tw);
1201} 1111}
1202 1112
1203static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1113static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1204{ 1114{
1205 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); 1115 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1116 tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr));
1206} 1117}
1207 1118
1208 1119
@@ -1538,9 +1449,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1538 return newsk; 1449 return newsk;
1539 1450
1540out_overflow: 1451out_overflow:
1541 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); 1452 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1542out: 1453out:
1543 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); 1454 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1544 if (opt && opt != np->opt) 1455 if (opt && opt != np->opt)
1545 sock_kfree_s(sk, opt, opt->tot_len); 1456 sock_kfree_s(sk, opt, opt->tot_len);
1546 dst_release(dst); 1457 dst_release(dst);
@@ -1669,7 +1580,7 @@ discard:
1669 kfree_skb(skb); 1580 kfree_skb(skb);
1670 return 0; 1581 return 0;
1671csum_err: 1582csum_err:
1672 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1583 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1673 goto discard; 1584 goto discard;
1674 1585
1675 1586
@@ -1707,6 +1618,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1707 struct tcphdr *th; 1618 struct tcphdr *th;
1708 struct sock *sk; 1619 struct sock *sk;
1709 int ret; 1620 int ret;
1621 struct net *net = dev_net(skb->dev);
1710 1622
1711 if (skb->pkt_type != PACKET_HOST) 1623 if (skb->pkt_type != PACKET_HOST)
1712 goto discard_it; 1624 goto discard_it;
@@ -1714,7 +1626,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1714 /* 1626 /*
1715 * Count it even if it's bad. 1627 * Count it even if it's bad.
1716 */ 1628 */
1717 TCP_INC_STATS_BH(TCP_MIB_INSEGS); 1629 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1718 1630
1719 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 1631 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1720 goto discard_it; 1632 goto discard_it;
@@ -1738,7 +1650,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1738 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); 1650 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1739 TCP_SKB_CB(skb)->sacked = 0; 1651 TCP_SKB_CB(skb)->sacked = 0;
1740 1652
1741 sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, 1653 sk = __inet6_lookup(net, &tcp_hashinfo,
1742 &ipv6_hdr(skb)->saddr, th->source, 1654 &ipv6_hdr(skb)->saddr, th->source,
1743 &ipv6_hdr(skb)->daddr, ntohs(th->dest), 1655 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1744 inet6_iif(skb)); 1656 inet6_iif(skb));
@@ -1786,7 +1698,7 @@ no_tcp_socket:
1786 1698
1787 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1699 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1788bad_packet: 1700bad_packet:
1789 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1701 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1790 } else { 1702 } else {
1791 tcp_v6_send_reset(NULL, skb); 1703 tcp_v6_send_reset(NULL, skb);
1792 } 1704 }
@@ -1811,7 +1723,7 @@ do_time_wait:
1811 } 1723 }
1812 1724
1813 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1725 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1814 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1726 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1815 inet_twsk_put(inet_twsk(sk)); 1727 inet_twsk_put(inet_twsk(sk));
1816 goto discard_it; 1728 goto discard_it;
1817 } 1729 }
@@ -1960,7 +1872,7 @@ static int tcp_v6_init_sock(struct sock *sk)
1960 return 0; 1872 return 0;
1961} 1873}
1962 1874
1963static int tcp_v6_destroy_sock(struct sock *sk) 1875static void tcp_v6_destroy_sock(struct sock *sk)
1964{ 1876{
1965#ifdef CONFIG_TCP_MD5SIG 1877#ifdef CONFIG_TCP_MD5SIG
1966 /* Clean up the MD5 key list */ 1878 /* Clean up the MD5 key list */
@@ -1968,7 +1880,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
1968 tcp_v6_clear_md5_list(sk); 1880 tcp_v6_clear_md5_list(sk);
1969#endif 1881#endif
1970 tcp_v4_destroy_sock(sk); 1882 tcp_v4_destroy_sock(sk);
1971 return inet6_destroy_sock(sk); 1883 inet6_destroy_sock(sk);
1972} 1884}
1973 1885
1974#ifdef CONFIG_PROC_FS 1886#ifdef CONFIG_PROC_FS
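
The largest tcp_ipv6.c deletion replaces the open-coded walk over the TCP option space with the shared tcp_parse_md5sig_option() helper, and the hand-rolled scatterlist hashing with tcp_calc_md5_hash(). For reference, the removed walk reduces to the following standalone parser; the constants follow RFC 2385 and this is a sketch, not the kernel's implementation:

#include <stddef.h>
#include <stdint.h>

#define TCPOPT_EOL     0
#define TCPOPT_NOP     1
#define TCPOPT_MD5SIG  19
#define TCPOLEN_MD5SIG 18

/* Scan the option bytes for the MD5 signature option and return a
 * pointer to its 16-byte digest, or NULL if absent or malformed. */
static const uint8_t *parse_md5sig_option(const uint8_t *opt, size_t len)
{
	while (len > 0) {
		uint8_t kind = *opt;

		if (kind == TCPOPT_EOL)
			return NULL;
		if (kind == TCPOPT_NOP) {          /* one-byte padding */
			opt++; len--;
			continue;
		}
		if (len < 2 || opt[1] < 2 || opt[1] > len)
			return NULL;               /* malformed option */
		if (kind == TCPOPT_MD5SIG && opt[1] == TCPOLEN_MD5SIG)
			return opt + 2;            /* digest follows kind+len */
		len -= opt[1];
		opt += opt[1];
	}
	return NULL;
}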
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dd309626ae9a..d1477b350f76 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/ipv4/udp.c 8 * Based on linux/ipv4/udp.c
9 * 9 *
10 * $Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 12 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
@@ -67,7 +65,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
67 int badness = -1; 65 int badness = -1;
68 66
69 read_lock(&udp_hash_lock); 67 read_lock(&udp_hash_lock);
70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 68 sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
71 struct inet_sock *inet = inet_sk(sk); 69 struct inet_sock *inet = inet_sk(sk);
72 70
73 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 71 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
@@ -168,7 +166,8 @@ try_again:
168 goto out_free; 166 goto out_free;
169 167
170 if (!peeked) 168 if (!peeked)
171 UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); 169 UDP6_INC_STATS_USER(sock_net(sk),
170 UDP_MIB_INDATAGRAMS, is_udplite);
172 171
173 sock_recv_timestamp(msg, sk, skb); 172 sock_recv_timestamp(msg, sk, skb);
174 173
@@ -215,7 +214,7 @@ out:
215csum_copy_err: 214csum_copy_err:
216 lock_sock(sk); 215 lock_sock(sk);
217 if (!skb_kill_datagram(sk, skb, flags)) 216 if (!skb_kill_datagram(sk, skb, flags))
218 UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); 217 UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
219 release_sock(sk); 218 release_sock(sk);
220 219
221 if (flags & MSG_DONTWAIT) 220 if (flags & MSG_DONTWAIT)
@@ -299,14 +298,17 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
299 298
300 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 299 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
301 /* Note that an ENOMEM error is charged twice */ 300 /* Note that an ENOMEM error is charged twice */
302 if (rc == -ENOMEM) 301 if (rc == -ENOMEM) {
303 UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); 302 UDP6_INC_STATS_BH(sock_net(sk),
303 UDP_MIB_RCVBUFERRORS, is_udplite);
304 atomic_inc(&sk->sk_drops);
305 }
304 goto drop; 306 goto drop;
305 } 307 }
306 308
307 return 0; 309 return 0;
308drop: 310drop:
309 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 311 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
310 kfree_skb(skb); 312 kfree_skb(skb);
311 return -1; 313 return -1;
312} 314}
@@ -355,15 +357,16 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
355 * Note: called only from the BH handler context, 357 * Note: called only from the BH handler context,
356 * so we don't need to lock the hashes. 358 * so we don't need to lock the hashes.
357 */ 359 */
358static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, 360static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
359 struct in6_addr *daddr, struct hlist_head udptable[]) 361 struct in6_addr *saddr, struct in6_addr *daddr,
362 struct hlist_head udptable[])
360{ 363{
361 struct sock *sk, *sk2; 364 struct sock *sk, *sk2;
362 const struct udphdr *uh = udp_hdr(skb); 365 const struct udphdr *uh = udp_hdr(skb);
363 int dif; 366 int dif;
364 367
365 read_lock(&udp_hash_lock); 368 read_lock(&udp_hash_lock);
366 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); 369 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
367 dif = inet6_iif(skb); 370 dif = inet6_iif(skb);
368 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 371 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
369 if (!sk) { 372 if (!sk) {
@@ -437,6 +440,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
437 struct net_device *dev = skb->dev; 440 struct net_device *dev = skb->dev;
438 struct in6_addr *saddr, *daddr; 441 struct in6_addr *saddr, *daddr;
439 u32 ulen = 0; 442 u32 ulen = 0;
443 struct net *net = dev_net(skb->dev);
440 444
441 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 445 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
442 goto short_packet; 446 goto short_packet;
@@ -475,7 +479,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
475 * Multicast receive code 479 * Multicast receive code
476 */ 480 */
477 if (ipv6_addr_is_multicast(daddr)) 481 if (ipv6_addr_is_multicast(daddr))
478 return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable); 482 return __udp6_lib_mcast_deliver(net, skb,
483 saddr, daddr, udptable);
479 484
480 /* Unicast */ 485 /* Unicast */
481 486
@@ -483,7 +488,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
483 * check socket cache ... must talk to Alan about his plans 488 * check socket cache ... must talk to Alan about his plans
484 * for sock caches... i'll skip this for now. 489 * for sock caches... i'll skip this for now.
485 */ 490 */
486 sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source, 491 sk = __udp6_lib_lookup(net, saddr, uh->source,
487 daddr, uh->dest, inet6_iif(skb), udptable); 492 daddr, uh->dest, inet6_iif(skb), udptable);
488 493
489 if (sk == NULL) { 494 if (sk == NULL) {
@@ -492,7 +497,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
492 497
493 if (udp_lib_checksum_complete(skb)) 498 if (udp_lib_checksum_complete(skb))
494 goto discard; 499 goto discard;
495 UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 500 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
501 proto == IPPROTO_UDPLITE);
496 502
497 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 503 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
498 504
@@ -517,7 +523,7 @@ short_packet:
517 ulen, skb->len); 523 ulen, skb->len);
518 524
519discard: 525discard:
520 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 526 UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
521 kfree_skb(skb); 527 kfree_skb(skb);
522 return 0; 528 return 0;
523} 529}
@@ -587,7 +593,8 @@ out:
587 up->len = 0; 593 up->len = 0;
588 up->pending = 0; 594 up->pending = 0;
589 if (!err) 595 if (!err)
590 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); 596 UDP6_INC_STATS_USER(sock_net(sk),
597 UDP_MIB_OUTDATAGRAMS, is_udplite);
591 return err; 598 return err;
592} 599}
593 600
@@ -869,7 +876,8 @@ out:
869 * seems like overkill. 876 * seems like overkill.
870 */ 877 */
871 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 878 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
872 UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); 879 UDP6_INC_STATS_USER(sock_net(sk),
880 UDP_MIB_SNDBUFERRORS, is_udplite);
873 } 881 }
874 return err; 882 return err;
875 883
@@ -881,15 +889,13 @@ do_confirm:
881 goto out; 889 goto out;
882} 890}
883 891
884int udpv6_destroy_sock(struct sock *sk) 892void udpv6_destroy_sock(struct sock *sk)
885{ 893{
886 lock_sock(sk); 894 lock_sock(sk);
887 udp_v6_flush_pending_frames(sk); 895 udp_v6_flush_pending_frames(sk);
888 release_sock(sk); 896 release_sock(sk);
889 897
890 inet6_destroy_sock(sk); 898 inet6_destroy_sock(sk);
891
892 return 0;
893} 899}
894 900
895/* 901/*
@@ -955,7 +961,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
955 srcp = ntohs(inet->sport); 961 srcp = ntohs(inet->sport);
956 seq_printf(seq, 962 seq_printf(seq,
957 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 963 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
958 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n", 964 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
959 bucket, 965 bucket,
960 src->s6_addr32[0], src->s6_addr32[1], 966 src->s6_addr32[0], src->s6_addr32[1],
961 src->s6_addr32[2], src->s6_addr32[3], srcp, 967 src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -967,7 +973,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
967 0, 0L, 0, 973 0, 0L, 0,
968 sock_i_uid(sp), 0, 974 sock_i_uid(sp), 0,
969 sock_i_ino(sp), 975 sock_i_ino(sp),
970 atomic_read(&sp->sk_refcnt), sp); 976 atomic_read(&sp->sk_refcnt), sp,
977 atomic_read(&sp->sk_drops));
971} 978}
972 979
973int udp6_seq_show(struct seq_file *seq, void *v) 980int udp6_seq_show(struct seq_file *seq, void *v)
@@ -978,7 +985,7 @@ int udp6_seq_show(struct seq_file *seq, void *v)
978 "local_address " 985 "local_address "
979 "remote_address " 986 "remote_address "
980 "st tx_queue rx_queue tr tm->when retrnsmt" 987 "st tx_queue rx_queue tr tm->when retrnsmt"
981 " uid timeout inode\n"); 988 " uid timeout inode ref pointer drops\n");
982 else 989 else
983 udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); 990 udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
984 return 0; 991 return 0;
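
udp_hashfn(net, hnum) replaces the bare "hnum & (UDP_HTABLE_SIZE - 1)" so that the same port number hashes to different buckets in different namespaces. A compilable sketch of the shape; net_mix stands in for the kernel's opaque per-namespace net_hash_mix(net) value:

#include <stdio.h>

enum { UDP_HTABLE_SIZE = 128 };

/* Fold a per-namespace value into the bucket index. */
static unsigned int udp_hashfn(unsigned int net_mix, unsigned int hnum)
{
	return (hnum + net_mix) & (UDP_HTABLE_SIZE - 1);
}

int main(void)
{
	/* same port, two namespaces, two different buckets */
	printf("%u %u\n", udp_hashfn(0, 53), udp_hashfn(17, 53));
	return 0;
}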
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 321b81a4d418..92dd7da766d8 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -29,7 +29,7 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
29 struct msghdr *msg, size_t len, 29 struct msghdr *msg, size_t len,
30 int noblock, int flags, int *addr_len); 30 int noblock, int flags, int *addr_len);
31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
32extern int udpv6_destroy_sock(struct sock *sk); 32extern void udpv6_destroy_sock(struct sock *sk);
33 33
34#ifdef CONFIG_PROC_FS 34#ifdef CONFIG_PROC_FS
35extern int udp6_seq_show(struct seq_file *seq, void *v); 35extern int udp6_seq_show(struct seq_file *seq, void *v);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 491efd00a866..f6cdcb348e05 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -2,8 +2,6 @@
2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. 2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6.
3 * See also net/ipv4/udplite.c 3 * See also net/ipv4/udplite.c
4 * 4 *
5 * Version: $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $
6 *
7 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 5 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
8 * 6 *
9 * Changes: 7 * Changes:
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e84a70dd346b..6d8ae03c14f5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -631,8 +631,8 @@ dev_irnet_poll(struct file * file,
631 * This is the way pppd configure us and control us while the PPP 631 * This is the way pppd configure us and control us while the PPP
632 * instance is active. 632 * instance is active.
633 */ 633 */
634static int 634static long
635dev_irnet_ioctl(struct inode * inode, 635dev_irnet_ioctl(
636 struct file * file, 636 struct file * file,
637 unsigned int cmd, 637 unsigned int cmd,
638 unsigned long arg) 638 unsigned long arg)
@@ -663,6 +663,7 @@ dev_irnet_ioctl(struct inode * inode,
663 { 663 {
664 DEBUG(FS_INFO, "Entering PPP discipline.\n"); 664 DEBUG(FS_INFO, "Entering PPP discipline.\n");
665 /* PPP channel setup (ap->chan is configured in dev_irnet_open())*/ 665 /* PPP channel setup (ap->chan is configured in dev_irnet_open())*/
666 lock_kernel();
666 err = ppp_register_channel(&ap->chan); 667 err = ppp_register_channel(&ap->chan);
667 if(err == 0) 668 if(err == 0)
668 { 669 {
@@ -675,12 +676,14 @@ dev_irnet_ioctl(struct inode * inode,
675 } 676 }
676 else 677 else
677 DERROR(FS_ERROR, "Can't setup PPP channel...\n"); 678 DERROR(FS_ERROR, "Can't setup PPP channel...\n");
679 unlock_kernel();
678 } 680 }
679 else 681 else
680 { 682 {
681 /* In theory, should be N_TTY */ 683 /* In theory, should be N_TTY */
682 DEBUG(FS_INFO, "Exiting PPP discipline.\n"); 684 DEBUG(FS_INFO, "Exiting PPP discipline.\n");
683 /* Disconnect from the generic PPP layer */ 685 /* Disconnect from the generic PPP layer */
686 lock_kernel();
684 if(ap->ppp_open) 687 if(ap->ppp_open)
685 { 688 {
686 ap->ppp_open = 0; 689 ap->ppp_open = 0;
@@ -689,24 +692,20 @@ dev_irnet_ioctl(struct inode * inode,
689 else 692 else
690 DERROR(FS_ERROR, "Channel not registered !\n"); 693 DERROR(FS_ERROR, "Channel not registered !\n");
691 err = 0; 694 err = 0;
695 unlock_kernel();
692 } 696 }
693 break; 697 break;
694 698
695 /* Query PPP channel and unit number */ 699 /* Query PPP channel and unit number */
696 case PPPIOCGCHAN: 700 case PPPIOCGCHAN:
697 if(!ap->ppp_open) 701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
698 break; 702 (int __user *)argp))
699 if(put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) 703 err = 0;
700 break;
701 DEBUG(FS_INFO, "Query channel.\n");
702 err = 0;
703 break; 704 break;
704 case PPPIOCGUNIT: 705 case PPPIOCGUNIT:
705 if(!ap->ppp_open) 706 lock_kernel();
706 break; 707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
707 if(put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) 708 (int __user *)argp))
708 break;
709 DEBUG(FS_INFO, "Query unit number.\n");
710 err = 0; 709 err = 0;
710 unlock_kernel();
711 break; 711 break;
712 711
@@ -726,34 +725,39 @@ dev_irnet_ioctl(struct inode * inode,
726 DEBUG(FS_INFO, "Standard PPP ioctl.\n"); 725 DEBUG(FS_INFO, "Standard PPP ioctl.\n");
727 if(!capable(CAP_NET_ADMIN)) 726 if(!capable(CAP_NET_ADMIN))
728 err = -EPERM; 727 err = -EPERM;
729 else 728 else {
729 lock_kernel();
730 err = ppp_irnet_ioctl(&ap->chan, cmd, arg); 730 err = ppp_irnet_ioctl(&ap->chan, cmd, arg);
731 unlock_kernel();
732 }
731 break; 733 break;
732 734
733 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ 735 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */
734 /* Get termios */ 736 /* Get termios */
735 case TCGETS: 737 case TCGETS:
736 DEBUG(FS_INFO, "Get termios.\n"); 738 DEBUG(FS_INFO, "Get termios.\n");
739 lock_kernel();
737#ifndef TCGETS2 740#ifndef TCGETS2
738 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) 741 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
739 break; 742 err = 0;
740#else 743#else
741 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) 744 if(!kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
742 break; 745 err = 0;
743#endif 746#endif
744 err = 0; 747 unlock_kernel();
745 break; 748 break;
746 /* Set termios */ 749 /* Set termios */
747 case TCSETSF: 750 case TCSETSF:
748 DEBUG(FS_INFO, "Set termios.\n"); 751 DEBUG(FS_INFO, "Set termios.\n");
752 lock_kernel();
749#ifndef TCGETS2 753#ifndef TCGETS2
750 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) 754 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
751 break; 755 err = 0;
752#else 756#else
753 if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) 757 if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
754 break; 758 err = 0;
755#endif 759#endif
756 err = 0; 760 unlock_kernel();
757 break; 761 break;
758 762
759 /* Set DTR/RTS */ 763 /* Set DTR/RTS */
@@ -776,7 +780,9 @@ dev_irnet_ioctl(struct inode * inode,
776 * We should also worry that we don't accept junk here and that 780 * We should also worry that we don't accept junk here and that
777 * we get rid of our own buffers */ 781 * we get rid of our own buffers */
778#ifdef FLUSH_TO_PPP 782#ifdef FLUSH_TO_PPP
783 lock_kernel();
779 ppp_output_wakeup(&ap->chan); 784 ppp_output_wakeup(&ap->chan);
785 unlock_kernel();
780#endif /* FLUSH_TO_PPP */ 786#endif /* FLUSH_TO_PPP */
781 err = 0; 787 err = 0;
782 break; 788 break;
@@ -791,7 +797,7 @@ dev_irnet_ioctl(struct inode * inode,
791 797
792 default: 798 default:
793 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); 799 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd);
794 err = -ENOIOCTLCMD; 800 err = -ENOTTY;
795 } 801 }
796 802
797 DEXIT(FS_TRACE, " - err = 0x%X\n", err); 803 DEXIT(FS_TRACE, " - err = 0x%X\n", err);
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d2beb7df8f7f..d9f8bd4ebd05 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -76,9 +76,8 @@ static ssize_t
76static unsigned int 76static unsigned int
77 dev_irnet_poll(struct file *, 77 dev_irnet_poll(struct file *,
78 poll_table *); 78 poll_table *);
79static int 79static long
80 dev_irnet_ioctl(struct inode *, 80 dev_irnet_ioctl(struct file *,
81 struct file *,
82 unsigned int, 81 unsigned int,
83 unsigned long); 82 unsigned long);
84/* ------------------------ PPP INTERFACE ------------------------ */ 83/* ------------------------ PPP INTERFACE ------------------------ */
@@ -102,7 +101,7 @@ static struct file_operations irnet_device_fops =
102 .read = dev_irnet_read, 101 .read = dev_irnet_read,
103 .write = dev_irnet_write, 102 .write = dev_irnet_write,
104 .poll = dev_irnet_poll, 103 .poll = dev_irnet_poll,
105 .ioctl = dev_irnet_ioctl, 104 .unlocked_ioctl = dev_irnet_ioctl,
106 .open = dev_irnet_open, 105 .open = dev_irnet_open,
107 .release = dev_irnet_close 106 .release = dev_irnet_close
108 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ 107 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
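
Both irnet files implement the same BKL pushdown: the handler moves from the locked ->ioctl (inode plus file, returning int) to ->unlocked_ioctl (file only, returning long), takes lock_kernel()/unlock_kernel() itself around only the cases that still need it, and returns -ENOTTY rather than -ENOIOCTLCMD for unknown commands, since -ENOIOCTLCMD must not leak to userspace. The shape, condensed from the hunks (not a complete handler):

static long dev_irnet_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	long err = -ENOTTY;   /* unknown cmd: -ENOTTY, not -ENOIOCTLCMD */

	switch (cmd) {
	case PPPIOCGUNIT:
		lock_kernel();        /* lock taken per case, not globally */
		/* ... guarded work sets err ... */
		unlock_kernel();
		break;
	}
	return err;
}

static struct file_operations irnet_device_fops = {
	.unlocked_ioctl = dev_irnet_ioctl,   /* was: .ioctl */
};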
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index bda71015885c..29f7baa25110 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -644,6 +644,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
644 } 644 }
645 645
646 txmsg.class = 0; 646 txmsg.class = 0;
647 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
647 txmsg.tag = iucv->send_tag++; 648 txmsg.tag = iucv->send_tag++;
648 memcpy(skb->cb, &txmsg.tag, 4); 649 memcpy(skb->cb, &txmsg.tag, 4);
649 skb_queue_tail(&iucv->send_skb_q, skb); 650 skb_queue_tail(&iucv->send_skb_q, skb);
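
The added af_iucv line stamps the start of the payload into the 4-byte message class without reading past short messages. As a standalone helper (hypothetical name, same logic):

#include <stdint.h>
#include <string.h>

static void set_msg_class(uint32_t *class, const void *data, size_t len)
{
	*class = 0;                              /* short messages zero-pad */
	memcpy(class, data, len >= 4 ? 4 : len); /* copy at most 4 bytes   */
}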
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index cc34ac769a3c..a598c7384840 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -474,14 +474,14 @@ static void iucv_setmask_mp(void)
474{ 474{
475 int cpu; 475 int cpu;
476 476
477 preempt_disable(); 477 get_online_cpus();
478 for_each_online_cpu(cpu) 478 for_each_online_cpu(cpu)
479 /* Enable all cpus with a declared buffer. */ 479 /* Enable all cpus with a declared buffer. */
480 if (cpu_isset(cpu, iucv_buffer_cpumask) && 480 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
481 !cpu_isset(cpu, iucv_irq_cpumask)) 481 !cpu_isset(cpu, iucv_irq_cpumask))
482 smp_call_function_single(cpu, iucv_allow_cpu, 482 smp_call_function_single(cpu, iucv_allow_cpu,
483 NULL, 1); 483 NULL, 1);
484 preempt_enable(); 484 put_online_cpus();
485} 485}
486 486
487/** 487/**
@@ -521,16 +521,22 @@ static int iucv_enable(void)
521 goto out; 521 goto out;
522 /* Declare per cpu buffers. */ 522 /* Declare per cpu buffers. */
523 rc = -EIO; 523 rc = -EIO;
524 preempt_disable(); 524 get_online_cpus();
525 for_each_online_cpu(cpu) 525 for_each_online_cpu(cpu)
526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
527 preempt_enable();
528 if (cpus_empty(iucv_buffer_cpumask)) 532 if (cpus_empty(iucv_buffer_cpumask))
529 /* No cpu could declare an iucv buffer. */ 533 /* No cpu could declare an iucv buffer. */
530 goto out_path; 534 goto out_path;
535 put_online_cpus();
531 return 0; 536 return 0;
532 537
533out_path: 538out_path:
539 put_online_cpus();
534 kfree(iucv_path_table); 540 kfree(iucv_path_table);
535out: 541out:
536 return rc; 542 return rc;
@@ -545,7 +551,13 @@ out:
545 */ 551 */
546static void iucv_disable(void) 552static void iucv_disable(void)
547{ 553{
558 get_online_cpus();
548 on_each_cpu(iucv_retrieve_cpu, NULL, 1); 559 on_each_cpu(iucv_retrieve_cpu, NULL, 1);
560 put_online_cpus();
549 kfree(iucv_path_table); 561 kfree(iucv_path_table);
550} 562}
551 563
@@ -564,8 +576,11 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
564 return NOTIFY_BAD; 576 return NOTIFY_BAD;
565 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 577 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
566 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 578 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
567 if (!iucv_param[cpu]) 579 if (!iucv_param[cpu]) {
580 kfree(iucv_irq_data[cpu]);
581 iucv_irq_data[cpu] = NULL;
568 return NOTIFY_BAD; 582 return NOTIFY_BAD;
583 }
569 break; 584 break;
570 case CPU_UP_CANCELED: 585 case CPU_UP_CANCELED:
571 case CPU_UP_CANCELED_FROZEN: 586 case CPU_UP_CANCELED_FROZEN:
@@ -598,7 +613,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
598 return NOTIFY_OK; 613 return NOTIFY_OK;
599} 614}
600 615
601static struct notifier_block __cpuinitdata iucv_cpu_notifier = { 616static struct notifier_block __refdata iucv_cpu_notifier = {
602 .notifier_call = iucv_cpu_notify, 617 .notifier_call = iucv_cpu_notify,
603}; 618};
604 619
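
The iucv.c changes swap preempt_disable() for get_online_cpus() because disabling preemption only pins the current CPU; it does not stop another CPU from being hot-unplugged in the middle of the walk. get_online_cpus() holds off hotplug for the whole iteration and, unlike a preemption-off section, the protected region may sleep. The pattern, condensed from the hunks (do_per_cpu_setup is a placeholder):

get_online_cpus();
for_each_online_cpu(cpu)
	smp_call_function_single(cpu, do_per_cpu_setup, NULL, 1);
put_online_cpus();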
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7470e367272b..f0fc46c8038d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -579,25 +579,43 @@ static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
579 return (proto ? proto : IPSEC_PROTO_ANY); 579 return (proto ? proto : IPSEC_PROTO_ANY);
580} 580}
581 581
582static int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, 582static inline int pfkey_sockaddr_len(sa_family_t family)
583 xfrm_address_t *xaddr)
584{ 583{
585 switch (((struct sockaddr*)(addr + 1))->sa_family) { 584 switch (family) {
585 case AF_INET:
586 return sizeof(struct sockaddr_in);
587#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
588 case AF_INET6:
589 return sizeof(struct sockaddr_in6);
590#endif
591 }
592 return 0;
593}
594
595static
596int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
597{
598 switch (sa->sa_family) {
586 case AF_INET: 599 case AF_INET:
587 xaddr->a4 = 600 xaddr->a4 =
588 ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr; 601 ((struct sockaddr_in *)sa)->sin_addr.s_addr;
589 return AF_INET; 602 return AF_INET;
590#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 603#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
591 case AF_INET6: 604 case AF_INET6:
592 memcpy(xaddr->a6, 605 memcpy(xaddr->a6,
593 &((struct sockaddr_in6 *)(addr + 1))->sin6_addr, 606 &((struct sockaddr_in6 *)sa)->sin6_addr,
594 sizeof(struct in6_addr)); 607 sizeof(struct in6_addr));
595 return AF_INET6; 608 return AF_INET6;
596#endif 609#endif
597 default:
598 return 0;
599 } 610 }
600 /* NOTREACHED */ 611 return 0;
612}
613
614static
615int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
616{
617 return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
618 xaddr);
601} 619}
602 620
603static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs) 621static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs)
@@ -642,20 +660,11 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **
642} 660}
643 661
644#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 662#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
663
645static int 664static int
646pfkey_sockaddr_size(sa_family_t family) 665pfkey_sockaddr_size(sa_family_t family)
647{ 666{
648 switch (family) { 667 return PFKEY_ALIGN8(pfkey_sockaddr_len(family));
649 case AF_INET:
650 return PFKEY_ALIGN8(sizeof(struct sockaddr_in));
651#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
652 case AF_INET6:
653 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6));
654#endif
655 default:
656 return 0;
657 }
658 /* NOTREACHED */
659} 668}
660 669
661static inline int pfkey_mode_from_xfrm(int mode) 670static inline int pfkey_mode_from_xfrm(int mode)
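
The two hunks above funnel every per-family sockaddr size decision through the new pfkey_sockaddr_len(), and pfkey_sockaddr_size() shrinks to an 8-byte-align wrapper around it, so supporting another family would touch a single switch. A small userspace mirror of the arithmetic (PFKEY_ALIGN8 is copied from the hunk; the structure sizes are the usual 16 and 28 bytes on Linux, giving padded sizes 16 and 32):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))

/* userspace mirror of the consolidated kernel helper */
static int pfkey_sockaddr_len(sa_family_t family)
{
	switch (family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	}
	return 0;	/* unknown family */
}

int main(void)
{
	printf("AF_INET : %d -> %d\n", pfkey_sockaddr_len(AF_INET),
	       PFKEY_ALIGN8(pfkey_sockaddr_len(AF_INET)));
	printf("AF_INET6: %d -> %d\n", pfkey_sockaddr_len(AF_INET6),
	       PFKEY_ALIGN8(pfkey_sockaddr_len(AF_INET6)));
	return 0;
}
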
@@ -687,6 +696,36 @@ static inline int pfkey_mode_to_xfrm(int mode)
687 } 696 }
688} 697}
689 698
699static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
700 struct sockaddr *sa,
701 unsigned short family)
702{
703 switch (family) {
704 case AF_INET:
705 {
706 struct sockaddr_in *sin = (struct sockaddr_in *)sa;
707 sin->sin_family = AF_INET;
708 sin->sin_port = port;
709 sin->sin_addr.s_addr = xaddr->a4;
710 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
711 return 32;
712 }
713#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
714 case AF_INET6:
715 {
716 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
717 sin6->sin6_family = AF_INET6;
718 sin6->sin6_port = port;
719 sin6->sin6_flowinfo = 0;
720 ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6);
721 sin6->sin6_scope_id = 0;
722 return 128;
723 }
724#endif
725 }
726 return 0;
727}
728
690static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, 729static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
691 int add_keys, int hsc) 730 int add_keys, int hsc)
692{ 731{
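
pfkey_sockaddr_fill(), added above, writes a complete sockaddr for either family and returns the address prefix length -- 32 for AF_INET, 128 for AF_INET6, 0 for anything else -- so one return value doubles as the sadb prefixlen and as the family-supported check. The later hunks in this file all use the same call shape:

	addr->sadb_address_prefixlen =
		pfkey_sockaddr_fill(&x->props.saddr, 0,
				    (struct sockaddr *) (addr + 1),
				    x->props.family);
	if (!addr->sadb_address_prefixlen)
		BUG();	/* family was neither AF_INET nor AF_INET6 */
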
@@ -697,13 +736,9 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
697 struct sadb_address *addr; 736 struct sadb_address *addr;
698 struct sadb_key *key; 737 struct sadb_key *key;
699 struct sadb_x_sa2 *sa2; 738 struct sadb_x_sa2 *sa2;
700 struct sockaddr_in *sin;
701 struct sadb_x_sec_ctx *sec_ctx; 739 struct sadb_x_sec_ctx *sec_ctx;
702 struct xfrm_sec_ctx *xfrm_ctx; 740 struct xfrm_sec_ctx *xfrm_ctx;
703 int ctx_size = 0; 741 int ctx_size = 0;
704#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
705 struct sockaddr_in6 *sin6;
706#endif
707 int size; 742 int size;
708 int auth_key_size = 0; 743 int auth_key_size = 0;
709 int encrypt_key_size = 0; 744 int encrypt_key_size = 0;
@@ -732,14 +767,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
732 } 767 }
733 768
734 /* identity & sensitivity */ 769 /* identity & sensitivity */
735 770 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, x->props.family))
736 if ((x->props.family == AF_INET &&
737 x->sel.saddr.a4 != x->props.saddr.a4)
738#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
739 || (x->props.family == AF_INET6 &&
740 memcmp (x->sel.saddr.a6, x->props.saddr.a6, sizeof (struct in6_addr)))
741#endif
742 )
743 size += sizeof(struct sadb_address) + sockaddr_size; 771 size += sizeof(struct sadb_address) + sockaddr_size;
744 772
745 if (add_keys) { 773 if (add_keys) {
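
The hunk above collapses an open-coded, per-family source-address comparison into a single xfrm_addr_cmp() call, which returns nonzero when the two addresses differ for the given family. For reference, a userspace analogue of what the removed lines computed (a sketch only; xfrm_address_sketch_t stands in for the kernel's xfrm_address_t, and xfrm_addr_cmp() itself lives in the xfrm headers):

#include <string.h>
#include <stdint.h>
#include <netinet/in.h>

typedef union {
	uint32_t a4;
	uint32_t a6[4];
} xfrm_address_sketch_t;

static int addr_differs(const xfrm_address_sketch_t *a,
			const xfrm_address_sketch_t *b,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return a->a4 != b->a4;
	case AF_INET6:
		return memcmp(a->a6, b->a6, sizeof(a->a6)) != 0;
	}
	return 0;	/* unknown family: treat as equal */
}
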
@@ -861,29 +889,12 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
861 protocol's number." - RFC2367 */ 889 protocol's number." - RFC2367 */
862 addr->sadb_address_proto = 0; 890 addr->sadb_address_proto = 0;
863 addr->sadb_address_reserved = 0; 891 addr->sadb_address_reserved = 0;
864 if (x->props.family == AF_INET) {
865 addr->sadb_address_prefixlen = 32;
866 892
867 sin = (struct sockaddr_in *) (addr + 1); 893 addr->sadb_address_prefixlen =
868 sin->sin_family = AF_INET; 894 pfkey_sockaddr_fill(&x->props.saddr, 0,
869 sin->sin_addr.s_addr = x->props.saddr.a4; 895 (struct sockaddr *) (addr + 1),
870 sin->sin_port = 0; 896 x->props.family);
871 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 897 if (!addr->sadb_address_prefixlen)
872 }
873#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
874 else if (x->props.family == AF_INET6) {
875 addr->sadb_address_prefixlen = 128;
876
877 sin6 = (struct sockaddr_in6 *) (addr + 1);
878 sin6->sin6_family = AF_INET6;
879 sin6->sin6_port = 0;
880 sin6->sin6_flowinfo = 0;
881 memcpy(&sin6->sin6_addr, x->props.saddr.a6,
882 sizeof(struct in6_addr));
883 sin6->sin6_scope_id = 0;
884 }
885#endif
886 else
887 BUG(); 898 BUG();
888 899
889 /* dst address */ 900 /* dst address */
@@ -894,70 +905,32 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
894 sizeof(uint64_t); 905 sizeof(uint64_t);
895 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 906 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
896 addr->sadb_address_proto = 0; 907 addr->sadb_address_proto = 0;
897 addr->sadb_address_prefixlen = 32; /* XXX */
898 addr->sadb_address_reserved = 0; 908 addr->sadb_address_reserved = 0;
899 if (x->props.family == AF_INET) {
900 sin = (struct sockaddr_in *) (addr + 1);
901 sin->sin_family = AF_INET;
902 sin->sin_addr.s_addr = x->id.daddr.a4;
903 sin->sin_port = 0;
904 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
905 909
906 if (x->sel.saddr.a4 != x->props.saddr.a4) { 910 addr->sadb_address_prefixlen =
907 addr = (struct sadb_address*) skb_put(skb, 911 pfkey_sockaddr_fill(&x->id.daddr, 0,
908 sizeof(struct sadb_address)+sockaddr_size); 912 (struct sockaddr *) (addr + 1),
909 addr->sadb_address_len = 913 x->props.family);
910 (sizeof(struct sadb_address)+sockaddr_size)/ 914 if (!addr->sadb_address_prefixlen)
911 sizeof(uint64_t); 915 BUG();
912 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
913 addr->sadb_address_proto =
914 pfkey_proto_from_xfrm(x->sel.proto);
915 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
916 addr->sadb_address_reserved = 0;
917
918 sin = (struct sockaddr_in *) (addr + 1);
919 sin->sin_family = AF_INET;
920 sin->sin_addr.s_addr = x->sel.saddr.a4;
921 sin->sin_port = x->sel.sport;
922 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
923 }
924 }
925#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
926 else if (x->props.family == AF_INET6) {
927 addr->sadb_address_prefixlen = 128;
928 916
929 sin6 = (struct sockaddr_in6 *) (addr + 1); 917 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr,
930 sin6->sin6_family = AF_INET6; 918 x->props.family)) {
931 sin6->sin6_port = 0; 919 addr = (struct sadb_address*) skb_put(skb,
932 sin6->sin6_flowinfo = 0; 920 sizeof(struct sadb_address)+sockaddr_size);
933 memcpy(&sin6->sin6_addr, x->id.daddr.a6, sizeof(struct in6_addr)); 921 addr->sadb_address_len =
934 sin6->sin6_scope_id = 0; 922 (sizeof(struct sadb_address)+sockaddr_size)/
923 sizeof(uint64_t);
924 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
925 addr->sadb_address_proto =
926 pfkey_proto_from_xfrm(x->sel.proto);
927 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
928 addr->sadb_address_reserved = 0;
935 929
936 if (memcmp (x->sel.saddr.a6, x->props.saddr.a6, 930 pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport,
937 sizeof(struct in6_addr))) { 931 (struct sockaddr *) (addr + 1),
938 addr = (struct sadb_address *) skb_put(skb, 932 x->props.family);
939 sizeof(struct sadb_address)+sockaddr_size);
940 addr->sadb_address_len =
941 (sizeof(struct sadb_address)+sockaddr_size)/
942 sizeof(uint64_t);
943 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
944 addr->sadb_address_proto =
945 pfkey_proto_from_xfrm(x->sel.proto);
946 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
947 addr->sadb_address_reserved = 0;
948
949 sin6 = (struct sockaddr_in6 *) (addr + 1);
950 sin6->sin6_family = AF_INET6;
951 sin6->sin6_port = x->sel.sport;
952 sin6->sin6_flowinfo = 0;
953 memcpy(&sin6->sin6_addr, x->sel.saddr.a6,
954 sizeof(struct in6_addr));
955 sin6->sin6_scope_id = 0;
956 }
957 } 933 }
958#endif
959 else
960 BUG();
961 934
962 /* auth key */ 935 /* auth key */
963 if (add_keys && auth_key_size) { 936 if (add_keys && auth_key_size) {
@@ -1853,10 +1826,6 @@ static int
1853parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) 1826parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1854{ 1827{
1855 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; 1828 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
1856 struct sockaddr_in *sin;
1857#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1858 struct sockaddr_in6 *sin6;
1859#endif
1860 int mode; 1829 int mode;
1861 1830
1862 if (xp->xfrm_nr >= XFRM_MAX_DEPTH) 1831 if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
@@ -1881,31 +1850,19 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1881 1850
1882 /* addresses present only in tunnel mode */ 1851 /* addresses present only in tunnel mode */
1883 if (t->mode == XFRM_MODE_TUNNEL) { 1852 if (t->mode == XFRM_MODE_TUNNEL) {
1884 struct sockaddr *sa; 1853 u8 *sa = (u8 *) (rq + 1);
1885 sa = (struct sockaddr *)(rq+1); 1854 int family, socklen;
1886 switch(sa->sa_family) { 1855
1887 case AF_INET: 1856 family = pfkey_sockaddr_extract((struct sockaddr *)sa,
1888 sin = (struct sockaddr_in*)sa; 1857 &t->saddr);
1889 t->saddr.a4 = sin->sin_addr.s_addr; 1858 if (!family)
1890 sin++;
1891 if (sin->sin_family != AF_INET)
1892 return -EINVAL;
1893 t->id.daddr.a4 = sin->sin_addr.s_addr;
1894 break;
1895#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1896 case AF_INET6:
1897 sin6 = (struct sockaddr_in6*)sa;
1898 memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1899 sin6++;
1900 if (sin6->sin6_family != AF_INET6)
1901 return -EINVAL;
1902 memcpy(t->id.daddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1903 break;
1904#endif
1905 default:
1906 return -EINVAL; 1859 return -EINVAL;
1907 } 1860
1908 t->encap_family = sa->sa_family; 1861 socklen = pfkey_sockaddr_len(family);
1862 if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
1863 &t->id.daddr) != family)
1864 return -EINVAL;
1865 t->encap_family = family;
1909 } else 1866 } else
1910 t->encap_family = xp->family; 1867 t->encap_family = xp->family;
1911 1868
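
Note the pointer type in the rewritten tunnel-address parsing above: sa is declared u8 *, so sa + socklen advances by exactly pfkey_sockaddr_len(family) bytes -- 16 past a sockaddr_in, 28 past a sockaddr_in6 on the usual ABIs. Had sa stayed a struct sockaddr *, the addition would scale by sizeof(struct sockaddr) and land in the wrong place for IPv6. The parse, restated with comments (parse_sockaddr_pair() in the CONFIG_NET_KEY_MIGRATE section below uses the same stride):

	u8 *sa = (u8 *) (rq + 1);	/* two packed sockaddrs follow rq */
	int family, socklen;

	family = pfkey_sockaddr_extract((struct sockaddr *) sa, &t->saddr);
	if (!family)
		return -EINVAL;

	socklen = pfkey_sockaddr_len(family);
	/* byte arithmetic: step over the first sockaddr only */
	if (pfkey_sockaddr_extract((struct sockaddr *) (sa + socklen),
				   &t->id.daddr) != family)
		return -EINVAL;	/* second family must match the first */
	t->encap_family = family;
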
@@ -1952,9 +1909,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
1952 1909
1953 for (i=0; i<xp->xfrm_nr; i++) { 1910 for (i=0; i<xp->xfrm_nr; i++) {
1954 t = xp->xfrm_vec + i; 1911 t = xp->xfrm_vec + i;
1955 socklen += (t->encap_family == AF_INET ? 1912 socklen += pfkey_sockaddr_len(t->encap_family);
1956 sizeof(struct sockaddr_in) :
1957 sizeof(struct sockaddr_in6));
1958 } 1913 }
1959 1914
1960 return sizeof(struct sadb_msg) + 1915 return sizeof(struct sadb_msg) +
@@ -1987,18 +1942,12 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
1987 struct sadb_address *addr; 1942 struct sadb_address *addr;
1988 struct sadb_lifetime *lifetime; 1943 struct sadb_lifetime *lifetime;
1989 struct sadb_x_policy *pol; 1944 struct sadb_x_policy *pol;
1990 struct sockaddr_in *sin;
1991 struct sadb_x_sec_ctx *sec_ctx; 1945 struct sadb_x_sec_ctx *sec_ctx;
1992 struct xfrm_sec_ctx *xfrm_ctx; 1946 struct xfrm_sec_ctx *xfrm_ctx;
1993#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1994 struct sockaddr_in6 *sin6;
1995#endif
1996 int i; 1947 int i;
1997 int size; 1948 int size;
1998 int sockaddr_size = pfkey_sockaddr_size(xp->family); 1949 int sockaddr_size = pfkey_sockaddr_size(xp->family);
1999 int socklen = (xp->family == AF_INET ? 1950 int socklen = pfkey_sockaddr_len(xp->family);
2000 sizeof(struct sockaddr_in) :
2001 sizeof(struct sockaddr_in6));
2002 1951
2003 size = pfkey_xfrm_policy2msg_size(xp); 1952 size = pfkey_xfrm_policy2msg_size(xp);
2004 1953
@@ -2016,26 +1965,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2016 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1965 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2017 addr->sadb_address_prefixlen = xp->selector.prefixlen_s; 1966 addr->sadb_address_prefixlen = xp->selector.prefixlen_s;
2018 addr->sadb_address_reserved = 0; 1967 addr->sadb_address_reserved = 0;
2019 /* src address */ 1968 if (!pfkey_sockaddr_fill(&xp->selector.saddr,
2020 if (xp->family == AF_INET) { 1969 xp->selector.sport,
2021 sin = (struct sockaddr_in *) (addr + 1); 1970 (struct sockaddr *) (addr + 1),
2022 sin->sin_family = AF_INET; 1971 xp->family))
2023 sin->sin_addr.s_addr = xp->selector.saddr.a4;
2024 sin->sin_port = xp->selector.sport;
2025 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2026 }
2027#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2028 else if (xp->family == AF_INET6) {
2029 sin6 = (struct sockaddr_in6 *) (addr + 1);
2030 sin6->sin6_family = AF_INET6;
2031 sin6->sin6_port = xp->selector.sport;
2032 sin6->sin6_flowinfo = 0;
2033 memcpy(&sin6->sin6_addr, xp->selector.saddr.a6,
2034 sizeof(struct in6_addr));
2035 sin6->sin6_scope_id = 0;
2036 }
2037#endif
2038 else
2039 BUG(); 1972 BUG();
2040 1973
2041 /* dst address */ 1974 /* dst address */
@@ -2048,26 +1981,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2048 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1981 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2049 addr->sadb_address_prefixlen = xp->selector.prefixlen_d; 1982 addr->sadb_address_prefixlen = xp->selector.prefixlen_d;
2050 addr->sadb_address_reserved = 0; 1983 addr->sadb_address_reserved = 0;
2051 if (xp->family == AF_INET) { 1984
2052 sin = (struct sockaddr_in *) (addr + 1); 1985 pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport,
2053 sin->sin_family = AF_INET; 1986 (struct sockaddr *) (addr + 1),
2054 sin->sin_addr.s_addr = xp->selector.daddr.a4; 1987 xp->family);
2055 sin->sin_port = xp->selector.dport;
2056 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2057 }
2058#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2059 else if (xp->family == AF_INET6) {
2060 sin6 = (struct sockaddr_in6 *) (addr + 1);
2061 sin6->sin6_family = AF_INET6;
2062 sin6->sin6_port = xp->selector.dport;
2063 sin6->sin6_flowinfo = 0;
2064 memcpy(&sin6->sin6_addr, xp->selector.daddr.a6,
2065 sizeof(struct in6_addr));
2066 sin6->sin6_scope_id = 0;
2067 }
2068#endif
2069 else
2070 BUG();
2071 1988
2072 /* hard time */ 1989 /* hard time */
2073 lifetime = (struct sadb_lifetime *) skb_put(skb, 1990 lifetime = (struct sadb_lifetime *) skb_put(skb,
@@ -2121,12 +2038,13 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2121 int mode; 2038 int mode;
2122 2039
2123 req_size = sizeof(struct sadb_x_ipsecrequest); 2040 req_size = sizeof(struct sadb_x_ipsecrequest);
2124 if (t->mode == XFRM_MODE_TUNNEL) 2041 if (t->mode == XFRM_MODE_TUNNEL) {
2125 req_size += ((t->encap_family == AF_INET ? 2042 socklen = pfkey_sockaddr_len(t->encap_family);
2126 sizeof(struct sockaddr_in) : 2043 req_size += socklen * 2;
2127 sizeof(struct sockaddr_in6)) * 2); 2044 } else {
2128 else
2129 size -= 2*socklen; 2045 size -= 2*socklen;
2046 socklen = 0;
2047 }
2130 rq = (void*)skb_put(skb, req_size); 2048 rq = (void*)skb_put(skb, req_size);
2131 pol->sadb_x_policy_len += req_size/8; 2049 pol->sadb_x_policy_len += req_size/8;
2132 memset(rq, 0, sizeof(*rq)); 2050 memset(rq, 0, sizeof(*rq));
@@ -2141,42 +2059,15 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2141 if (t->optional) 2059 if (t->optional)
2142 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; 2060 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
2143 rq->sadb_x_ipsecrequest_reqid = t->reqid; 2061 rq->sadb_x_ipsecrequest_reqid = t->reqid;
2062
2144 if (t->mode == XFRM_MODE_TUNNEL) { 2063 if (t->mode == XFRM_MODE_TUNNEL) {
2145 switch (t->encap_family) { 2064 u8 *sa = (void *)(rq + 1);
2146 case AF_INET: 2065 pfkey_sockaddr_fill(&t->saddr, 0,
2147 sin = (void*)(rq+1); 2066 (struct sockaddr *)sa,
2148 sin->sin_family = AF_INET; 2067 t->encap_family);
2149 sin->sin_addr.s_addr = t->saddr.a4; 2068 pfkey_sockaddr_fill(&t->id.daddr, 0,
2150 sin->sin_port = 0; 2069 (struct sockaddr *) (sa + socklen),
2151 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 2070 t->encap_family);
2152 sin++;
2153 sin->sin_family = AF_INET;
2154 sin->sin_addr.s_addr = t->id.daddr.a4;
2155 sin->sin_port = 0;
2156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2157 break;
2158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2159 case AF_INET6:
2160 sin6 = (void*)(rq+1);
2161 sin6->sin6_family = AF_INET6;
2162 sin6->sin6_port = 0;
2163 sin6->sin6_flowinfo = 0;
2164 memcpy(&sin6->sin6_addr, t->saddr.a6,
2165 sizeof(struct in6_addr));
2166 sin6->sin6_scope_id = 0;
2167
2168 sin6++;
2169 sin6->sin6_family = AF_INET6;
2170 sin6->sin6_port = 0;
2171 sin6->sin6_flowinfo = 0;
2172 memcpy(&sin6->sin6_addr, t->id.daddr.a6,
2173 sizeof(struct in6_addr));
2174 sin6->sin6_scope_id = 0;
2175 break;
2176#endif
2177 default:
2178 break;
2179 }
2180 } 2071 }
2181 } 2072 }
2182 2073
@@ -2459,61 +2350,31 @@ out:
2459#ifdef CONFIG_NET_KEY_MIGRATE 2350#ifdef CONFIG_NET_KEY_MIGRATE
2460static int pfkey_sockaddr_pair_size(sa_family_t family) 2351static int pfkey_sockaddr_pair_size(sa_family_t family)
2461{ 2352{
2462 switch (family) { 2353 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
2463 case AF_INET:
2464 return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
2465#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2466 case AF_INET6:
2467 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
2468#endif
2469 default:
2470 return 0;
2471 }
2472 /* NOTREACHED */
2473} 2354}
2474 2355
2475static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq, 2356static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
2476 xfrm_address_t *saddr, xfrm_address_t *daddr, 2357 xfrm_address_t *saddr, xfrm_address_t *daddr,
2477 u16 *family) 2358 u16 *family)
2478{ 2359{
2479 struct sockaddr *sa = (struct sockaddr *)(rq + 1); 2360 u8 *sa = (u8 *) (rq + 1);
2361 int af, socklen;
2362
2480 if (rq->sadb_x_ipsecrequest_len < 2363 if (rq->sadb_x_ipsecrequest_len <
2481 pfkey_sockaddr_pair_size(sa->sa_family)) 2364 pfkey_sockaddr_pair_size(((struct sockaddr *)sa)->sa_family))
2482 return -EINVAL; 2365 return -EINVAL;
2483 2366
2484 switch (sa->sa_family) { 2367 af = pfkey_sockaddr_extract((struct sockaddr *) sa,
2485 case AF_INET: 2368 saddr);
2486 { 2369 if (!af)
2487 struct sockaddr_in *sin;
2488 sin = (struct sockaddr_in *)sa;
2489 if ((sin+1)->sin_family != AF_INET)
2490 return -EINVAL;
2491 memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
2492 sin++;
2493 memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
2494 *family = AF_INET;
2495 break;
2496 }
2497#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2498 case AF_INET6:
2499 {
2500 struct sockaddr_in6 *sin6;
2501 sin6 = (struct sockaddr_in6 *)sa;
2502 if ((sin6+1)->sin6_family != AF_INET6)
2503 return -EINVAL;
2504 memcpy(&saddr->a6, &sin6->sin6_addr,
2505 sizeof(saddr->a6));
2506 sin6++;
2507 memcpy(&daddr->a6, &sin6->sin6_addr,
2508 sizeof(daddr->a6));
2509 *family = AF_INET6;
2510 break;
2511 }
2512#endif
2513 default:
2514 return -EINVAL; 2370 return -EINVAL;
2515 }
2516 2371
2372 socklen = pfkey_sockaddr_len(af);
2373 if (pfkey_sockaddr_extract((struct sockaddr *) (sa + socklen),
2374 daddr) != af)
2375 return -EINVAL;
2376
2377 *family = af;
2517 return 0; 2378 return 0;
2518} 2379}
2519 2380
@@ -3094,10 +2955,6 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3094 struct sadb_msg *hdr; 2955 struct sadb_msg *hdr;
3095 struct sadb_address *addr; 2956 struct sadb_address *addr;
3096 struct sadb_x_policy *pol; 2957 struct sadb_x_policy *pol;
3097 struct sockaddr_in *sin;
3098#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3099 struct sockaddr_in6 *sin6;
3100#endif
3101 int sockaddr_size; 2958 int sockaddr_size;
3102 int size; 2959 int size;
3103 struct sadb_x_sec_ctx *sec_ctx; 2960 struct sadb_x_sec_ctx *sec_ctx;
@@ -3146,29 +3003,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3146 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3003 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3147 addr->sadb_address_proto = 0; 3004 addr->sadb_address_proto = 0;
3148 addr->sadb_address_reserved = 0; 3005 addr->sadb_address_reserved = 0;
3149 if (x->props.family == AF_INET) { 3006 addr->sadb_address_prefixlen =
3150 addr->sadb_address_prefixlen = 32; 3007 pfkey_sockaddr_fill(&x->props.saddr, 0,
3151 3008 (struct sockaddr *) (addr + 1),
3152 sin = (struct sockaddr_in *) (addr + 1); 3009 x->props.family);
3153 sin->sin_family = AF_INET; 3010 if (!addr->sadb_address_prefixlen)
3154 sin->sin_addr.s_addr = x->props.saddr.a4;
3155 sin->sin_port = 0;
3156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3157 }
3158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3159 else if (x->props.family == AF_INET6) {
3160 addr->sadb_address_prefixlen = 128;
3161
3162 sin6 = (struct sockaddr_in6 *) (addr + 1);
3163 sin6->sin6_family = AF_INET6;
3164 sin6->sin6_port = 0;
3165 sin6->sin6_flowinfo = 0;
3166 memcpy(&sin6->sin6_addr,
3167 x->props.saddr.a6, sizeof(struct in6_addr));
3168 sin6->sin6_scope_id = 0;
3169 }
3170#endif
3171 else
3172 BUG(); 3011 BUG();
3173 3012
3174 /* dst address */ 3013 /* dst address */
@@ -3180,29 +3019,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3180 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3019 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3181 addr->sadb_address_proto = 0; 3020 addr->sadb_address_proto = 0;
3182 addr->sadb_address_reserved = 0; 3021 addr->sadb_address_reserved = 0;
3183 if (x->props.family == AF_INET) { 3022 addr->sadb_address_prefixlen =
3184 addr->sadb_address_prefixlen = 32; 3023 pfkey_sockaddr_fill(&x->id.daddr, 0,
3185 3024 (struct sockaddr *) (addr + 1),
3186 sin = (struct sockaddr_in *) (addr + 1); 3025 x->props.family);
3187 sin->sin_family = AF_INET; 3026 if (!addr->sadb_address_prefixlen)
3188 sin->sin_addr.s_addr = x->id.daddr.a4;
3189 sin->sin_port = 0;
3190 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3191 }
3192#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3193 else if (x->props.family == AF_INET6) {
3194 addr->sadb_address_prefixlen = 128;
3195
3196 sin6 = (struct sockaddr_in6 *) (addr + 1);
3197 sin6->sin6_family = AF_INET6;
3198 sin6->sin6_port = 0;
3199 sin6->sin6_flowinfo = 0;
3200 memcpy(&sin6->sin6_addr,
3201 x->id.daddr.a6, sizeof(struct in6_addr));
3202 sin6->sin6_scope_id = 0;
3203 }
3204#endif
3205 else
3206 BUG(); 3027 BUG();
3207 3028
3208 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); 3029 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy));
@@ -3328,10 +3149,6 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3328 struct sadb_sa *sa; 3149 struct sadb_sa *sa;
3329 struct sadb_address *addr; 3150 struct sadb_address *addr;
3330 struct sadb_x_nat_t_port *n_port; 3151 struct sadb_x_nat_t_port *n_port;
3331 struct sockaddr_in *sin;
3332#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3333 struct sockaddr_in6 *sin6;
3334#endif
3335 int sockaddr_size; 3152 int sockaddr_size;
3336 int size; 3153 int size;
3337 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0); 3154 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0);
@@ -3395,29 +3212,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3395 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3212 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3396 addr->sadb_address_proto = 0; 3213 addr->sadb_address_proto = 0;
3397 addr->sadb_address_reserved = 0; 3214 addr->sadb_address_reserved = 0;
3398 if (x->props.family == AF_INET) { 3215 addr->sadb_address_prefixlen =
3399 addr->sadb_address_prefixlen = 32; 3216 pfkey_sockaddr_fill(&x->props.saddr, 0,
3400 3217 (struct sockaddr *) (addr + 1),
3401 sin = (struct sockaddr_in *) (addr + 1); 3218 x->props.family);
3402 sin->sin_family = AF_INET; 3219 if (!addr->sadb_address_prefixlen)
3403 sin->sin_addr.s_addr = x->props.saddr.a4;
3404 sin->sin_port = 0;
3405 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3406 }
3407#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3408 else if (x->props.family == AF_INET6) {
3409 addr->sadb_address_prefixlen = 128;
3410
3411 sin6 = (struct sockaddr_in6 *) (addr + 1);
3412 sin6->sin6_family = AF_INET6;
3413 sin6->sin6_port = 0;
3414 sin6->sin6_flowinfo = 0;
3415 memcpy(&sin6->sin6_addr,
3416 x->props.saddr.a6, sizeof(struct in6_addr));
3417 sin6->sin6_scope_id = 0;
3418 }
3419#endif
3420 else
3421 BUG(); 3220 BUG();
3422 3221
3423 /* NAT_T_SPORT (old port) */ 3222 /* NAT_T_SPORT (old port) */
@@ -3436,28 +3235,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3436 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3235 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3437 addr->sadb_address_proto = 0; 3236 addr->sadb_address_proto = 0;
3438 addr->sadb_address_reserved = 0; 3237 addr->sadb_address_reserved = 0;
3439 if (x->props.family == AF_INET) { 3238 addr->sadb_address_prefixlen =
3440 addr->sadb_address_prefixlen = 32; 3239 pfkey_sockaddr_fill(ipaddr, 0,
3441 3240 (struct sockaddr *) (addr + 1),
3442 sin = (struct sockaddr_in *) (addr + 1); 3241 x->props.family);
3443 sin->sin_family = AF_INET; 3242 if (!addr->sadb_address_prefixlen)
3444 sin->sin_addr.s_addr = ipaddr->a4;
3445 sin->sin_port = 0;
3446 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3447 }
3448#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3449 else if (x->props.family == AF_INET6) {
3450 addr->sadb_address_prefixlen = 128;
3451
3452 sin6 = (struct sockaddr_in6 *) (addr + 1);
3453 sin6->sin6_family = AF_INET6;
3454 sin6->sin6_port = 0;
3455 sin6->sin6_flowinfo = 0;
3456 memcpy(&sin6->sin6_addr, &ipaddr->a6, sizeof(struct in6_addr));
3457 sin6->sin6_scope_id = 0;
3458 }
3459#endif
3460 else
3461 BUG(); 3243 BUG();
3462 3244
3463 /* NAT_T_DPORT (new port) */ 3245 /* NAT_T_DPORT (new port) */
@@ -3475,10 +3257,6 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3475 struct xfrm_selector *sel) 3257 struct xfrm_selector *sel)
3476{ 3258{
3477 struct sadb_address *addr; 3259 struct sadb_address *addr;
3478 struct sockaddr_in *sin;
3479#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3480 struct sockaddr_in6 *sin6;
3481#endif
3482 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize); 3260 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
3483 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8; 3261 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
3484 addr->sadb_address_exttype = type; 3262 addr->sadb_address_exttype = type;
@@ -3487,50 +3265,16 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3487 3265
3488 switch (type) { 3266 switch (type) {
3489 case SADB_EXT_ADDRESS_SRC: 3267 case SADB_EXT_ADDRESS_SRC:
3490 if (sel->family == AF_INET) { 3268 addr->sadb_address_prefixlen = sel->prefixlen_s;
3491 addr->sadb_address_prefixlen = sel->prefixlen_s; 3269 pfkey_sockaddr_fill(&sel->saddr, 0,
3492 sin = (struct sockaddr_in *)(addr + 1); 3270 (struct sockaddr *)(addr + 1),
3493 sin->sin_family = AF_INET; 3271 sel->family);
3494 memcpy(&sin->sin_addr.s_addr, &sel->saddr,
3495 sizeof(sin->sin_addr.s_addr));
3496 sin->sin_port = 0;
3497 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3498 }
3499#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3500 else if (sel->family == AF_INET6) {
3501 addr->sadb_address_prefixlen = sel->prefixlen_s;
3502 sin6 = (struct sockaddr_in6 *)(addr + 1);
3503 sin6->sin6_family = AF_INET6;
3504 sin6->sin6_port = 0;
3505 sin6->sin6_flowinfo = 0;
3506 sin6->sin6_scope_id = 0;
3507 memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
3508 sizeof(sin6->sin6_addr.s6_addr));
3509 }
3510#endif
3511 break; 3272 break;
3512 case SADB_EXT_ADDRESS_DST: 3273 case SADB_EXT_ADDRESS_DST:
3513 if (sel->family == AF_INET) { 3274 addr->sadb_address_prefixlen = sel->prefixlen_d;
3514 addr->sadb_address_prefixlen = sel->prefixlen_d; 3275 pfkey_sockaddr_fill(&sel->daddr, 0,
3515 sin = (struct sockaddr_in *)(addr + 1); 3276 (struct sockaddr *)(addr + 1),
3516 sin->sin_family = AF_INET; 3277 sel->family);
3517 memcpy(&sin->sin_addr.s_addr, &sel->daddr,
3518 sizeof(sin->sin_addr.s_addr));
3519 sin->sin_port = 0;
3520 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3521 }
3522#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3523 else if (sel->family == AF_INET6) {
3524 addr->sadb_address_prefixlen = sel->prefixlen_d;
3525 sin6 = (struct sockaddr_in6 *)(addr + 1);
3526 sin6->sin6_family = AF_INET6;
3527 sin6->sin6_port = 0;
3528 sin6->sin6_flowinfo = 0;
3529 sin6->sin6_scope_id = 0;
3530 memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
3531 sizeof(sin6->sin6_addr.s6_addr));
3532 }
3533#endif
3534 break; 3278 break;
3535 default: 3279 default:
3536 return -EINVAL; 3280 return -EINVAL;
@@ -3545,10 +3289,8 @@ static int set_ipsecrequest(struct sk_buff *skb,
3545 xfrm_address_t *src, xfrm_address_t *dst) 3289 xfrm_address_t *src, xfrm_address_t *dst)
3546{ 3290{
3547 struct sadb_x_ipsecrequest *rq; 3291 struct sadb_x_ipsecrequest *rq;
3548 struct sockaddr_in *sin; 3292 u8 *sa;
3549#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3293 int socklen = pfkey_sockaddr_len(family);
3550 struct sockaddr_in6 *sin6;
3551#endif
3552 int size_req; 3294 int size_req;
3553 3295
3554 size_req = sizeof(struct sadb_x_ipsecrequest) + 3296 size_req = sizeof(struct sadb_x_ipsecrequest) +
@@ -3562,38 +3304,10 @@ static int set_ipsecrequest(struct sk_buff *skb,
3562 rq->sadb_x_ipsecrequest_level = level; 3304 rq->sadb_x_ipsecrequest_level = level;
3563 rq->sadb_x_ipsecrequest_reqid = reqid; 3305 rq->sadb_x_ipsecrequest_reqid = reqid;
3564 3306
3565 switch (family) { 3307 sa = (u8 *) (rq + 1);
3566 case AF_INET: 3308 if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
3567 sin = (struct sockaddr_in *)(rq + 1); 3309 !pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family))
3568 sin->sin_family = AF_INET;
3569 memcpy(&sin->sin_addr.s_addr, src,
3570 sizeof(sin->sin_addr.s_addr));
3571 sin++;
3572 sin->sin_family = AF_INET;
3573 memcpy(&sin->sin_addr.s_addr, dst,
3574 sizeof(sin->sin_addr.s_addr));
3575 break;
3576#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3577 case AF_INET6:
3578 sin6 = (struct sockaddr_in6 *)(rq + 1);
3579 sin6->sin6_family = AF_INET6;
3580 sin6->sin6_port = 0;
3581 sin6->sin6_flowinfo = 0;
3582 sin6->sin6_scope_id = 0;
3583 memcpy(&sin6->sin6_addr.s6_addr, src,
3584 sizeof(sin6->sin6_addr.s6_addr));
3585 sin6++;
3586 sin6->sin6_family = AF_INET6;
3587 sin6->sin6_port = 0;
3588 sin6->sin6_flowinfo = 0;
3589 sin6->sin6_scope_id = 0;
3590 memcpy(&sin6->sin6_addr.s6_addr, dst,
3591 sizeof(sin6->sin6_addr.s6_addr));
3592 break;
3593#endif
3594 default:
3595 return -EINVAL; 3310 return -EINVAL;
3596 }
3597 3311
3598 return 0; 3312 return 0;
3599} 3313}
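
Net effect of the af_key changes: every place that used to open-code sockaddr_in/sockaddr_in6 handling now reads addresses through pfkey_sockaddr_extract() and writes them through pfkey_sockaddr_fill(), with pfkey_sockaddr_len() supplying the stride between packed addresses. A self-contained userspace demonstration of the same pack-then-walk layout (IPv4 only; the buffer mimics the pair of sockaddrs that follows a sadb_x_ipsecrequest):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char buf[2 * sizeof(struct sockaddr_in)];
	struct sockaddr_in *src = (struct sockaddr_in *) buf;
	struct sockaddr_in *dst =
		(struct sockaddr_in *) (buf + sizeof(struct sockaddr_in));

	memset(buf, 0, sizeof(buf));
	src->sin_family = AF_INET;	/* the "fill" direction */
	inet_pton(AF_INET, "192.0.2.1", &src->sin_addr);
	dst->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.2", &dst->sin_addr);

	/* the "extract" direction: walk the pair with a byte stride */
	printf("src %s\n", inet_ntoa(src->sin_addr));
	printf("dst %s\n", inet_ntoa(dst->sin_addr));
	return 0;
}
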
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 97101dcde4c0..5bcc452a247f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -128,10 +128,8 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
128 128
129static void llc_ui_sk_init(struct socket *sock, struct sock *sk) 129static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
130{ 130{
131 sock_graft(sk, sock);
131 sk->sk_type = sock->type; 132 sk->sk_type = sock->type;
132 sk->sk_sleep = &sock->wait;
133 sk->sk_socket = sock;
134 sock->sk = sk;
135 sock->ops = &llc_ui_ops; 133 sock->ops = &llc_ui_ops;
136} 134}
137 135
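
The llc hunk above swaps three hand-rolled assignments for sock_graft(). Paraphrased from include/net/sock.h of this vintage (a sketch, shown only to make clear what the removed lines did by hand; the real helper also runs under sk_callback_lock and calls the security_sock_graft() LSM hook, both of which the open-coded version skipped):

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;	/* was sk->sk_sleep = &sock->wait */
	parent->sk = sk;		/* was sock->sk = sk */
	sk->sk_socket = parent;		/* was sk->sk_socket = sock */
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}
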
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a24b459dd45a..80d693392b0f 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,7 +7,6 @@ config MAC80211
7 select CRC32 7 select CRC32
8 select WIRELESS_EXT 8 select WIRELESS_EXT
9 select CFG80211 9 select CFG80211
10 select NET_SCH_FIFO
11 ---help--- 10 ---help---
12 This option enables the hardware independent IEEE 802.11 11 This option enables the hardware independent IEEE 802.11
13 networking stack. 12 networking stack.
@@ -15,6 +14,14 @@ config MAC80211
15menu "Rate control algorithm selection" 14menu "Rate control algorithm selection"
16 depends on MAC80211 != n 15 depends on MAC80211 != n
17 16
17config MAC80211_RC_PID
18 bool "PID controller based rate control algorithm" if EMBEDDED
19 default y
20 ---help---
21 This option enables a TX rate control algorithm for
22 mac80211 that uses a PID controller to select the TX
23 rate.
24
18choice 25choice
19 prompt "Default rate control algorithm" 26 prompt "Default rate control algorithm"
20 default MAC80211_RC_DEFAULT_PID 27 default MAC80211_RC_DEFAULT_PID
@@ -26,40 +33,19 @@ choice
26 33
27config MAC80211_RC_DEFAULT_PID 34config MAC80211_RC_DEFAULT_PID
28 bool "PID controller based rate control algorithm" 35 bool "PID controller based rate control algorithm"
29 select MAC80211_RC_PID 36 depends on MAC80211_RC_PID
30 ---help--- 37 ---help---
31 Select the PID controller based rate control as the 38 Select the PID controller based rate control as the
32 default rate control algorithm. You should choose 39 default rate control algorithm. You should choose
33 this unless you know what you are doing. 40 this unless you know what you are doing.
34 41
35config MAC80211_RC_DEFAULT_NONE
36 bool "No default algorithm"
37 depends on EMBEDDED
38 help
39 Selecting this option will select no default algorithm
40 and allow you to not build any. Do not choose this
41 option unless you know your driver comes with another
42 suitable algorithm.
43endchoice 42endchoice
44 43
45comment "Selecting 'y' for an algorithm will"
46comment "build the algorithm into mac80211."
47
48config MAC80211_RC_DEFAULT 44config MAC80211_RC_DEFAULT
49 string 45 string
50 default "pid" if MAC80211_RC_DEFAULT_PID 46 default "pid" if MAC80211_RC_DEFAULT_PID
51 default "" 47 default ""
52 48
53config MAC80211_RC_PID
54 tristate "PID controller based rate control algorithm"
55 ---help---
56 This option enables a TX rate control algorithm for
57 mac80211 that uses a PID controller to select the TX
58 rate.
59
60 Say Y or M unless you're sure you want to use a
61 different rate control algorithm.
62
63endmenu 49endmenu
64 50
65config MAC80211_MESH 51config MAC80211_MESH
@@ -89,10 +75,16 @@ config MAC80211_DEBUGFS
89 75
90 Say N unless you know you need this. 76 Say N unless you know you need this.
91 77
78menuconfig MAC80211_DEBUG_MENU
79 bool "Select mac80211 debugging features"
80 depends on MAC80211
81 ---help---
82 This option collects various mac80211 debug settings.
83
92config MAC80211_DEBUG_PACKET_ALIGNMENT 84config MAC80211_DEBUG_PACKET_ALIGNMENT
93 bool "Enable packet alignment debugging" 85 bool "Enable packet alignment debugging"
94 depends on MAC80211 86 depends on MAC80211_DEBUG_MENU
95 help 87 ---help---
96 This option is recommended for driver authors and strongly 88 This option is recommended for driver authors and strongly
97 discouraged for everybody else, it will trigger a warning 89 discouraged for everybody else, it will trigger a warning
98 when a driver hands mac80211 a buffer that is aligned in 90 when a driver hands mac80211 a buffer that is aligned in
@@ -101,33 +93,95 @@ config MAC80211_DEBUG_PACKET_ALIGNMENT
101 93
102 Say N unless you're writing a mac80211 based driver. 94 Say N unless you're writing a mac80211 based driver.
103 95
104config MAC80211_DEBUG 96config MAC80211_NOINLINE
105 bool "Enable debugging output" 97 bool "Do not inline TX/RX handlers"
106 depends on MAC80211 98 depends on MAC80211_DEBUG_MENU
107 ---help--- 99 ---help---
108 This option will enable debug tracing output for the 100 This option affects code generation in mac80211, when
109 ieee80211 network stack. 101 selected some functions are marked "noinline" to allow
102 easier debugging of problems in the transmit and receive
103 paths.
104
105 This option increases code size a bit and inserts a lot
106 of function calls in the code, but is otherwise safe to
107 enable.
110 108
111 If you are not trying to debug or develop the ieee80211 109 If unsure, say N unless you expect to be finding problems
112 subsystem, you most likely want to say N here. 110 in mac80211.
111
112config MAC80211_VERBOSE_DEBUG
113 bool "Verbose debugging output"
114 depends on MAC80211_DEBUG_MENU
115 ---help---
116 Selecting this option causes mac80211 to print out
117 many debugging messages. It should not be selected
118 on production systems as some of the messages are
119 remotely triggerable.
120
121 Do not select this option.
113 122
114config MAC80211_HT_DEBUG 123config MAC80211_HT_DEBUG
115 bool "Enable HT debugging output" 124 bool "Verbose HT debugging"
116 depends on MAC80211_DEBUG 125 depends on MAC80211_DEBUG_MENU
117 ---help--- 126 ---help---
118 This option enables 802.11n High Throughput features 127 This option enables 802.11n High Throughput features
119 debug tracing output. 128 debug tracing output.
120 129
121 If you are not trying to debug or develop the ieee80211 130 It should not be selected on production systems as some
122 subsystem, you most likely want to say N here. 131 of the messages are remotely triggerable.
123 132
124config MAC80211_VERBOSE_DEBUG 133 Do not select this option.
125 bool "Verbose debugging output" 134
126 depends on MAC80211_DEBUG 135config MAC80211_TKIP_DEBUG
136 bool "Verbose TKIP debugging"
137 depends on MAC80211_DEBUG_MENU
138 ---help---
139 Selecting this option causes mac80211 to print out
140 very verbose TKIP debugging messages. It should not
141 be selected on production systems as those messages
142 are remotely triggerable.
143
144 Do not select this option.
145
146config MAC80211_IBSS_DEBUG
147 bool "Verbose IBSS debugging"
148 depends on MAC80211_DEBUG_MENU
149 ---help---
150 Selecting this option causes mac80211 to print out
151 very verbose IBSS debugging messages. It should not
152 be selected on production systems as those messages
153 are remotely triggerable.
154
155 Do not select this option.
156
157config MAC80211_VERBOSE_PS_DEBUG
158 bool "Verbose powersave mode debugging"
159 depends on MAC80211_DEBUG_MENU
160 ---help---
161 Selecting this option causes mac80211 to print out very
162 verbose power save mode debugging messages (when mac80211
163 is an AP and has power saving stations.)
164 It should not be selected on production systems as those
165 messages are remotely triggerable.
166
167 Do not select this option.
168
169config MAC80211_VERBOSE_MPL_DEBUG
170 bool "Verbose mesh peer link debugging"
171 depends on MAC80211_DEBUG_MENU
172 depends on MAC80211_MESH
173 ---help---
174 Selecting this option causes mac80211 to print out very
175 verbose mesh peer link debugging messages (when mac80211
176 is taking part in a mesh network).
177 It should not be selected on production systems as those
178 messages are remotely triggerable.
179
180 Do not select this option.
127 181
128config MAC80211_LOWTX_FRAME_DUMP 182config MAC80211_LOWTX_FRAME_DUMP
129 bool "Debug frame dumping" 183 bool "Debug frame dumping"
130 depends on MAC80211_DEBUG 184 depends on MAC80211_DEBUG_MENU
131 ---help--- 185 ---help---
132 Selecting this option will cause the stack to 186 Selecting this option will cause the stack to
133 print a message for each frame that is handed 187 print a message for each frame that is handed
@@ -138,30 +192,20 @@ config MAC80211_LOWTX_FRAME_DUMP
138 If unsure, say N and insert the debugging code 192 If unsure, say N and insert the debugging code
139 you require into the driver you are debugging. 193 you require into the driver you are debugging.
140 194
141config TKIP_DEBUG
142 bool "TKIP debugging"
143 depends on MAC80211_DEBUG
144
145config MAC80211_DEBUG_COUNTERS 195config MAC80211_DEBUG_COUNTERS
146 bool "Extra statistics for TX/RX debugging" 196 bool "Extra statistics for TX/RX debugging"
147 depends on MAC80211_DEBUG 197 depends on MAC80211_DEBUG_MENU
148 198 depends on MAC80211_DEBUGFS
149config MAC80211_IBSS_DEBUG
150 bool "Support for IBSS testing"
151 depends on MAC80211_DEBUG
152 ---help--- 199 ---help---
153 Say Y here if you intend to debug the IBSS code. 200 Selecting this option causes mac80211 to keep additional
201 and very verbose statistics about TX and RX handler use
202 and show them in debugfs.
154 203
155config MAC80211_VERBOSE_PS_DEBUG 204 If unsure, say N.
156 bool "Verbose powersave mode debugging"
157 depends on MAC80211_DEBUG
158 ---help---
159 Say Y here to print out verbose powersave
160 mode debug messages.
161 205
162config MAC80211_VERBOSE_MPL_DEBUG 206config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
163 bool "Verbose mesh peer link debugging" 207 bool "Verbose Spectrum Management (IEEE 802.11h) debugging"
164 depends on MAC80211_DEBUG && MAC80211_MESH 208 depends on MAC80211_DEBUG_MENU
165 ---help--- 209 ---help---
166 Say Y here to print out verbose mesh peer link 210 Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
167 debug messages. 211 debug messages.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4e5847fd316c..a169b0201d61 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -1,13 +1,5 @@
1obj-$(CONFIG_MAC80211) += mac80211.o 1obj-$(CONFIG_MAC80211) += mac80211.o
2 2
3# objects for PID algorithm
4rc80211_pid-y := rc80211_pid_algo.o
5rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
6
7# build helper for PID algorithm
8rc-pid-y := $(rc80211_pid-y)
9rc-pid-m := rc80211_pid.o
10
11# mac80211 objects 3# mac80211 objects
12mac80211-y := \ 4mac80211-y := \
13 main.o \ 5 main.o \
@@ -26,10 +18,10 @@ mac80211-y := \
26 tx.o \ 18 tx.o \
27 key.o \ 19 key.o \
28 util.o \ 20 util.o \
21 wme.o \
29 event.o 22 event.o
30 23
31mac80211-$(CONFIG_MAC80211_LEDS) += led.o 24mac80211-$(CONFIG_MAC80211_LEDS) += led.o
32mac80211-$(CONFIG_NET_SCHED) += wme.o
33mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 25mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
34 debugfs.o \ 26 debugfs.o \
35 debugfs_sta.o \ 27 debugfs_sta.o \
@@ -42,10 +34,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
42 mesh_plink.o \ 34 mesh_plink.o \
43 mesh_hwmp.o 35 mesh_hwmp.o
44 36
37# objects for PID algorithm
38rc80211_pid-y := rc80211_pid_algo.o
39rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
45 40
46# Build rate control algorithm(s) 41mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
47CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE
48mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID))
49
50# Modular rate algorithms are assigned to mac80211-m - make separate modules
51obj-m += $(mac80211-m)
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 59f1691f62c8..a87cb3ba2df6 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -16,31 +16,28 @@
16#include "key.h" 16#include "key.h"
17#include "aes_ccm.h" 17#include "aes_ccm.h"
18 18
19 19static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a)
20static void ieee80211_aes_encrypt(struct crypto_cipher *tfm,
21 const u8 pt[16], u8 ct[16])
22{
23 crypto_cipher_encrypt_one(tfm, ct, pt);
24}
25
26
27static inline void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *b_0, u8 *aad,
28 u8 *b, u8 *s_0, u8 *a)
29{ 20{
30 int i; 21 int i;
22 u8 *b_0, *aad, *b, *s_0;
31 23
32 ieee80211_aes_encrypt(tfm, b_0, b); 24 b_0 = scratch + 3 * AES_BLOCK_LEN;
25 aad = scratch + 4 * AES_BLOCK_LEN;
26 b = scratch;
27 s_0 = scratch + AES_BLOCK_LEN;
28
29 crypto_cipher_encrypt_one(tfm, b, b_0);
33 30
34 /* Extra Authenticate-only data (always two AES blocks) */ 31 /* Extra Authenticate-only data (always two AES blocks) */
35 for (i = 0; i < AES_BLOCK_LEN; i++) 32 for (i = 0; i < AES_BLOCK_LEN; i++)
36 aad[i] ^= b[i]; 33 aad[i] ^= b[i];
37 ieee80211_aes_encrypt(tfm, aad, b); 34 crypto_cipher_encrypt_one(tfm, b, aad);
38 35
39 aad += AES_BLOCK_LEN; 36 aad += AES_BLOCK_LEN;
40 37
41 for (i = 0; i < AES_BLOCK_LEN; i++) 38 for (i = 0; i < AES_BLOCK_LEN; i++)
42 aad[i] ^= b[i]; 39 aad[i] ^= b[i];
43 ieee80211_aes_encrypt(tfm, aad, a); 40 crypto_cipher_encrypt_one(tfm, a, aad);
44 41
45 /* Mask out bits from auth-only-b_0 */ 42 /* Mask out bits from auth-only-b_0 */
46 b_0[0] &= 0x07; 43 b_0[0] &= 0x07;
@@ -48,24 +45,26 @@ static inline void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *b_0, u8 *aad,
48 /* S_0 is used to encrypt T (= MIC) */ 45 /* S_0 is used to encrypt T (= MIC) */
49 b_0[14] = 0; 46 b_0[14] = 0;
50 b_0[15] = 0; 47 b_0[15] = 0;
51 ieee80211_aes_encrypt(tfm, b_0, s_0); 48 crypto_cipher_encrypt_one(tfm, s_0, b_0);
52} 49}
53 50
54 51
55void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, 52void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
56 u8 *b_0, u8 *aad, u8 *data, size_t data_len, 53 u8 *data, size_t data_len,
57 u8 *cdata, u8 *mic) 54 u8 *cdata, u8 *mic)
58{ 55{
59 int i, j, last_len, num_blocks; 56 int i, j, last_len, num_blocks;
60 u8 *pos, *cpos, *b, *s_0, *e; 57 u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad;
61 58
62 b = scratch; 59 b = scratch;
63 s_0 = scratch + AES_BLOCK_LEN; 60 s_0 = scratch + AES_BLOCK_LEN;
64 e = scratch + 2 * AES_BLOCK_LEN; 61 e = scratch + 2 * AES_BLOCK_LEN;
62 b_0 = scratch + 3 * AES_BLOCK_LEN;
63 aad = scratch + 4 * AES_BLOCK_LEN;
65 64
66 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 65 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
67 last_len = data_len % AES_BLOCK_LEN; 66 last_len = data_len % AES_BLOCK_LEN;
68 aes_ccm_prepare(tfm, b_0, aad, b, s_0, b); 67 aes_ccm_prepare(tfm, scratch, b);
69 68
70 /* Process payload blocks */ 69 /* Process payload blocks */
71 pos = data; 70 pos = data;
@@ -77,11 +76,11 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
77 /* Authentication followed by encryption */ 76 /* Authentication followed by encryption */
78 for (i = 0; i < blen; i++) 77 for (i = 0; i < blen; i++)
79 b[i] ^= pos[i]; 78 b[i] ^= pos[i];
80 ieee80211_aes_encrypt(tfm, b, b); 79 crypto_cipher_encrypt_one(tfm, b, b);
81 80
82 b_0[14] = (j >> 8) & 0xff; 81 b_0[14] = (j >> 8) & 0xff;
83 b_0[15] = j & 0xff; 82 b_0[15] = j & 0xff;
84 ieee80211_aes_encrypt(tfm, b_0, e); 83 crypto_cipher_encrypt_one(tfm, e, b_0);
85 for (i = 0; i < blen; i++) 84 for (i = 0; i < blen; i++)
86 *cpos++ = *pos++ ^ e[i]; 85 *cpos++ = *pos++ ^ e[i];
87 } 86 }
@@ -92,19 +91,20 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
92 91
93 92
94int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, 93int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
95 u8 *b_0, u8 *aad, u8 *cdata, size_t data_len, 94 u8 *cdata, size_t data_len, u8 *mic, u8 *data)
96 u8 *mic, u8 *data)
97{ 95{
98 int i, j, last_len, num_blocks; 96 int i, j, last_len, num_blocks;
99 u8 *pos, *cpos, *b, *s_0, *a; 97 u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad;
100 98
101 b = scratch; 99 b = scratch;
102 s_0 = scratch + AES_BLOCK_LEN; 100 s_0 = scratch + AES_BLOCK_LEN;
103 a = scratch + 2 * AES_BLOCK_LEN; 101 a = scratch + 2 * AES_BLOCK_LEN;
102 b_0 = scratch + 3 * AES_BLOCK_LEN;
103 aad = scratch + 4 * AES_BLOCK_LEN;
104 104
105 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 105 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
106 last_len = data_len % AES_BLOCK_LEN; 106 last_len = data_len % AES_BLOCK_LEN;
107 aes_ccm_prepare(tfm, b_0, aad, b, s_0, a); 107 aes_ccm_prepare(tfm, scratch, a);
108 108
109 /* Process payload blocks */ 109 /* Process payload blocks */
110 cpos = cdata; 110 cpos = cdata;
@@ -116,13 +116,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
116 /* Decryption followed by authentication */ 116 /* Decryption followed by authentication */
117 b_0[14] = (j >> 8) & 0xff; 117 b_0[14] = (j >> 8) & 0xff;
118 b_0[15] = j & 0xff; 118 b_0[15] = j & 0xff;
119 ieee80211_aes_encrypt(tfm, b_0, b); 119 crypto_cipher_encrypt_one(tfm, b, b_0);
120 for (i = 0; i < blen; i++) { 120 for (i = 0; i < blen; i++) {
121 *pos = *cpos++ ^ b[i]; 121 *pos = *cpos++ ^ b[i];
122 a[i] ^= *pos++; 122 a[i] ^= *pos++;
123 } 123 }
124 124 crypto_cipher_encrypt_one(tfm, a, a);
125 ieee80211_aes_encrypt(tfm, a, a);
126 } 125 }
127 126
128 for (i = 0; i < CCMP_MIC_LEN; i++) { 127 for (i = 0; i < CCMP_MIC_LEN; i++) {
@@ -134,7 +133,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
134} 133}
135 134
136 135
137struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]) 136struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
138{ 137{
139 struct crypto_cipher *tfm; 138 struct crypto_cipher *tfm;
140 139
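
The aes_ccm rework drops the b_0 and aad parameters: callers now pass a single scratch buffer, and aes_ccm_prepare() derives every working pointer from fixed offsets inside it. The layout, reconstructed from the pointer arithmetic in the hunks above:

/*
 * scratch + 0 * AES_BLOCK_LEN   b    CBC-MAC accumulator
 * scratch + 1 * AES_BLOCK_LEN   s_0  keystream block that encrypts the MIC
 * scratch + 2 * AES_BLOCK_LEN   e/a  per-block keystream (encrypt) or
 *                                    auth state (decrypt)
 * scratch + 3 * AES_BLOCK_LEN   b_0  CCM initial block, reused as counter
 * scratch + 4 * AES_BLOCK_LEN   aad  authenticate-only data, always two
 *                                    blocks per the comment above
 *
 * => the caller's scratch must hold at least 6 * AES_BLOCK_LEN = 96 bytes
 */

The same change inlines the one-line ieee80211_aes_encrypt() wrapper into direct crypto_cipher_encrypt_one() calls.
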
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 885f19030b29..6e7820ef3448 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -14,12 +14,12 @@
14 14
15#define AES_BLOCK_LEN 16 15#define AES_BLOCK_LEN 16
16 16
17struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]); 17struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, 18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
19 u8 *b_0, u8 *aad, u8 *data, size_t data_len, 19 u8 *data, size_t data_len,
20 u8 *cdata, u8 *mic); 20 u8 *cdata, u8 *mic);
21int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, 21int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
22 u8 *b_0, u8 *aad, u8 *cdata, size_t data_len, 22 u8 *cdata, size_t data_len,
23 u8 *mic, u8 *data); 23 u8 *mic, u8 *data);
24void ieee80211_aes_key_free(struct crypto_cipher *tfm); 24void ieee80211_aes_key_free(struct crypto_cipher *tfm);
25 25
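
With the slimmer prototypes, a caller has to stage b_0 and the two AAD blocks inside scratch before invoking the cipher. A hypothetical usage sketch -- build_ccm_blocks(), hdr and pn are invented placeholders for however the caller constructs those blocks (in mac80211 that logic lives in the CCMP code in wpa.c):

	u8 scratch[6 * AES_BLOCK_LEN];

	/* hypothetical helper: writes b_0 at block 3, aad at blocks 4-5 */
	build_ccm_blocks(scratch + 3 * AES_BLOCK_LEN,
			 scratch + 4 * AES_BLOCK_LEN, hdr, pn);
	ieee80211_aes_ccm_encrypt(tfm, scratch, data, data_len, cdata, mic);
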
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a9fce4afdf21..8e7ba0e62cf5 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -50,14 +50,11 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
50 struct ieee80211_sub_if_data *sdata; 50 struct ieee80211_sub_if_data *sdata;
51 int err; 51 int err;
52 52
53 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED))
54 return -ENODEV;
55
56 itype = nl80211_type_to_mac80211_type(type); 53 itype = nl80211_type_to_mac80211_type(type);
57 if (itype == IEEE80211_IF_TYPE_INVALID) 54 if (itype == IEEE80211_IF_TYPE_INVALID)
58 return -EINVAL; 55 return -EINVAL;
59 56
60 err = ieee80211_if_add(local->mdev, name, &dev, itype, params); 57 err = ieee80211_if_add(local, name, &dev, itype, params);
61 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) 58 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags)
62 return err; 59 return err;
63 60
@@ -68,54 +65,41 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
68 65
69static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 66static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
70{ 67{
71 struct ieee80211_local *local = wiphy_priv(wiphy);
72 struct net_device *dev; 68 struct net_device *dev;
73 char *name;
74
75 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED))
76 return -ENODEV;
77 69
78 /* we're under RTNL */ 70 /* we're under RTNL */
79 dev = __dev_get_by_index(&init_net, ifindex); 71 dev = __dev_get_by_index(&init_net, ifindex);
80 if (!dev) 72 if (!dev)
81 return 0; 73 return -ENODEV;
82 74
83 name = dev->name; 75 ieee80211_if_remove(dev);
84 76
85 return ieee80211_if_remove(local->mdev, name, -1); 77 return 0;
86} 78}
87 79
 static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
 				  enum nl80211_iftype type, u32 *flags,
 				  struct vif_params *params)
 {
-	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct net_device *dev;
 	enum ieee80211_if_types itype;
 	struct ieee80211_sub_if_data *sdata;
-
-	if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED))
-		return -ENODEV;
+	int ret;
 
 	/* we're under RTNL */
 	dev = __dev_get_by_index(&init_net, ifindex);
 	if (!dev)
 		return -ENODEV;
 
-	if (netif_running(dev))
-		return -EBUSY;
-
 	itype = nl80211_type_to_mac80211_type(type);
 	if (itype == IEEE80211_IF_TYPE_INVALID)
 		return -EINVAL;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
-		return -EOPNOTSUPP;
-
-	ieee80211_if_reinit(dev);
-	ieee80211_if_set_type(dev, itype);
+	ret = ieee80211_if_change_type(sdata, itype);
+	if (ret)
+		return ret;
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
 		ieee80211_if_sta_set_mesh_id(&sdata->u.sta,
@@ -256,8 +240,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 	case ALG_TKIP:
 		params.cipher = WLAN_CIPHER_SUITE_TKIP;
 
-		iv32 = key->u.tkip.iv32;
-		iv16 = key->u.tkip.iv16;
+		iv32 = key->u.tkip.tx.iv32;
+		iv16 = key->u.tkip.tx.iv16;
 
 		if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
 		    sdata->local->ops->get_tkip_seq)
@@ -485,7 +469,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
 
 	kfree(old);
 
-	return ieee80211_if_config_beacon(sdata->dev);
+	return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
 }
 
 static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -539,7 +523,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
 	synchronize_rcu();
 	kfree(old);
 
-	return ieee80211_if_config_beacon(dev);
+	return ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
 }
 
 /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
@@ -602,6 +586,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
 	 */
 
 	if (params->station_flags & STATION_FLAG_CHANGED) {
+		spin_lock_bh(&sta->lock);
 		sta->flags &= ~WLAN_STA_AUTHORIZED;
 		if (params->station_flags & STATION_FLAG_AUTHORIZED)
 			sta->flags |= WLAN_STA_AUTHORIZED;
@@ -613,6 +598,7 @@
 		sta->flags &= ~WLAN_STA_WME;
 		if (params->station_flags & STATION_FLAG_WME)
 			sta->flags |= WLAN_STA_WME;
+		spin_unlock_bh(&sta->lock);
 	}
 
 	/*
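Aside on the two spin_lock_bh hunks above: sta->flags is written here from user context but read from the RX path, so the whole update sequence is now bracketed by the station's spinlock. Elsewhere in this patch the raw accesses are replaced by accessor helpers (get_sta_flags in debugfs_sta.c and test_sta_flags in key.c, further down). A minimal sketch of what such lock-protected accessors look like, assuming a spinlock member named lock in struct sta_info; the authoritative definitions live in net/mac80211/sta_info.h, which is not part of this excerpt:

/* Sketch only: lock-protected station-flag accessors in the style the
 * hunks above rely on; names and the lock member are assumptions. */
static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
{
	spin_lock_bh(&sta->lock);	/* _bh: flags are also touched from softirq */
	sta->flags |= flags;
	spin_unlock_bh(&sta->lock);
}

static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
{
	u32 ret;

	spin_lock_bh(&sta->lock);
	ret = sta->flags & flags;	/* snapshot under the lock */
	spin_unlock_bh(&sta->lock);

	return ret;
}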
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1cccbfd781f6..ee509f1109e2 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -70,16 +70,6 @@ DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
 
 /* statistics stuff */
 
-static inline int rtnl_lock_local(struct ieee80211_local *local)
-{
-	rtnl_lock();
-	if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) {
-		rtnl_unlock();
-		return -ENODEV;
-	}
-	return 0;
-}
-
 #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...)		\
 	DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value)
 
@@ -96,10 +86,7 @@ static ssize_t format_devstat_counter(struct ieee80211_local *local,
 	if (!local->ops->get_stats)
 		return -EOPNOTSUPP;
 
-	res = rtnl_lock_local(local);
-	if (res)
-		return res;
-
+	rtnl_lock();
 	res = local->ops->get_stats(local_to_hw(local), &stats);
 	rtnl_unlock();
 	if (!res)
@@ -197,45 +184,6 @@ DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
 DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
 		   local->tx_status_drop);
 
-static ssize_t stats_wme_rx_queue_read(struct file *file,
-				       char __user *userbuf,
-				       size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	char buf[NUM_RX_DATA_QUEUES*15], *p = buf;
-	int i;
-
-	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
-		p += scnprintf(p, sizeof(buf)+buf-p,
-			       "%u\n", local->wme_rx_queue[i]);
-
-	return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
-}
-
-static const struct file_operations stats_wme_rx_queue_ops = {
-	.read = stats_wme_rx_queue_read,
-	.open = mac80211_open_file_generic,
-};
-
-static ssize_t stats_wme_tx_queue_read(struct file *file,
-				       char __user *userbuf,
-				       size_t count, loff_t *ppos)
-{
-	struct ieee80211_local *local = file->private_data;
-	char buf[NUM_TX_DATA_QUEUES*15], *p = buf;
-	int i;
-
-	for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
-		p += scnprintf(p, sizeof(buf)+buf-p,
-			       "%u\n", local->wme_tx_queue[i]);
-
-	return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
-}
-
-static const struct file_operations stats_wme_tx_queue_ops = {
-	.read = stats_wme_tx_queue_read,
-	.open = mac80211_open_file_generic,
-};
 #endif
 
 DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
@@ -303,8 +251,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
 	DEBUGFS_STATS_ADD(rx_expand_skb_head2);
 	DEBUGFS_STATS_ADD(rx_handlers_fragments);
 	DEBUGFS_STATS_ADD(tx_status_drop);
-	DEBUGFS_STATS_ADD(wme_tx_queue);
-	DEBUGFS_STATS_ADD(wme_rx_queue);
 #endif
 	DEBUGFS_STATS_ADD(dot11ACKFailureCount);
 	DEBUGFS_STATS_ADD(dot11RTSFailureCount);
@@ -356,8 +302,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
 	DEBUGFS_STATS_DEL(rx_expand_skb_head2);
 	DEBUGFS_STATS_DEL(rx_handlers_fragments);
 	DEBUGFS_STATS_DEL(tx_status_drop);
-	DEBUGFS_STATS_DEL(wme_tx_queue);
-	DEBUGFS_STATS_DEL(wme_rx_queue);
 #endif
 	DEBUGFS_STATS_DEL(dot11ACKFailureCount);
 	DEBUGFS_STATS_DEL(dot11RTSFailureCount);
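For orientation, every statistics file kept above follows one pattern: DEBUGFS_STATS_FILE expands to a read-only debugfs file whose read handler formats a single counter and copies it out. A condensed sketch of that expansion (the function name and buffer size are illustrative, not the literal macro output):

/* Sketch of the pattern behind DEBUGFS_READONLY_FILE, simplified. */
static ssize_t stats_example_read(struct file *file, char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	char buf[20];
	int res;

	/* format one counter into a small on-stack buffer */
	res = scnprintf(buf, sizeof(buf), "%u", local->tx_status_drop);

	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

static const struct file_operations stats_example_ops = {
	.read = stats_example_read,
	.open = mac80211_open_file_generic,
};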
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 19efc3a6a932..7439b63df5d0 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -97,8 +97,8 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
 		break;
 	case ALG_TKIP:
 		len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
-				key->u.tkip.iv32,
-				key->u.tkip.iv16);
+				key->u.tkip.tx.iv32,
+				key->u.tkip.tx.iv16);
 		break;
 	case ALG_CCMP:
 		tpn = key->u.ccmp.tx_pn;
@@ -128,8 +128,8 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 		for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
 			p += scnprintf(p, sizeof(buf)+buf-p,
 				       "%08x %04x\n",
-				       key->u.tkip.iv32_rx[i],
-				       key->u.tkip.iv16_rx[i]);
+				       key->u.tkip.rx[i].iv32,
+				       key->u.tkip.rx[i].iv16);
 		len = p - buf;
 		break;
 	case ALG_CCMP:
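The iv32/iv16 renames in both hunks above reflect a reorganised TKIP key state: one transmit context plus an array of per-queue receive contexts for replay detection. A sketch of the layout implied by the accesses, hedged because the authoritative definition is in net/mac80211/key.h, outside this excerpt:

/* Layout implied by key->u.tkip.tx.iv32 / key->u.tkip.rx[i].iv16 above;
 * the exact field set is an assumption for illustration. */
struct tkip_ctx {
	u32 iv32;	/* current IV32 counter */
	u16 iv16;	/* current IV16 counter */
	u16 p1k[5];	/* cached phase-1 key */
	int initialized;
};

struct example_tkip_key_state {
	struct tkip_ctx tx;				/* one TX counter ... */
	struct tkip_ctx rx[NUM_RX_DATA_QUEUES];		/* ... per-queue RX state */
};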
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e3326d046944..475f89a8aee1 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -155,8 +155,9 @@ static const struct file_operations name##_ops = { \
 	__IEEE80211_IF_WFILE(name)
 
 /* common attributes */
-IEEE80211_IF_FILE(channel_use, channel_use, DEC);
 IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
+IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC);
+IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC);
 
 /* STA/IBSS attributes */
 IEEE80211_IF_FILE(state, u.sta.state, DEC);
@@ -192,8 +193,6 @@ __IEEE80211_IF_FILE(flags);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
 IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC);
-IEEE80211_IF_FILE(force_unicast_rateidx, u.ap.force_unicast_rateidx, DEC);
-IEEE80211_IF_FILE(max_ratectrl_rateidx, u.ap.max_ratectrl_rateidx, DEC);
 
 static ssize_t ieee80211_if_fmt_num_buffered_multicast(
 	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -248,8 +247,10 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
 
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(channel_use, sta);
 	DEBUGFS_ADD(drop_unencrypted, sta);
+	DEBUGFS_ADD(force_unicast_rateidx, ap);
+	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_ADD(state, sta);
 	DEBUGFS_ADD(bssid, sta);
 	DEBUGFS_ADD(prev_bssid, sta);
@@ -269,27 +270,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(channel_use, ap);
 	DEBUGFS_ADD(drop_unencrypted, ap);
+	DEBUGFS_ADD(force_unicast_rateidx, ap);
+	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_ADD(num_sta_ps, ap);
 	DEBUGFS_ADD(dtim_count, ap);
 	DEBUGFS_ADD(num_beacons, ap);
-	DEBUGFS_ADD(force_unicast_rateidx, ap);
-	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
 	DEBUGFS_ADD(num_buffered_multicast, ap);
 }
 
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(channel_use, wds);
 	DEBUGFS_ADD(drop_unencrypted, wds);
+	DEBUGFS_ADD(force_unicast_rateidx, ap);
+	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_ADD(peer, wds);
 }
 
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(channel_use, vlan);
 	DEBUGFS_ADD(drop_unencrypted, vlan);
+	DEBUGFS_ADD(force_unicast_rateidx, ap);
+	DEBUGFS_ADD(max_ratectrl_rateidx, ap);
 }
 
 static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -376,8 +380,10 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
 
 static void del_sta_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_DEL(channel_use, sta);
 	DEBUGFS_DEL(drop_unencrypted, sta);
+	DEBUGFS_DEL(force_unicast_rateidx, ap);
+	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_DEL(state, sta);
 	DEBUGFS_DEL(bssid, sta);
 	DEBUGFS_DEL(prev_bssid, sta);
@@ -397,27 +403,30 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
 
 static void del_ap_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_DEL(channel_use, ap);
 	DEBUGFS_DEL(drop_unencrypted, ap);
+	DEBUGFS_DEL(force_unicast_rateidx, ap);
+	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_DEL(num_sta_ps, ap);
 	DEBUGFS_DEL(dtim_count, ap);
 	DEBUGFS_DEL(num_beacons, ap);
-	DEBUGFS_DEL(force_unicast_rateidx, ap);
-	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
 	DEBUGFS_DEL(num_buffered_multicast, ap);
 }
 
 static void del_wds_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_DEL(channel_use, wds);
 	DEBUGFS_DEL(drop_unencrypted, wds);
+	DEBUGFS_DEL(force_unicast_rateidx, ap);
+	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+
 	DEBUGFS_DEL(peer, wds);
 }
 
 static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_DEL(channel_use, vlan);
 	DEBUGFS_DEL(drop_unencrypted, vlan);
+	DEBUGFS_DEL(force_unicast_rateidx, ap);
+	DEBUGFS_DEL(max_ratectrl_rateidx, ap);
 }
 
 static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -467,12 +476,12 @@ static void del_mesh_config(struct ieee80211_sub_if_data *sdata)
 }
 #endif
 
-static void del_files(struct ieee80211_sub_if_data *sdata, int type)
+static void del_files(struct ieee80211_sub_if_data *sdata)
 {
 	if (!sdata->debugfsdir)
 		return;
 
-	switch (type) {
+	switch (sdata->vif.type) {
 	case IEEE80211_IF_TYPE_MESH_POINT:
 #ifdef CONFIG_MAC80211_MESH
 		del_mesh_stats(sdata);
@@ -512,29 +521,23 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
 	sprintf(buf, "netdev:%s", sdata->dev->name);
 	sdata->debugfsdir = debugfs_create_dir(buf,
 		sdata->local->hw.wiphy->debugfsdir);
+	add_files(sdata);
 }
 
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
 {
-	del_files(sdata, sdata->vif.type);
+	del_files(sdata);
 	debugfs_remove(sdata->debugfsdir);
 	sdata->debugfsdir = NULL;
 }
 
-void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata,
-				      int oldtype)
-{
-	del_files(sdata, oldtype);
-	add_files(sdata);
-}
-
-static int netdev_notify(struct notifier_block * nb,
+static int netdev_notify(struct notifier_block *nb,
 			 unsigned long state,
 			 void *ndev)
 {
 	struct net_device *dev = ndev;
 	struct dentry *dir;
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_sub_if_data *sdata;
 	char buf[10+IFNAMSIZ];
 
 	if (state != NETDEV_CHANGENAME)
@@ -546,6 +549,8 @@ static int netdev_notify(struct notifier_block * nb,
 	if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
 		return 0;
 
+	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
 	sprintf(buf, "netdev:%s", dev->name);
 	dir = sdata->debugfsdir;
 	if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index a690071fde8a..7af731f0b731 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,6 @@
 #ifdef CONFIG_MAC80211_DEBUGFS
 void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata,
-				      int oldtype);
 void ieee80211_debugfs_netdev_init(void);
 void ieee80211_debugfs_netdev_exit(void);
 #else
@@ -17,9 +15,6 @@ static inline void ieee80211_debugfs_add_netdev(
 static inline void ieee80211_debugfs_remove_netdev(
 	struct ieee80211_sub_if_data *sdata)
 {}
-static inline void ieee80211_debugfs_change_if_type(
-	struct ieee80211_sub_if_data *sdata, int oldtype)
-{}
 static inline void ieee80211_debugfs_netdev_init(void)
 {}
 
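Note the convention this header uses: every hook exists twice, a real prototype under CONFIG_MAC80211_DEBUGFS and an empty static inline otherwise, so callers never need #ifdefs; dropping ieee80211_debugfs_change_if_type therefore means deleting both halves, as above. Reduced to its core (hook name hypothetical):

/* Callers use the hook unconditionally; the no-op inline compiles away. */
#ifdef CONFIG_MAC80211_DEBUGFS
void example_debugfs_hook(struct ieee80211_sub_if_data *sdata);
#else
static inline void example_debugfs_hook(struct ieee80211_sub_if_data *sdata)
{}
#endif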
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6d47a1d31b37..79a062782d52 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,9 @@ STA_FILE(tx_fragments, tx_fragments, LU);
 STA_FILE(tx_filtered, tx_filtered_count, LU);
 STA_FILE(tx_retry_failed, tx_retry_failed, LU);
 STA_FILE(tx_retry_count, tx_retry_count, LU);
-STA_FILE(last_rssi, last_rssi, D);
 STA_FILE(last_signal, last_signal, D);
+STA_FILE(last_qual, last_qual, D);
 STA_FILE(last_noise, last_noise, D);
-STA_FILE(channel_use, channel_use, D);
 STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
 
@@ -74,14 +73,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 {
 	char buf[100];
 	struct sta_info *sta = file->private_data;
+	u32 staflags = get_sta_flags(sta);
 	int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
-			    sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "",
-			    sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
-			    sta->flags & WLAN_STA_PS ? "PS\n" : "",
-			    sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
-			    sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
-			    sta->flags & WLAN_STA_WME ? "WME\n" : "",
-			    sta->flags & WLAN_STA_WDS ? "WDS\n" : "");
+			    staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
+			    staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
+			    staflags & WLAN_STA_PS ? "PS\n" : "",
+			    staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
+			    staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
+			    staflags & WLAN_STA_WME ? "WME\n" : "",
+			    staflags & WLAN_STA_WDS ? "WDS\n" : "");
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
 STA_OPS(flags);
@@ -123,36 +123,6 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
 }
 STA_OPS(last_seq_ctrl);
 
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-static ssize_t sta_wme_rx_queue_read(struct file *file, char __user *userbuf,
-				     size_t count, loff_t *ppos)
-{
-	char buf[15*NUM_RX_DATA_QUEUES], *p = buf;
-	int i;
-	struct sta_info *sta = file->private_data;
-	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
-		p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
-			       sta->wme_rx_queue[i]);
-	p += scnprintf(p, sizeof(buf)+buf-p, "\n");
-	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-}
-STA_OPS(wme_rx_queue);
-
-static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf,
-				     size_t count, loff_t *ppos)
-{
-	char buf[15*NUM_TX_DATA_QUEUES], *p = buf;
-	int i;
-	struct sta_info *sta = file->private_data;
-	for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
-		p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
-			       sta->wme_tx_queue[i]);
-	p += scnprintf(p, sizeof(buf)+buf-p, "\n");
-	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
-}
-STA_OPS(wme_tx_queue);
-#endif
-
 static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
 				   size_t count, loff_t *ppos)
 {
@@ -293,10 +263,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
 	DEBUGFS_ADD(num_ps_buf_frames);
 	DEBUGFS_ADD(inactive_ms);
 	DEBUGFS_ADD(last_seq_ctrl);
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	DEBUGFS_ADD(wme_rx_queue);
-	DEBUGFS_ADD(wme_tx_queue);
-#endif
 	DEBUGFS_ADD(agg_status);
 }
 
@@ -306,10 +272,6 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
 	DEBUGFS_DEL(num_ps_buf_frames);
 	DEBUGFS_DEL(inactive_ms);
 	DEBUGFS_DEL(last_seq_ctrl);
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-	DEBUGFS_DEL(wme_rx_queue);
-	DEBUGFS_DEL(wme_tx_queue);
-#endif
 	DEBUGFS_DEL(agg_status);
 
 	debugfs_remove(sta->debugfs.dir);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 006486b26726..a4f9a832722a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,6 +2,7 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005, Devicescape Software, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -23,6 +24,8 @@
 #include <linux/spinlock.h>
 #include <linux/etherdevice.h>
 #include <net/wireless.h>
+#include <net/iw_handler.h>
+#include <net/mac80211.h>
 #include "key.h"
 #include "sta_info.h"
 
@@ -82,7 +85,7 @@ struct ieee80211_sta_bss {
 	u16 capability; /* host byte order */
 	enum ieee80211_band band;
 	int freq;
-	int rssi, signal, noise;
+	int signal, noise, qual;
 	u8 *wpa_ie;
 	size_t wpa_ie_len;
 	u8 *rsn_ie;
@@ -91,6 +94,8 @@
 	size_t wmm_ie_len;
 	u8 *ht_ie;
 	size_t ht_ie_len;
+	u8 *ht_add_ie;
+	size_t ht_add_ie_len;
 #ifdef CONFIG_MAC80211_MESH
 	u8 *mesh_id;
 	size_t mesh_id_len;
@@ -147,7 +152,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
 #define IEEE80211_TX_UNICAST		BIT(1)
 #define IEEE80211_TX_PS_BUFFERED	BIT(2)
 #define IEEE80211_TX_PROBE_LAST_FRAG	BIT(3)
-#define IEEE80211_TX_INJECTED		BIT(4)
 
 struct ieee80211_tx_data {
 	struct sk_buff *skb;
@@ -157,13 +161,12 @@ struct ieee80211_tx_data {
 	struct sta_info *sta;
 	struct ieee80211_key *key;
 
-	struct ieee80211_tx_control *control;
 	struct ieee80211_channel *channel;
-	struct ieee80211_rate *rate;
+	s8 rate_idx;
 	/* use this rate (if set) for last fragment; rate can
 	 * be set to lower rate for the first fragments, e.g.,
 	 * when using CTS protection with IEEE 802.11g. */
-	struct ieee80211_rate *last_frag_rate;
+	s8 last_frag_rate_idx;
 
 	/* Extra fragments (in addition to the first fragment
 	 * in skb) */
@@ -202,32 +205,16 @@ struct ieee80211_rx_data {
 	unsigned int flags;
 	int sent_ps_buffered;
 	int queue;
-	int load;
 	u32 tkip_iv32;
 	u16 tkip_iv16;
 };
 
-/* flags used in struct ieee80211_tx_packet_data.flags */
-#define IEEE80211_TXPD_REQ_TX_STATUS	BIT(0)
-#define IEEE80211_TXPD_DO_NOT_ENCRYPT	BIT(1)
-#define IEEE80211_TXPD_REQUEUE		BIT(2)
-#define IEEE80211_TXPD_EAPOL_FRAME	BIT(3)
-#define IEEE80211_TXPD_AMPDU		BIT(4)
-/* Stored in sk_buff->cb */
-struct ieee80211_tx_packet_data {
-	int ifindex;
-	unsigned long jiffies;
-	unsigned int flags;
-	u8 queue;
-};
-
 struct ieee80211_tx_stored_packet {
-	struct ieee80211_tx_control control;
 	struct sk_buff *skb;
 	struct sk_buff **extra_frag;
-	struct ieee80211_rate *last_frag_rate;
+	s8 last_frag_rate_idx;
 	int num_extra_frag;
-	unsigned int last_frag_rate_ctrl_probe;
+	bool last_frag_rate_ctrl_probe;
 };
 
 struct beacon_data {
@@ -251,8 +238,6 @@ struct ieee80211_if_ap {
 	struct sk_buff_head ps_bc_buf;
 	atomic_t num_sta_ps; /* number of stations in PS mode */
 	int dtim_count;
-	int force_unicast_rateidx; /* forced TX rateidx for unicast frames */
-	int max_ratectrl_rateidx; /* max TX rateidx for rate control */
 	int num_beacons; /* number of TXed beacon frames for this BSS */
 };
 
@@ -262,7 +247,6 @@ struct ieee80211_if_wds {
 };
 
 struct ieee80211_if_vlan {
-	struct ieee80211_sub_if_data *ap;
 	struct list_head list;
 };
 
@@ -436,8 +420,6 @@ struct ieee80211_sub_if_data {
 	 */
 	u64 basic_rates;
 
-	u16 sequence;
-
 	/* Fragment table for host-based reassembly */
 	struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
@@ -446,16 +428,18 @@
 	struct ieee80211_key *keys[NUM_DEFAULT_KEYS];
 	struct ieee80211_key *default_key;
 
+	/* BSS configuration for this interface. */
+	struct ieee80211_bss_conf bss_conf;
+
 	/*
-	 * BSS configuration for this interface.
-	 *
-	 * FIXME: I feel bad putting this here when we already have a
-	 *	  bss pointer, but the bss pointer is just wrong when
-	 *	  you have multiple virtual STA mode interfaces...
-	 *	  This needs to be fixed.
+	 * AP this belongs to: self in AP mode and
+	 * corresponding AP in VLAN mode, NULL for
+	 * all others (might be needed later in IBSS)
 	 */
-	struct ieee80211_bss_conf bss_conf;
-	struct ieee80211_if_ap *bss; /* BSS that this device belongs to */
+	struct ieee80211_if_ap *bss;
+
+	int force_unicast_rateidx; /* forced TX rateidx for unicast frames */
+	int max_ratectrl_rateidx; /* max TX rateidx for rate control */
 
 	union {
 		struct ieee80211_if_ap ap;
@@ -464,14 +448,11 @@
 		struct ieee80211_if_sta sta;
 		u32 mntr_flags;
 	} u;
-	int channel_use;
-	int channel_use_raw;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct dentry *debugfsdir;
 	union {
 		struct {
-			struct dentry *channel_use;
 			struct dentry *drop_unencrypted;
 			struct dentry *state;
 			struct dentry *bssid;
@@ -490,7 +471,6 @@
 			struct dentry *num_beacons_sta;
 		} sta;
 		struct {
-			struct dentry *channel_use;
 			struct dentry *drop_unencrypted;
 			struct dentry *num_sta_ps;
 			struct dentry *dtim_count;
@@ -500,12 +480,10 @@
 			struct dentry *num_buffered_multicast;
 		} ap;
 		struct {
-			struct dentry *channel_use;
 			struct dentry *drop_unencrypted;
 			struct dentry *peer;
 		} wds;
 		struct {
-			struct dentry *channel_use;
 			struct dentry *drop_unencrypted;
 		} vlan;
 		struct {
@@ -553,8 +531,6 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
 	return container_of(p, struct ieee80211_sub_if_data, vif);
 }
 
-#define IEEE80211_DEV_TO_SUB_IF(dev) netdev_priv(dev)
-
 enum {
 	IEEE80211_RX_MSG	= 1,
 	IEEE80211_TX_STATUS_MSG	= 2,
@@ -562,6 +538,9 @@
 	IEEE80211_ADDBA_MSG	= 4,
 };
 
+/* maximum number of hardware queues we support. */
+#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
+
 struct ieee80211_local {
 	/* embed the driver visible part.
 	 * don't cast (use the static inlines below), but we keep
@@ -570,6 +549,8 @@ struct ieee80211_local {
 
 	const struct ieee80211_ops *ops;
 
+	unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
+
 	struct net_device *mdev; /* wmaster# - "master" 802.11 device */
 	int open_count;
 	int monitors, cooked_mntrs;
@@ -581,12 +562,6 @@ struct ieee80211_local {
 	bool tim_in_locked_section; /* see ieee80211_beacon_get() */
 	int tx_headroom; /* required headroom for hardware/radiotap */
 
-	enum {
-		IEEE80211_DEV_UNINITIALIZED = 0,
-		IEEE80211_DEV_REGISTERED,
-		IEEE80211_DEV_UNREGISTERED,
-	} reg_state;
-
 	/* Tasklet and skb queue to process calls from IRQ mode. All frames
 	 * added to skb_queue will be processed, but frames in
 	 * skb_queue_unreliable may be dropped if the total length of these
@@ -610,8 +585,8 @@ struct ieee80211_local {
 	struct sta_info *sta_hash[STA_HASH_SIZE];
 	struct timer_list sta_cleanup;
 
-	unsigned long state[NUM_TX_DATA_QUEUES_AMPDU];
-	struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU];
+	unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
+	struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
 	struct tasklet_struct tx_pending_tasklet;
 
 	/* number of interfaces with corresponding IFF_ flags */
@@ -677,9 +652,6 @@ struct ieee80211_local {
 		    assoc_led_name[32], radio_led_name[32];
 #endif
 
-	u32 channel_use;
-	u32 channel_use_raw;
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct work_struct sta_debugfs_add;
 #endif
@@ -705,8 +677,6 @@ struct ieee80211_local {
 	unsigned int rx_expand_skb_head2;
 	unsigned int rx_handlers_fragments;
 	unsigned int tx_status_drop;
-	unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
-	unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
 #define I802_DEBUG_INC(c) (c)++
 #else /* CONFIG_MAC80211_DEBUG_COUNTERS */
 #define I802_DEBUG_INC(c) do { } while (0)
@@ -764,8 +734,6 @@ struct ieee80211_local {
 		struct dentry *rx_expand_skb_head2;
 		struct dentry *rx_handlers_fragments;
 		struct dentry *tx_status_drop;
-		struct dentry *wme_tx_queue;
-		struct dentry *wme_rx_queue;
 #endif
 		struct dentry *dot11ACKFailureCount;
 		struct dentry *dot11RTSFailureCount;
@@ -778,6 +746,16 @@
 #endif
 };
 
+static inline struct ieee80211_sub_if_data *
+IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
+{
+	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+
+	BUG_ON(!local || local->mdev == dev);
+
+	return netdev_priv(dev);
+}
+
 /* this struct represents 802.11n's RA/TID combination */
 struct ieee80211_ra_tid {
 	u8 ra[ETH_ALEN];
@@ -809,6 +787,10 @@ struct ieee802_11_elems {
 	u8 *preq;
 	u8 *prep;
 	u8 *perr;
+	u8 *ch_switch_elem;
+	u8 *country_elem;
+	u8 *pwr_constr_elem;
+	u8 *quiet_elem;	/* first quiet element */
 
 	/* length of them, respectively */
 	u8 ssid_len;
@@ -833,6 +815,11 @@ struct ieee802_11_elems {
 	u8 preq_len;
 	u8 prep_len;
 	u8 perr_len;
+	u8 ch_switch_elem_len;
+	u8 country_elem_len;
+	u8 pwr_constr_elem_len;
+	u8 quiet_elem_len;
+	u8 num_of_quiet_elem;	/* can be more than one */
 };
 
 static inline struct ieee80211_local *hw_to_local(
@@ -847,11 +834,6 @@ static inline struct ieee80211_hw *local_to_hw(
 	return &local->hw;
 }
 
-enum ieee80211_link_state_t {
-	IEEE80211_LINK_STATE_XOFF = 0,
-	IEEE80211_LINK_STATE_PENDING,
-};
-
 struct sta_attribute {
 	struct attribute attr;
 	ssize_t (*show)(const struct sta_info *, char *buf);
@@ -867,39 +849,16 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
 
 /* ieee80211.c */
 int ieee80211_hw_config(struct ieee80211_local *local);
-int ieee80211_if_config(struct net_device *dev);
-int ieee80211_if_config_beacon(struct net_device *dev);
+int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
-void ieee80211_if_setup(struct net_device *dev);
 u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
 			struct ieee80211_ht_info *req_ht_cap,
 			struct ieee80211_ht_bss_info *req_bss_cap);
 
 /* ieee80211_ioctl.c */
 extern const struct iw_handler_def ieee80211_iw_handler_def;
-
-
-/* Least common multiple of the used rates (in 100 kbps). This is used to
- * calculate rate_inv values for each rate so that only integers are needed. */
-#define CHAN_UTIL_RATE_LCM 95040
-/* 1 usec is 1/8 * (95040/10) = 1188 */
-#define CHAN_UTIL_PER_USEC 1188
-/* Amount of bits to shift the result right to scale the total utilization
- * to values that will not wrap around 32-bit integers. */
-#define CHAN_UTIL_SHIFT 9
-/* Theoretical maximum of channel utilization counter in 10 ms (stat_time=1):
- * (CHAN_UTIL_PER_USEC * 10000) >> CHAN_UTIL_SHIFT = 23203. So dividing the
- * raw value with about 23 should give utilization in 10th of a percentage
- * (1/1000). However, utilization is only estimated and not all intervals
- * between frames etc. are calculated. 18 seems to give numbers that are closer
- * to the real maximum. */
-#define CHAN_UTIL_PER_10MS 18
-#define CHAN_UTIL_HDR_LONG (202 * CHAN_UTIL_PER_USEC)
-#define CHAN_UTIL_HDR_SHORT (40 * CHAN_UTIL_PER_USEC)
-
-
-/* ieee80211_ioctl.c */
 int ieee80211_set_freq(struct net_device *dev, int freq);
+
 /* ieee80211_sta.c */
 void ieee80211_sta_timer(unsigned long data);
 void ieee80211_sta_work(struct work_struct *work);
@@ -912,21 +871,23 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid);
 int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len);
 void ieee80211_sta_req_auth(struct net_device *dev,
 			    struct ieee80211_if_sta *ifsta);
-int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len);
+int ieee80211_sta_scan_results(struct net_device *dev,
+			       struct iw_request_info *info,
+			       char *buf, size_t len);
 ieee80211_rx_result ieee80211_sta_rx_scan(
 	struct net_device *dev, struct sk_buff *skb,
 	struct ieee80211_rx_status *rx_status);
-void ieee80211_rx_bss_list_init(struct net_device *dev);
-void ieee80211_rx_bss_list_deinit(struct net_device *dev);
+void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
+void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
 int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
-struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
-					 struct sk_buff *skb, u8 *bssid,
-					 u8 *addr);
+struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
+					struct sk_buff *skb, u8 *bssid,
+					u8 *addr, u64 supp_rates);
 int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason);
 int ieee80211_sta_disassociate(struct net_device *dev, u16 reason);
 void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
 				      u32 changed);
-void ieee80211_reset_erp_info(struct net_device *dev);
+u32 ieee80211_reset_erp_info(struct net_device *dev);
 int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
 				   struct ieee80211_ht_info *ht_info);
 int ieee80211_ht_addt_info_ie_to_ht_bss_info(
@@ -937,10 +898,10 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
 			  u16 agg_size, u16 timeout);
 void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
 			  u16 initiator, u16 reason_code);
+void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn);
 
 void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
 				      u16 tid, u16 initiator, u16 reason);
-void sta_rx_agg_session_timer_expired(unsigned long data);
 void sta_addba_resp_timer_expired(unsigned long data);
 void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
 u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -958,17 +919,15 @@ static inline void ieee80211_start_mesh(struct net_device *dev)
 {}
 #endif
 
-/* ieee80211_iface.c */
-int ieee80211_if_add(struct net_device *dev, const char *name,
-		     struct net_device **new_dev, int type,
+/* interface handling */
+void ieee80211_if_setup(struct net_device *dev);
+int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+		     struct net_device **new_dev, enum ieee80211_if_types type,
 		     struct vif_params *params);
-void ieee80211_if_set_type(struct net_device *dev, int type);
-void ieee80211_if_reinit(struct net_device *dev);
-void __ieee80211_if_del(struct ieee80211_local *local,
-			struct ieee80211_sub_if_data *sdata);
-int ieee80211_if_remove(struct net_device *dev, const char *name, int id);
-void ieee80211_if_free(struct net_device *dev);
-void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata);
+int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
+			     enum ieee80211_if_types type);
+void ieee80211_if_remove(struct net_device *dev);
+void ieee80211_remove_interfaces(struct ieee80211_local *local);
 
 /* tx handling */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local);
@@ -988,4 +947,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
 void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
 				     struct ieee80211_hdr *hdr);
 
+#ifdef CONFIG_MAC80211_NOINLINE
+#define debug_noinline noinline
+#else
+#define debug_noinline
+#endif
+
 #endif /* IEEE80211_I_H */
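The new queue_pool bitmap sized by QD_MAX_QUEUES suggests that aggregation queues are handed out with the standard linux/bitops.h primitives. A hypothetical allocator under that assumption; the real helpers belong to the queue management code elsewhere in this series, and would additionally need locking against concurrent allocation:

/* Hypothetical allocator over local->queue_pool; illustration only. */
static int example_alloc_ampdu_queue(struct ieee80211_local *local)
{
	int queue = find_first_zero_bit(local->queue_pool, QD_MAX_QUEUES);

	if (queue >= QD_MAX_QUEUES)
		return -EAGAIN;		/* all hardware queues in use */

	__set_bit(queue, local->queue_pool);
	return queue;
}

static void example_free_ampdu_queue(struct ieee80211_local *local, int queue)
{
	__clear_bit(queue, local->queue_pool);
}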
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06e88a5a036d..610ed1d9893a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -2,6 +2,7 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -17,38 +18,164 @@
 #include "debugfs_netdev.h"
 #include "mesh.h"
 
-void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata)
+/*
+ * Called when the netdev is removed or, by the code below, before
+ * the interface type changes.
+ */
+static void ieee80211_teardown_sdata(struct net_device *dev)
 {
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct beacon_data *beacon;
+	struct sk_buff *skb;
+	int flushed;
 	int i;
 
-	/* Default values for sub-interface parameters */
-	sdata->drop_unencrypted = 0;
+	ieee80211_debugfs_remove_netdev(sdata);
+
+	/* free extra data */
+	ieee80211_free_keys(sdata);
+
 	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
-		skb_queue_head_init(&sdata->fragments[i].skb_list);
+		__skb_queue_purge(&sdata->fragments[i].skb_list);
+	sdata->fragment_next = 0;
 
-	INIT_LIST_HEAD(&sdata->key_list);
+	switch (sdata->vif.type) {
+	case IEEE80211_IF_TYPE_AP:
+		beacon = sdata->u.ap.beacon;
+		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+		synchronize_rcu();
+		kfree(beacon);
+
+		while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
+			local->total_ps_buffered--;
+			dev_kfree_skb(skb);
+		}
+
+		break;
+	case IEEE80211_IF_TYPE_MESH_POINT:
+		/* Allow compiler to elide mesh_rmc_free call. */
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			mesh_rmc_free(dev);
+		/* fall through */
+	case IEEE80211_IF_TYPE_STA:
+	case IEEE80211_IF_TYPE_IBSS:
+		kfree(sdata->u.sta.extra_ie);
+		kfree(sdata->u.sta.assocreq_ies);
+		kfree(sdata->u.sta.assocresp_ies);
+		kfree_skb(sdata->u.sta.probe_resp);
+		break;
+	case IEEE80211_IF_TYPE_WDS:
+	case IEEE80211_IF_TYPE_VLAN:
+	case IEEE80211_IF_TYPE_MNTR:
+		break;
+	case IEEE80211_IF_TYPE_INVALID:
+		BUG();
+		break;
+	}
+
+	flushed = sta_info_flush(local, sdata);
+	WARN_ON(flushed);
 }
 
-static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata)
+/*
+ * Helper function to initialise an interface to a specific type.
+ */
+static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
+				  enum ieee80211_if_types type)
 {
-	int i;
+	struct ieee80211_if_sta *ifsta;
 
-	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
-		__skb_queue_purge(&sdata->fragments[i].skb_list);
+	/* clear type-dependent union */
+	memset(&sdata->u, 0, sizeof(sdata->u));
+
+	/* and set some type-dependent values */
+	sdata->vif.type = type;
+
+	/* only monitor differs */
+	sdata->dev->type = ARPHRD_ETHER;
+
+	switch (type) {
+	case IEEE80211_IF_TYPE_AP:
+		skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
+		INIT_LIST_HEAD(&sdata->u.ap.vlans);
+		break;
+	case IEEE80211_IF_TYPE_MESH_POINT:
+	case IEEE80211_IF_TYPE_STA:
+	case IEEE80211_IF_TYPE_IBSS:
+		ifsta = &sdata->u.sta;
+		INIT_WORK(&ifsta->work, ieee80211_sta_work);
+		setup_timer(&ifsta->timer, ieee80211_sta_timer,
+			    (unsigned long) sdata);
+		skb_queue_head_init(&ifsta->skb_queue);
+
+		ifsta->capab = WLAN_CAPABILITY_ESS;
+		ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
+			IEEE80211_AUTH_ALG_SHARED_KEY;
+		ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
+			IEEE80211_STA_AUTO_BSSID_SEL |
+			IEEE80211_STA_AUTO_CHANNEL_SEL;
+		if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
+			ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
+
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			ieee80211_mesh_init_sdata(sdata);
+		break;
+	case IEEE80211_IF_TYPE_MNTR:
+		sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
+		sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
+		sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
+				      MONITOR_FLAG_OTHER_BSS;
+		break;
+	case IEEE80211_IF_TYPE_WDS:
+	case IEEE80211_IF_TYPE_VLAN:
+		break;
+	case IEEE80211_IF_TYPE_INVALID:
+		BUG();
+		break;
 	}
+
+	ieee80211_debugfs_add_netdev(sdata);
+}
+
+int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
+			     enum ieee80211_if_types type)
+{
+	ASSERT_RTNL();
+
+	if (type == sdata->vif.type)
+		return 0;
+
+	/*
+	 * We could, here, on changes between IBSS/STA/MESH modes,
+	 * invoke an MLME function instead that disassociates etc.
+	 * and goes into the requested mode.
+	 */
+
+	if (netif_running(sdata->dev))
+		return -EBUSY;
+
+	/* Purge and reset type-dependent state. */
+	ieee80211_teardown_sdata(sdata->dev);
+	ieee80211_setup_sdata(sdata, type);
+
+	/* reset some values that shouldn't be kept across type changes */
+	sdata->basic_rates = 0;
+	sdata->drop_unencrypted = 0;
+
+	return 0;
 }
 
-/* Must be called with rtnl lock held. */
-int ieee80211_if_add(struct net_device *dev, const char *name,
-		     struct net_device **new_dev, int type,
+int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+		     struct net_device **new_dev, enum ieee80211_if_types type,
 		     struct vif_params *params)
 {
 	struct net_device *ndev;
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_sub_if_data *sdata = NULL;
-	int ret;
+	int ret, i;
 
 	ASSERT_RTNL();
+
 	ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
 			    name, ieee80211_if_setup);
 	if (!ndev)
@@ -68,26 +195,33 @@ int ieee80211_if_add(struct net_device *dev, const char *name,
 		goto fail;
 
 	memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
-	ndev->base_addr = dev->base_addr;
-	ndev->irq = dev->irq;
-	ndev->mem_start = dev->mem_start;
-	ndev->mem_end = dev->mem_end;
 	SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+	/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
+	sdata = netdev_priv(ndev);
 	ndev->ieee80211_ptr = &sdata->wdev;
+
+	/* initialise type-independent data */
 	sdata->wdev.wiphy = local->hw.wiphy;
-	sdata->vif.type = IEEE80211_IF_TYPE_AP;
-	sdata->dev = ndev;
 	sdata->local = local;
-	ieee80211_if_sdata_init(sdata);
+	sdata->dev = ndev;
+
+	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
+		skb_queue_head_init(&sdata->fragments[i].skb_list);
+
+	INIT_LIST_HEAD(&sdata->key_list);
+
+	sdata->force_unicast_rateidx = -1;
+	sdata->max_ratectrl_rateidx = -1;
+
+	/* setup type-dependent data */
+	ieee80211_setup_sdata(sdata, type);
 
 	ret = register_netdevice(ndev);
 	if (ret)
 		goto fail;
 
-	ieee80211_debugfs_add_netdev(sdata);
-	ieee80211_if_set_type(ndev, type);
+	ndev->uninit = ieee80211_teardown_sdata;
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
 	    params && params->mesh_id_len)
@@ -95,11 +229,6 @@ int ieee80211_if_add(struct net_device *dev, const char *name,
 					     params->mesh_id_len,
 					     params->mesh_id);
 
-	/* we're under RTNL so all this is fine */
-	if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) {
-		__ieee80211_if_del(local, sdata);
-		return -ENODEV;
-	}
 	list_add_tail_rcu(&sdata->list, &local->interfaces);
 
 	if (new_dev)
@@ -107,217 +236,34 @@ int ieee80211_if_add(struct net_device *dev, const char *name,
107 236
108 return 0; 237 return 0;
109 238
110fail: 239 fail:
111 free_netdev(ndev); 240 free_netdev(ndev);
112 return ret; 241 return ret;
113} 242}
114 243
115void ieee80211_if_set_type(struct net_device *dev, int type) 244void ieee80211_if_remove(struct net_device *dev)
116{
117 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
118 int oldtype = sdata->vif.type;
119
120 /*
121 * We need to call this function on the master interface
122 * which already has a hard_start_xmit routine assigned
123 * which must not be changed.
124 */
125 if (dev != sdata->local->mdev)
126 dev->hard_start_xmit = ieee80211_subif_start_xmit;
127
128 /*
129 * Called even when register_netdevice fails, it would
130 * oops if assigned before initialising the rest.
131 */
132 dev->uninit = ieee80211_if_reinit;
133
134 /* most have no BSS pointer */
135 sdata->bss = NULL;
136 sdata->vif.type = type;
137
138 sdata->basic_rates = 0;
139
140 switch (type) {
141 case IEEE80211_IF_TYPE_WDS:
142 /* nothing special */
143 break;
144 case IEEE80211_IF_TYPE_VLAN:
145 sdata->u.vlan.ap = NULL;
146 break;
147 case IEEE80211_IF_TYPE_AP:
148 sdata->u.ap.force_unicast_rateidx = -1;
149 sdata->u.ap.max_ratectrl_rateidx = -1;
150 skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
151 sdata->bss = &sdata->u.ap;
152 INIT_LIST_HEAD(&sdata->u.ap.vlans);
153 break;
154 case IEEE80211_IF_TYPE_MESH_POINT:
155 case IEEE80211_IF_TYPE_STA:
156 case IEEE80211_IF_TYPE_IBSS: {
157 struct ieee80211_sub_if_data *msdata;
158 struct ieee80211_if_sta *ifsta;
159
160 ifsta = &sdata->u.sta;
161 INIT_WORK(&ifsta->work, ieee80211_sta_work);
162 setup_timer(&ifsta->timer, ieee80211_sta_timer,
163 (unsigned long) sdata);
164 skb_queue_head_init(&ifsta->skb_queue);
165
166 ifsta->capab = WLAN_CAPABILITY_ESS;
167 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
168 IEEE80211_AUTH_ALG_SHARED_KEY;
169 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
170 IEEE80211_STA_WMM_ENABLED |
171 IEEE80211_STA_AUTO_BSSID_SEL |
172 IEEE80211_STA_AUTO_CHANNEL_SEL;
173
174 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev);
175 sdata->bss = &msdata->u.ap;
176
177 if (ieee80211_vif_is_mesh(&sdata->vif))
178 ieee80211_mesh_init_sdata(sdata);
179 break;
180 }
181 case IEEE80211_IF_TYPE_MNTR:
182 dev->type = ARPHRD_IEEE80211_RADIOTAP;
183 dev->hard_start_xmit = ieee80211_monitor_start_xmit;
184 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
185 MONITOR_FLAG_OTHER_BSS;
186 break;
187 default:
188 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x",
189 dev->name, __func__, type);
190 }
191 ieee80211_debugfs_change_if_type(sdata, oldtype);
192}
193
194/* Must be called with rtnl lock held. */
195void ieee80211_if_reinit(struct net_device *dev)
196{ 245{
197 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
198 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
199 struct sk_buff *skb;
200 int flushed;
201 247
202 ASSERT_RTNL(); 248 ASSERT_RTNL();
203 249
204 ieee80211_free_keys(sdata); 250 list_del_rcu(&sdata->list);
205 251 synchronize_rcu();
206 ieee80211_if_sdata_deinit(sdata);
207
208 /* Need to handle mesh specially to allow eliding the function call */
209 if (ieee80211_vif_is_mesh(&sdata->vif))
210 mesh_rmc_free(dev);
211
212 switch (sdata->vif.type) {
213 case IEEE80211_IF_TYPE_INVALID:
214 /* cannot happen */
215 WARN_ON(1);
216 break;
217 case IEEE80211_IF_TYPE_AP: {
218 /* Remove all virtual interfaces that use this BSS
219 * as their sdata->bss */
220 struct ieee80211_sub_if_data *tsdata, *n;
221 struct beacon_data *beacon;
222
223 list_for_each_entry_safe(tsdata, n, &local->interfaces, list) {
224 if (tsdata != sdata && tsdata->bss == &sdata->u.ap) {
225 printk(KERN_DEBUG "%s: removing virtual "
226 "interface %s because its BSS interface"
227 " is being removed\n",
228 sdata->dev->name, tsdata->dev->name);
229 list_del_rcu(&tsdata->list);
230 /*
231 * We have lots of time and can afford
232 * to sync for each interface
233 */
234 synchronize_rcu();
235 __ieee80211_if_del(local, tsdata);
236 }
237 }
238
239 beacon = sdata->u.ap.beacon;
240 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
241 synchronize_rcu();
242 kfree(beacon);
243
244 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
245 local->total_ps_buffered--;
246 dev_kfree_skb(skb);
247 }
248
249 break;
250 }
251 case IEEE80211_IF_TYPE_WDS:
252 /* nothing to do */
253 break;
254 case IEEE80211_IF_TYPE_MESH_POINT:
255 case IEEE80211_IF_TYPE_STA:
256 case IEEE80211_IF_TYPE_IBSS:
257 kfree(sdata->u.sta.extra_ie);
258 sdata->u.sta.extra_ie = NULL;
259 kfree(sdata->u.sta.assocreq_ies);
260 sdata->u.sta.assocreq_ies = NULL;
261 kfree(sdata->u.sta.assocresp_ies);
262 sdata->u.sta.assocresp_ies = NULL;
263 if (sdata->u.sta.probe_resp) {
264 dev_kfree_skb(sdata->u.sta.probe_resp);
265 sdata->u.sta.probe_resp = NULL;
266 }
267
268 break;
269 case IEEE80211_IF_TYPE_MNTR:
270 dev->type = ARPHRD_ETHER;
271 break;
272 case IEEE80211_IF_TYPE_VLAN:
273 sdata->u.vlan.ap = NULL;
274 break;
275 }
276
277 flushed = sta_info_flush(local, sdata);
278 WARN_ON(flushed);
279
280 memset(&sdata->u, 0, sizeof(sdata->u));
281 ieee80211_if_sdata_init(sdata);
282}
283
284/* Must be called with rtnl lock held. */
285void __ieee80211_if_del(struct ieee80211_local *local,
286 struct ieee80211_sub_if_data *sdata)
287{
288 struct net_device *dev = sdata->dev;
289
290 ieee80211_debugfs_remove_netdev(sdata);
291 unregister_netdevice(dev); 252 unregister_netdevice(dev);
292 /* Except master interface, the net_device will be freed by
293 * net_device->destructor (i. e. ieee80211_if_free). */
294} 253}
295 254
296/* Must be called with rtnl lock held. */ 255/*
 297int ieee80211_if_remove(struct net_device *dev, const char *name, int id) 256 * Remove all interfaces; may only be called at hardware unregistration
257 * time because it doesn't do RCU-safe list removals.
258 */
259void ieee80211_remove_interfaces(struct ieee80211_local *local)
298{ 260{
299 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 261 struct ieee80211_sub_if_data *sdata, *tmp;
300 struct ieee80211_sub_if_data *sdata, *n;
301 262
302 ASSERT_RTNL(); 263 ASSERT_RTNL();
303 264
304 list_for_each_entry_safe(sdata, n, &local->interfaces, list) { 265 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
305 if ((sdata->vif.type == id || id == -1) && 266 list_del(&sdata->list);
306 strcmp(name, sdata->dev->name) == 0 && 267 unregister_netdevice(sdata->dev);
307 sdata->dev != local->mdev) {
308 list_del_rcu(&sdata->list);
309 synchronize_rcu();
310 __ieee80211_if_del(local, sdata);
311 return 0;
312 }
313 } 268 }
314 return -ENODEV;
315}
316
317void ieee80211_if_free(struct net_device *dev)
318{
319 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
320
321 ieee80211_if_sdata_deinit(sdata);
322 free_netdev(dev);
323} 269}
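
The removal rework above is worth a note: the per-interface paths keep the RCU-safe two-step (unlink, then wait for readers), while the new bulk ieee80211_remove_interfaces() may use a plain unlink because no RCU reader can still be walking local->interfaces at hardware unregistration time. A minimal sketch of the two patterns, condensed from the hunks above (kernel-style fragment, not a standalone program):

        /* removal while RCU readers may still traverse the list */
        list_del_rcu(&sdata->list);        /* unlink; readers may still see it */
        synchronize_rcu();                 /* wait out every pre-existing reader */
        unregister_netdevice(sdata->dev);

        /* bulk removal at hardware unregistration: no readers remain */
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);    /* plain unlink is sufficient here */
                unregister_netdevice(sdata->dev);
        }
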
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 220e83be3ef4..6597c779e35a 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -321,8 +321,15 @@ void ieee80211_key_link(struct ieee80211_key *key,
321 * some hardware cannot handle TKIP with QoS, so 321 * some hardware cannot handle TKIP with QoS, so
322 * we indicate whether QoS could be in use. 322 * we indicate whether QoS could be in use.
323 */ 323 */
324 if (sta->flags & WLAN_STA_WME) 324 if (test_sta_flags(sta, WLAN_STA_WME))
325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; 325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
326
327 /*
328 * This key is for a specific sta interface,
329 * inform the driver that it should try to store
330 * this key as pairwise key.
331 */
332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
326 } else { 333 } else {
327 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 334 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
328 struct sta_info *ap; 335 struct sta_info *ap;
@@ -335,7 +342,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
335 /* same here, the AP could be using QoS */ 342 /* same here, the AP could be using QoS */
336 ap = sta_info_get(key->local, key->sdata->u.sta.bssid); 343 ap = sta_info_get(key->local, key->sdata->u.sta.bssid);
337 if (ap) { 344 if (ap) {
338 if (ap->flags & WLAN_STA_WME) 345 if (test_sta_flags(ap, WLAN_STA_WME))
339 key->conf.flags |= 346 key->conf.flags |=
340 IEEE80211_KEY_FLAG_WMM_STA; 347 IEEE80211_KEY_FLAG_WMM_STA;
341 } 348 }
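
The IEEE80211_KEY_FLAG_PAIRWISE hint added above tells the driver that the key belongs to a single station rather than to the group. A hypothetical driver set_key() fragment shows how the hint might be consumed; the mydrv_* helpers, hw_priv and sta_id are invented for illustration, only the flag and the keyidx field come from the tree:

        if (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)
                /* program the key into a per-station (pairwise) slot */
                ret = mydrv_write_pairwise_key(hw_priv, sta_id, keyconf);
        else
                /* group key: shared slot selected by the key index */
                ret = mydrv_write_group_key(hw_priv, keyconf->keyidx, keyconf);
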
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index f52c3df1fe9a..425816e0996c 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -16,31 +16,18 @@
16#include <linux/rcupdate.h> 16#include <linux/rcupdate.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18 18
19/* ALG_TKIP 19#define WEP_IV_LEN 4
20 * struct ieee80211_key::key is encoded as a 256-bit (32 byte) data block: 20#define WEP_ICV_LEN 4
21 * Temporal Encryption Key (128 bits) 21#define ALG_TKIP_KEY_LEN 32
22 * Temporal Authenticator Tx MIC Key (64 bits) 22#define ALG_CCMP_KEY_LEN 16
23 * Temporal Authenticator Rx MIC Key (64 bits) 23#define CCMP_HDR_LEN 8
24 */ 24#define CCMP_MIC_LEN 8
25 25#define CCMP_TK_LEN 16
26#define WEP_IV_LEN 4 26#define CCMP_PN_LEN 6
27#define WEP_ICV_LEN 4 27#define TKIP_IV_LEN 8
28 28#define TKIP_ICV_LEN 4
29#define ALG_TKIP_KEY_LEN 32 29
30/* Starting offsets for each key */ 30#define NUM_RX_DATA_QUEUES 17
31#define ALG_TKIP_TEMP_ENCR_KEY 0
32#define ALG_TKIP_TEMP_AUTH_TX_MIC_KEY 16
33#define ALG_TKIP_TEMP_AUTH_RX_MIC_KEY 24
34#define TKIP_IV_LEN 8
35#define TKIP_ICV_LEN 4
36
37#define ALG_CCMP_KEY_LEN 16
38#define CCMP_HDR_LEN 8
39#define CCMP_MIC_LEN 8
40#define CCMP_TK_LEN 16
41#define CCMP_PN_LEN 6
42
43#define NUM_RX_DATA_QUEUES 17
44 31
45struct ieee80211_local; 32struct ieee80211_local;
46struct ieee80211_sub_if_data; 33struct ieee80211_sub_if_data;
@@ -69,6 +56,13 @@ enum ieee80211_internal_key_flags {
69 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), 56 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
70}; 57};
71 58
59struct tkip_ctx {
60 u32 iv32;
61 u16 iv16;
62 u16 p1k[5];
63 int initialized;
64};
65
72struct ieee80211_key { 66struct ieee80211_key {
73 struct ieee80211_local *local; 67 struct ieee80211_local *local;
74 struct ieee80211_sub_if_data *sdata; 68 struct ieee80211_sub_if_data *sdata;
@@ -85,16 +79,10 @@ struct ieee80211_key {
85 union { 79 union {
86 struct { 80 struct {
87 /* last used TSC */ 81 /* last used TSC */
88 u32 iv32; 82 struct tkip_ctx tx;
89 u16 iv16;
90 u16 p1k[5];
91 int tx_initialized;
92 83
93 /* last received RSC */ 84 /* last received RSC */
94 u32 iv32_rx[NUM_RX_DATA_QUEUES]; 85 struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
95 u16 iv16_rx[NUM_RX_DATA_QUEUES];
96 u16 p1k_rx[NUM_RX_DATA_QUEUES][5];
97 int rx_initialized[NUM_RX_DATA_QUEUES];
98 } tkip; 86 } tkip;
99 struct { 87 struct {
100 u8 tx_pn[6]; 88 u8 tx_pn[6];
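
The key.h change collapses four parallel per-queue arrays into a single array of struct tkip_ctx, so TX and RX state share one type and the TKIP helpers can take a context pointer instead of four loose fields. A sketch of the resulting access pattern; the phase-1 helper name is illustrative, not the tree's actual function:

        struct tkip_ctx *rx = &key->u.tkip.rx[queue];

        /* before: iv32_rx[queue], iv16_rx[queue], p1k_rx[queue][5] and
         * rx_initialized[queue] had to be kept in sync by hand */
        if (!rx->initialized || rx->iv32 != iv32) {
                tkip_mix_phase1(rx, key, ta);  /* recompute p1k for this iv32 */
                rx->initialized = 1;
                rx->iv32 = iv32;
        }
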
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index df0836ff1a20..f1a83d450ea0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -35,8 +35,6 @@
35#include "debugfs.h" 35#include "debugfs.h"
36#include "debugfs_netdev.h" 36#include "debugfs_netdev.h"
37 37
38#define SUPP_MCS_SET_LEN 16
39
40/* 38/*
41 * For seeing transmitted packets on monitor interfaces 39 * For seeing transmitted packets on monitor interfaces
42 * we have a radiotap header too. 40 * we have a radiotap header too.
@@ -107,12 +105,18 @@ static int ieee80211_master_open(struct net_device *dev)
107 105
108 /* we hold the RTNL here so can safely walk the list */ 106 /* we hold the RTNL here so can safely walk the list */
109 list_for_each_entry(sdata, &local->interfaces, list) { 107 list_for_each_entry(sdata, &local->interfaces, list) {
110 if (sdata->dev != dev && netif_running(sdata->dev)) { 108 if (netif_running(sdata->dev)) {
111 res = 0; 109 res = 0;
112 break; 110 break;
113 } 111 }
114 } 112 }
115 return res; 113
114 if (res)
115 return res;
116
117 netif_tx_start_all_queues(local->mdev);
118
119 return 0;
116} 120}
117 121
118static int ieee80211_master_stop(struct net_device *dev) 122static int ieee80211_master_stop(struct net_device *dev)
@@ -122,7 +126,7 @@ static int ieee80211_master_stop(struct net_device *dev)
122 126
123 /* we hold the RTNL here so can safely walk the list */ 127 /* we hold the RTNL here so can safely walk the list */
124 list_for_each_entry(sdata, &local->interfaces, list) 128 list_for_each_entry(sdata, &local->interfaces, list)
125 if (sdata->dev != dev && netif_running(sdata->dev)) 129 if (netif_running(sdata->dev))
126 dev_close(sdata->dev); 130 dev_close(sdata->dev);
127 131
128 return 0; 132 return 0;
@@ -147,9 +151,7 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
147 /* FIX: what would be proper limits for MTU? 151 /* FIX: what would be proper limits for MTU?
148 * This interface uses 802.3 frames. */ 152 * This interface uses 802.3 frames. */
149 if (new_mtu < 256 || 153 if (new_mtu < 256 ||
150 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { 154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
151 printk(KERN_WARNING "%s: invalid MTU %d\n",
152 dev->name, new_mtu);
153 return -EINVAL; 155 return -EINVAL;
154 } 156 }
155 157
@@ -180,10 +182,11 @@ static int ieee80211_open(struct net_device *dev)
180{ 182{
181 struct ieee80211_sub_if_data *sdata, *nsdata; 183 struct ieee80211_sub_if_data *sdata, *nsdata;
182 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 184 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
185 struct sta_info *sta;
183 struct ieee80211_if_init_conf conf; 186 struct ieee80211_if_init_conf conf;
187 u32 changed = 0;
184 int res; 188 int res;
185 bool need_hw_reconfig = 0; 189 bool need_hw_reconfig = 0;
186 struct sta_info *sta;
187 190
188 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 191 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
189 192
@@ -191,7 +194,7 @@ static int ieee80211_open(struct net_device *dev)
191 list_for_each_entry(nsdata, &local->interfaces, list) { 194 list_for_each_entry(nsdata, &local->interfaces, list) {
192 struct net_device *ndev = nsdata->dev; 195 struct net_device *ndev = nsdata->dev;
193 196
194 if (ndev != dev && ndev != local->mdev && netif_running(ndev)) { 197 if (ndev != dev && netif_running(ndev)) {
195 /* 198 /*
196 * Allow only a single IBSS interface to be up at any 199 * Allow only a single IBSS interface to be up at any
197 * time. This is restricted because beacon distribution 200 * time. This is restricted because beacon distribution
@@ -207,30 +210,6 @@ static int ieee80211_open(struct net_device *dev)
207 return -EBUSY; 210 return -EBUSY;
208 211
209 /* 212 /*
210 * Disallow multiple IBSS/STA mode interfaces.
211 *
212 * This is a technical restriction, it is possible although
213 * most likely not IEEE 802.11 compliant to have multiple
214 * STAs with just a single hardware (the TSF timer will not
215 * be adjusted properly.)
216 *
217 * However, because mac80211 uses the master device's BSS
218 * information for each STA/IBSS interface, doing this will
219 * currently corrupt that BSS information completely, unless,
220 * a not very useful case, both STAs are associated to the
221 * same BSS.
222 *
223 * To remove this restriction, the BSS information needs to
224 * be embedded in the STA/IBSS mode sdata instead of using
225 * the master device's BSS structure.
226 */
227 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
228 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
229 (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
230 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
231 return -EBUSY;
232
233 /*
234 * The remaining checks are only performed for interfaces 213 * The remaining checks are only performed for interfaces
235 * with the same MAC address. 214 * with the same MAC address.
236 */ 215 */
@@ -249,7 +228,7 @@ static int ieee80211_open(struct net_device *dev)
249 */ 228 */
250 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && 229 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
251 nsdata->vif.type == IEEE80211_IF_TYPE_AP) 230 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
252 sdata->u.vlan.ap = nsdata; 231 sdata->bss = &nsdata->u.ap;
253 } 232 }
254 } 233 }
255 234
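
Replacing the VLAN-only u.vlan.ap back-pointer with the shared sdata->bss pointer lets AP and VLAN interfaces reach common BSS state the same way. A short sketch of what that buys downstream code, assembled from the hunks above:

        switch (sdata->vif.type) {
        case IEEE80211_IF_TYPE_AP:
                sdata->bss = &sdata->u.ap;       /* the AP owns its BSS state */
                break;
        case IEEE80211_IF_TYPE_VLAN:
                /* set from the owning AP when both are brought up */
                if (!sdata->bss)
                        return -ENOLINK;         /* VLAN without an AP */
                break;
        }

        /* common code no longer special-cases VLANs: */
        skb_queue_tail(&sdata->bss->ps_bc_buf, skb);
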
@@ -259,10 +238,13 @@ static int ieee80211_open(struct net_device *dev)
259 return -ENOLINK; 238 return -ENOLINK;
260 break; 239 break;
261 case IEEE80211_IF_TYPE_VLAN: 240 case IEEE80211_IF_TYPE_VLAN:
262 if (!sdata->u.vlan.ap) 241 if (!sdata->bss)
263 return -ENOLINK; 242 return -ENOLINK;
243 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
264 break; 244 break;
265 case IEEE80211_IF_TYPE_AP: 245 case IEEE80211_IF_TYPE_AP:
246 sdata->bss = &sdata->u.ap;
247 break;
266 case IEEE80211_IF_TYPE_STA: 248 case IEEE80211_IF_TYPE_STA:
267 case IEEE80211_IF_TYPE_MNTR: 249 case IEEE80211_IF_TYPE_MNTR:
268 case IEEE80211_IF_TYPE_IBSS: 250 case IEEE80211_IF_TYPE_IBSS:
@@ -280,14 +262,13 @@ static int ieee80211_open(struct net_device *dev)
280 if (local->ops->start) 262 if (local->ops->start)
281 res = local->ops->start(local_to_hw(local)); 263 res = local->ops->start(local_to_hw(local));
282 if (res) 264 if (res)
283 return res; 265 goto err_del_bss;
284 need_hw_reconfig = 1; 266 need_hw_reconfig = 1;
285 ieee80211_led_radio(local, local->hw.conf.radio_enabled); 267 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
286 } 268 }
287 269
288 switch (sdata->vif.type) { 270 switch (sdata->vif.type) {
289 case IEEE80211_IF_TYPE_VLAN: 271 case IEEE80211_IF_TYPE_VLAN:
290 list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
291 /* no need to tell driver */ 272 /* no need to tell driver */
292 break; 273 break;
293 case IEEE80211_IF_TYPE_MNTR: 274 case IEEE80211_IF_TYPE_MNTR:
@@ -310,9 +291,9 @@ static int ieee80211_open(struct net_device *dev)
310 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) 291 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
311 local->fif_other_bss++; 292 local->fif_other_bss++;
312 293
313 netif_tx_lock_bh(local->mdev); 294 netif_addr_lock_bh(local->mdev);
314 ieee80211_configure_filter(local); 295 ieee80211_configure_filter(local);
315 netif_tx_unlock_bh(local->mdev); 296 netif_addr_unlock_bh(local->mdev);
316 break; 297 break;
317 case IEEE80211_IF_TYPE_STA: 298 case IEEE80211_IF_TYPE_STA:
318 case IEEE80211_IF_TYPE_IBSS: 299 case IEEE80211_IF_TYPE_IBSS:
@@ -326,8 +307,10 @@ static int ieee80211_open(struct net_device *dev)
326 if (res) 307 if (res)
327 goto err_stop; 308 goto err_stop;
328 309
329 ieee80211_if_config(dev); 310 if (ieee80211_vif_is_mesh(&sdata->vif))
330 ieee80211_reset_erp_info(dev); 311 ieee80211_start_mesh(sdata->dev);
312 changed |= ieee80211_reset_erp_info(dev);
313 ieee80211_bss_info_change_notify(sdata, changed);
331 ieee80211_enable_keys(sdata); 314 ieee80211_enable_keys(sdata);
332 315
333 if (sdata->vif.type == IEEE80211_IF_TYPE_STA && 316 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
@@ -346,6 +329,7 @@ static int ieee80211_open(struct net_device *dev)
346 goto err_del_interface; 329 goto err_del_interface;
347 } 330 }
348 331
332 /* no locking required since STA is not live yet */
349 sta->flags |= WLAN_STA_AUTHORIZED; 333 sta->flags |= WLAN_STA_AUTHORIZED;
350 334
351 res = sta_info_insert(sta); 335 res = sta_info_insert(sta);
@@ -385,13 +369,13 @@ static int ieee80211_open(struct net_device *dev)
385 * yet be effective. Trigger execution of ieee80211_sta_work 369 * yet be effective. Trigger execution of ieee80211_sta_work
386 * to fix this. 370 * to fix this.
387 */ 371 */
388 if(sdata->vif.type == IEEE80211_IF_TYPE_STA || 372 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
389 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 373 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
390 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 374 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
391 queue_work(local->hw.workqueue, &ifsta->work); 375 queue_work(local->hw.workqueue, &ifsta->work);
392 } 376 }
393 377
394 netif_start_queue(dev); 378 netif_tx_start_all_queues(dev);
395 379
396 return 0; 380 return 0;
397 err_del_interface: 381 err_del_interface:
@@ -399,6 +383,10 @@ static int ieee80211_open(struct net_device *dev)
399 err_stop: 383 err_stop:
400 if (!local->open_count && local->ops->stop) 384 if (!local->open_count && local->ops->stop)
401 local->ops->stop(local_to_hw(local)); 385 local->ops->stop(local_to_hw(local));
386 err_del_bss:
387 sdata->bss = NULL;
388 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
389 list_del(&sdata->u.vlan.list);
402 return res; 390 return res;
403} 391}
404 392
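
The new err_del_bss label completes a classic goto-unwind ladder: each failure point jumps to the label that undoes exactly what has succeeded so far, and the labels run in reverse order of setup. Condensed from the hunks above (fragment, unrelated steps elided):

        res = local->ops->start(local_to_hw(local));
        if (res)
                goto err_del_bss;        /* nothing driver-side to undo yet */

        res = local->ops->add_interface(local_to_hw(local), &conf);
        if (res)
                goto err_stop;           /* undo ops->start() only */
        /* ... */

 err_del_interface:
        local->ops->remove_interface(local_to_hw(local), &conf);
 err_stop:
        if (!local->open_count && local->ops->stop)
                local->ops->stop(local_to_hw(local));
 err_del_bss:
        sdata->bss = NULL;
        if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
                list_del(&sdata->u.vlan.list);
        return res;
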
@@ -412,7 +400,7 @@ static int ieee80211_stop(struct net_device *dev)
412 /* 400 /*
413 * Stop TX on this interface first. 401 * Stop TX on this interface first.
414 */ 402 */
415 netif_stop_queue(dev); 403 netif_tx_stop_all_queues(dev);
416 404
417 /* 405 /*
418 * Now delete all active aggregation sessions. 406 * Now delete all active aggregation sessions.
@@ -481,7 +469,6 @@ static int ieee80211_stop(struct net_device *dev)
481 switch (sdata->vif.type) { 469 switch (sdata->vif.type) {
482 case IEEE80211_IF_TYPE_VLAN: 470 case IEEE80211_IF_TYPE_VLAN:
483 list_del(&sdata->u.vlan.list); 471 list_del(&sdata->u.vlan.list);
484 sdata->u.vlan.ap = NULL;
485 /* no need to tell driver */ 472 /* no need to tell driver */
486 break; 473 break;
487 case IEEE80211_IF_TYPE_MNTR: 474 case IEEE80211_IF_TYPE_MNTR:
@@ -503,9 +490,9 @@ static int ieee80211_stop(struct net_device *dev)
503 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) 490 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
504 local->fif_other_bss--; 491 local->fif_other_bss--;
505 492
506 netif_tx_lock_bh(local->mdev); 493 netif_addr_lock_bh(local->mdev);
507 ieee80211_configure_filter(local); 494 ieee80211_configure_filter(local);
508 netif_tx_unlock_bh(local->mdev); 495 netif_addr_unlock_bh(local->mdev);
509 break; 496 break;
510 case IEEE80211_IF_TYPE_MESH_POINT: 497 case IEEE80211_IF_TYPE_MESH_POINT:
511 case IEEE80211_IF_TYPE_STA: 498 case IEEE80211_IF_TYPE_STA:
@@ -544,6 +531,8 @@ static int ieee80211_stop(struct net_device *dev)
544 local->ops->remove_interface(local_to_hw(local), &conf); 531 local->ops->remove_interface(local_to_hw(local), &conf);
545 } 532 }
546 533
534 sdata->bss = NULL;
535
547 if (local->open_count == 0) { 536 if (local->open_count == 0) {
548 if (netif_running(local->mdev)) 537 if (netif_running(local->mdev))
549 dev_close(local->mdev); 538 dev_close(local->mdev);
@@ -584,17 +573,19 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
584 573
585 sta = sta_info_get(local, ra); 574 sta = sta_info_get(local, ra);
586 if (!sta) { 575 if (!sta) {
576#ifdef CONFIG_MAC80211_HT_DEBUG
587 printk(KERN_DEBUG "Could not find the station\n"); 577 printk(KERN_DEBUG "Could not find the station\n");
588 rcu_read_unlock(); 578#endif
589 return -ENOENT; 579 ret = -ENOENT;
580 goto exit;
590 } 581 }
591 582
592 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 583 spin_lock_bh(&sta->lock);
593 584
594 /* we have tried too many times, receiver does not want A-MPDU */ 585 /* we have tried too many times, receiver does not want A-MPDU */
595 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { 586 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
596 ret = -EBUSY; 587 ret = -EBUSY;
597 goto start_ba_exit; 588 goto err_unlock_sta;
598 } 589 }
599 590
600 state = &sta->ampdu_mlme.tid_state_tx[tid]; 591 state = &sta->ampdu_mlme.tid_state_tx[tid];
@@ -605,18 +596,20 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
605 "idle on tid %u\n", tid); 596 "idle on tid %u\n", tid);
606#endif /* CONFIG_MAC80211_HT_DEBUG */ 597#endif /* CONFIG_MAC80211_HT_DEBUG */
607 ret = -EAGAIN; 598 ret = -EAGAIN;
608 goto start_ba_exit; 599 goto err_unlock_sta;
609 } 600 }
610 601
611 /* prepare A-MPDU MLME for Tx aggregation */ 602 /* prepare A-MPDU MLME for Tx aggregation */
612 sta->ampdu_mlme.tid_tx[tid] = 603 sta->ampdu_mlme.tid_tx[tid] =
613 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); 604 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
614 if (!sta->ampdu_mlme.tid_tx[tid]) { 605 if (!sta->ampdu_mlme.tid_tx[tid]) {
606#ifdef CONFIG_MAC80211_HT_DEBUG
615 if (net_ratelimit()) 607 if (net_ratelimit())
616 printk(KERN_ERR "allocate tx mlme to tid %d failed\n", 608 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
617 tid); 609 tid);
610#endif
618 ret = -ENOMEM; 611 ret = -ENOMEM;
619 goto start_ba_exit; 612 goto err_unlock_sta;
620 } 613 }
621 /* Tx timer */ 614 /* Tx timer */
622 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 615 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
@@ -625,10 +618,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
625 (unsigned long)&sta->timer_to_tid[tid]; 618 (unsigned long)&sta->timer_to_tid[tid];
626 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 619 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
627 620
628 /* ensure that TX flow won't interrupt us
629 * until the end of the call to requeue function */
630 spin_lock_bh(&local->mdev->queue_lock);
631
632 /* create a new queue for this aggregation */ 621 /* create a new queue for this aggregation */
633 ret = ieee80211_ht_agg_queue_add(local, sta, tid); 622 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
634 623
@@ -639,7 +628,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
639 printk(KERN_DEBUG "BA request denied - queue unavailable for" 628 printk(KERN_DEBUG "BA request denied - queue unavailable for"
640 " tid %d\n", tid); 629 " tid %d\n", tid);
641#endif /* CONFIG_MAC80211_HT_DEBUG */ 630#endif /* CONFIG_MAC80211_HT_DEBUG */
642 goto start_ba_err; 631 goto err_unlock_queue;
643 } 632 }
644 sdata = sta->sdata; 633 sdata = sta->sdata;
645 634
@@ -655,18 +644,18 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
655 /* No need to requeue the packets in the agg queue, since we 644 /* No need to requeue the packets in the agg queue, since we
656 * held the tx lock: no packet could be enqueued to the newly 645 * held the tx lock: no packet could be enqueued to the newly
657 * allocated queue */ 646 * allocated queue */
658 ieee80211_ht_agg_queue_remove(local, sta, tid, 0); 647 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
659#ifdef CONFIG_MAC80211_HT_DEBUG 648#ifdef CONFIG_MAC80211_HT_DEBUG
660 printk(KERN_DEBUG "BA request denied - HW unavailable for" 649 printk(KERN_DEBUG "BA request denied - HW unavailable for"
661 " tid %d\n", tid); 650 " tid %d\n", tid);
662#endif /* CONFIG_MAC80211_HT_DEBUG */ 651#endif /* CONFIG_MAC80211_HT_DEBUG */
663 *state = HT_AGG_STATE_IDLE; 652 *state = HT_AGG_STATE_IDLE;
664 goto start_ba_err; 653 goto err_unlock_queue;
665 } 654 }
666 655
667 /* Will put all the packets in the new SW queue */ 656 /* Will put all the packets in the new SW queue */
668 ieee80211_requeue(local, ieee802_1d_to_ac[tid]); 657 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
669 spin_unlock_bh(&local->mdev->queue_lock); 658 spin_unlock_bh(&sta->lock);
670 659
671 /* send an addBA request */ 660 /* send an addBA request */
672 sta->ampdu_mlme.dialog_token_allocator++; 661 sta->ampdu_mlme.dialog_token_allocator++;
@@ -674,25 +663,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
674 sta->ampdu_mlme.dialog_token_allocator; 663 sta->ampdu_mlme.dialog_token_allocator;
675 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 664 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
676 665
666
677 ieee80211_send_addba_request(sta->sdata->dev, ra, tid, 667 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
678 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 668 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
679 sta->ampdu_mlme.tid_tx[tid]->ssn, 669 sta->ampdu_mlme.tid_tx[tid]->ssn,
680 0x40, 5000); 670 0x40, 5000);
681
682 /* activate the timer for the recipient's addBA response */ 671 /* activate the timer for the recipient's addBA response */
683 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = 672 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
684 jiffies + ADDBA_RESP_INTERVAL; 673 jiffies + ADDBA_RESP_INTERVAL;
685 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 674 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
675#ifdef CONFIG_MAC80211_HT_DEBUG
686 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); 676 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
687 goto start_ba_exit; 677#endif
678 goto exit;
688 679
689start_ba_err: 680err_unlock_queue:
690 kfree(sta->ampdu_mlme.tid_tx[tid]); 681 kfree(sta->ampdu_mlme.tid_tx[tid]);
691 sta->ampdu_mlme.tid_tx[tid] = NULL; 682 sta->ampdu_mlme.tid_tx[tid] = NULL;
692 spin_unlock_bh(&local->mdev->queue_lock);
693 ret = -EBUSY; 683 ret = -EBUSY;
694start_ba_exit: 684err_unlock_sta:
695 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 685 spin_unlock_bh(&sta->lock);
686exit:
696 rcu_read_unlock(); 687 rcu_read_unlock();
697 return ret; 688 return ret;
698} 689}
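
The locking change running through this function is the real story: the dedicated ampdu_mlme.ampdu_tx spinlock and the mdev queue_lock are folded into one per-station sta->lock, so every failure path can unwind through a single pair of labels. The resulting shape, sketched:

        rcu_read_lock();
        sta = sta_info_get(local, ra);
        if (!sta) {
                ret = -ENOENT;
                goto exit;
        }

        spin_lock_bh(&sta->lock);  /* one lock for all per-STA aggregation state */
        /* ... state checks and queue setup; failures goto err_unlock_sta ... */
        spin_unlock_bh(&sta->lock);
        goto exit;

 err_unlock_sta:
        spin_unlock_bh(&sta->lock);
 exit:
        rcu_read_unlock();
        return ret;
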
@@ -720,7 +711,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
720 711
721 /* check if the TID is in aggregation */ 712 /* check if the TID is in aggregation */
722 state = &sta->ampdu_mlme.tid_state_tx[tid]; 713 state = &sta->ampdu_mlme.tid_state_tx[tid];
723 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 714 spin_lock_bh(&sta->lock);
724 715
725 if (*state != HT_AGG_STATE_OPERATIONAL) { 716 if (*state != HT_AGG_STATE_OPERATIONAL) {
726 ret = -ENOENT; 717 ret = -ENOENT;
@@ -750,7 +741,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
750 } 741 }
751 742
752stop_BA_exit: 743stop_BA_exit:
753 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 744 spin_unlock_bh(&sta->lock);
754 rcu_read_unlock(); 745 rcu_read_unlock();
755 return ret; 746 return ret;
756} 747}
@@ -764,8 +755,10 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
764 DECLARE_MAC_BUF(mac); 755 DECLARE_MAC_BUF(mac);
765 756
766 if (tid >= STA_TID_NUM) { 757 if (tid >= STA_TID_NUM) {
758#ifdef CONFIG_MAC80211_HT_DEBUG
767 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 759 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
768 tid, STA_TID_NUM); 760 tid, STA_TID_NUM);
761#endif
769 return; 762 return;
770 } 763 }
771 764
@@ -773,18 +766,22 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
773 sta = sta_info_get(local, ra); 766 sta = sta_info_get(local, ra);
774 if (!sta) { 767 if (!sta) {
775 rcu_read_unlock(); 768 rcu_read_unlock();
769#ifdef CONFIG_MAC80211_HT_DEBUG
776 printk(KERN_DEBUG "Could not find station: %s\n", 770 printk(KERN_DEBUG "Could not find station: %s\n",
777 print_mac(mac, ra)); 771 print_mac(mac, ra));
772#endif
778 return; 773 return;
779 } 774 }
780 775
781 state = &sta->ampdu_mlme.tid_state_tx[tid]; 776 state = &sta->ampdu_mlme.tid_state_tx[tid];
782 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 777 spin_lock_bh(&sta->lock);
783 778
784 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 779 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
780#ifdef CONFIG_MAC80211_HT_DEBUG
785 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", 781 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
786 *state); 782 *state);
787 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 783#endif
784 spin_unlock_bh(&sta->lock);
788 rcu_read_unlock(); 785 rcu_read_unlock();
789 return; 786 return;
790 } 787 }
@@ -794,10 +791,12 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
794 *state |= HT_ADDBA_DRV_READY_MSK; 791 *state |= HT_ADDBA_DRV_READY_MSK;
795 792
796 if (*state == HT_AGG_STATE_OPERATIONAL) { 793 if (*state == HT_AGG_STATE_OPERATIONAL) {
794#ifdef CONFIG_MAC80211_HT_DEBUG
797 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); 795 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
796#endif
798 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 797 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
799 } 798 }
800 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 799 spin_unlock_bh(&sta->lock);
801 rcu_read_unlock(); 800 rcu_read_unlock();
802} 801}
803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); 802EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
@@ -811,8 +810,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
811 DECLARE_MAC_BUF(mac); 810 DECLARE_MAC_BUF(mac);
812 811
813 if (tid >= STA_TID_NUM) { 812 if (tid >= STA_TID_NUM) {
813#ifdef CONFIG_MAC80211_HT_DEBUG
814 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 814 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
815 tid, STA_TID_NUM); 815 tid, STA_TID_NUM);
816#endif
816 return; 817 return;
817 } 818 }
818 819
@@ -824,17 +825,23 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
824 rcu_read_lock(); 825 rcu_read_lock();
825 sta = sta_info_get(local, ra); 826 sta = sta_info_get(local, ra);
826 if (!sta) { 827 if (!sta) {
828#ifdef CONFIG_MAC80211_HT_DEBUG
827 printk(KERN_DEBUG "Could not find station: %s\n", 829 printk(KERN_DEBUG "Could not find station: %s\n",
828 print_mac(mac, ra)); 830 print_mac(mac, ra));
831#endif
829 rcu_read_unlock(); 832 rcu_read_unlock();
830 return; 833 return;
831 } 834 }
832 state = &sta->ampdu_mlme.tid_state_tx[tid]; 835 state = &sta->ampdu_mlme.tid_state_tx[tid];
833 836
834 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 837 /* NOTE: no need to use sta->lock in this state check, as
 838 * ieee80211_stop_tx_ba_session will let only one stop call
839 * pass through per sta/tid
840 */
835 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { 841 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
842#ifdef CONFIG_MAC80211_HT_DEBUG
836 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); 843 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
837 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 844#endif
838 rcu_read_unlock(); 845 rcu_read_unlock();
839 return; 846 return;
840 } 847 }
@@ -845,23 +852,20 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
845 852
846 agg_queue = sta->tid_to_tx_q[tid]; 853 agg_queue = sta->tid_to_tx_q[tid];
847 854
848 /* avoid ordering issues: we are the only one that can modify
849 * the content of the qdiscs */
850 spin_lock_bh(&local->mdev->queue_lock);
851 /* remove the queue for this aggregation */
852 ieee80211_ht_agg_queue_remove(local, sta, tid, 1); 855 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
853 spin_unlock_bh(&local->mdev->queue_lock);
854 856
 855 /* we just requeued the all the frames that were in the removed 857 /* We just requeued all the frames that were in the
856 * queue, and since we might miss a softirq we do netif_schedule. 858 * removed queue, and since we might miss a softirq we do
857 * ieee80211_wake_queue is not used here as this queue is not 859 * netif_schedule_queue. ieee80211_wake_queue is not used
858 * necessarily stopped */ 860 * here as this queue is not necessarily stopped
859 netif_schedule(local->mdev); 861 */
862 netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
863 spin_lock_bh(&sta->lock);
860 *state = HT_AGG_STATE_IDLE; 864 *state = HT_AGG_STATE_IDLE;
861 sta->ampdu_mlme.addba_req_num[tid] = 0; 865 sta->ampdu_mlme.addba_req_num[tid] = 0;
862 kfree(sta->ampdu_mlme.tid_tx[tid]); 866 kfree(sta->ampdu_mlme.tid_tx[tid]);
863 sta->ampdu_mlme.tid_tx[tid] = NULL; 867 sta->ampdu_mlme.tid_tx[tid] = NULL;
864 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 868 spin_unlock_bh(&sta->lock);
865 869
866 rcu_read_unlock(); 870 rcu_read_unlock();
867} 871}
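
With the switch to multiqueue netdevs, kicking the qdisc becomes a per-queue operation: rather than netif_schedule() on the whole master device, only the aggregation queue that was just drained is rescheduled. The pattern as used above:

        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(local->mdev, agg_queue);
        netif_schedule_queue(txq);   /* reschedule just this TX queue */
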
@@ -875,9 +879,11 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
875 struct sk_buff *skb = dev_alloc_skb(0); 879 struct sk_buff *skb = dev_alloc_skb(0);
876 880
877 if (unlikely(!skb)) { 881 if (unlikely(!skb)) {
882#ifdef CONFIG_MAC80211_HT_DEBUG
878 if (net_ratelimit()) 883 if (net_ratelimit())
879 printk(KERN_WARNING "%s: Not enough memory, " 884 printk(KERN_WARNING "%s: Not enough memory, "
880 "dropping start BA session", skb->dev->name); 885 "dropping start BA session", skb->dev->name);
886#endif
881 return; 887 return;
882 } 888 }
883 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 889 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -898,9 +904,11 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
898 struct sk_buff *skb = dev_alloc_skb(0); 904 struct sk_buff *skb = dev_alloc_skb(0);
899 905
900 if (unlikely(!skb)) { 906 if (unlikely(!skb)) {
907#ifdef CONFIG_MAC80211_HT_DEBUG
901 if (net_ratelimit()) 908 if (net_ratelimit())
902 printk(KERN_WARNING "%s: Not enough memory, " 909 printk(KERN_WARNING "%s: Not enough memory, "
903 "dropping stop BA session", skb->dev->name); 910 "dropping stop BA session", skb->dev->name);
911#endif
904 return; 912 return;
905 } 913 }
906 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 914 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -951,7 +959,6 @@ static const struct header_ops ieee80211_header_ops = {
951 .cache_update = eth_header_cache_update, 959 .cache_update = eth_header_cache_update,
952}; 960};
953 961
954/* Must not be called for mdev */
955void ieee80211_if_setup(struct net_device *dev) 962void ieee80211_if_setup(struct net_device *dev)
956{ 963{
957 ether_setup(dev); 964 ether_setup(dev);
@@ -961,67 +968,52 @@ void ieee80211_if_setup(struct net_device *dev)
961 dev->change_mtu = ieee80211_change_mtu; 968 dev->change_mtu = ieee80211_change_mtu;
962 dev->open = ieee80211_open; 969 dev->open = ieee80211_open;
963 dev->stop = ieee80211_stop; 970 dev->stop = ieee80211_stop;
964 dev->destructor = ieee80211_if_free; 971 dev->destructor = free_netdev;
965} 972}
966 973
967/* everything else */ 974/* everything else */
968 975
969static int __ieee80211_if_config(struct net_device *dev, 976int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
970 struct sk_buff *beacon,
971 struct ieee80211_tx_control *control)
972{ 977{
973 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 978 struct ieee80211_local *local = sdata->local;
974 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
975 struct ieee80211_if_conf conf; 979 struct ieee80211_if_conf conf;
976 980
977 if (!local->ops->config_interface || !netif_running(dev)) 981 if (WARN_ON(!netif_running(sdata->dev)))
982 return 0;
983
984 if (!local->ops->config_interface)
978 return 0; 985 return 0;
979 986
980 memset(&conf, 0, sizeof(conf)); 987 memset(&conf, 0, sizeof(conf));
981 conf.type = sdata->vif.type; 988 conf.changed = changed;
989
982 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 990 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
983 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 991 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
984 conf.bssid = sdata->u.sta.bssid; 992 conf.bssid = sdata->u.sta.bssid;
985 conf.ssid = sdata->u.sta.ssid; 993 conf.ssid = sdata->u.sta.ssid;
986 conf.ssid_len = sdata->u.sta.ssid_len; 994 conf.ssid_len = sdata->u.sta.ssid_len;
987 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
988 conf.beacon = beacon;
989 conf.beacon_control = control;
990 ieee80211_start_mesh(dev);
991 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 995 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
996 conf.bssid = sdata->dev->dev_addr;
992 conf.ssid = sdata->u.ap.ssid; 997 conf.ssid = sdata->u.ap.ssid;
993 conf.ssid_len = sdata->u.ap.ssid_len; 998 conf.ssid_len = sdata->u.ap.ssid_len;
994 conf.beacon = beacon; 999 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
995 conf.beacon_control = control; 1000 u8 zero[ETH_ALEN] = { 0 };
1001 conf.bssid = zero;
1002 conf.ssid = zero;
1003 conf.ssid_len = 0;
1004 } else {
1005 WARN_ON(1);
1006 return -EINVAL;
996 } 1007 }
997 return local->ops->config_interface(local_to_hw(local),
998 &sdata->vif, &conf);
999}
1000 1008
1001int ieee80211_if_config(struct net_device *dev) 1009 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
1002{ 1010 return -EINVAL;
1003 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1004 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1005 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
1006 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1007 return ieee80211_if_config_beacon(dev);
1008 return __ieee80211_if_config(dev, NULL, NULL);
1009}
1010 1011
1011int ieee80211_if_config_beacon(struct net_device *dev) 1012 if (WARN_ON(!conf.ssid && (changed & IEEE80211_IFCC_SSID)))
1012{ 1013 return -EINVAL;
1013 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1014 struct ieee80211_tx_control control;
1015 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1016 struct sk_buff *skb;
1017 1014
1018 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) 1015 return local->ops->config_interface(local_to_hw(local),
1019 return 0; 1016 &sdata->vif, &conf);
1020 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif,
1021 &control);
1022 if (!skb)
1023 return -ENOMEM;
1024 return __ieee80211_if_config(dev, skb, &control);
1025} 1017}
1026 1018
1027int ieee80211_hw_config(struct ieee80211_local *local) 1019int ieee80211_hw_config(struct ieee80211_local *local)
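
ieee80211_if_config() now takes an explicit bitmask describing what changed and checks that every advertised field is actually populated before calling into the driver. A caller-side sketch; the particular flag combination is illustrative:

        /* tell the driver both BSSID and SSID are now valid */
        ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID | IEEE80211_IFCC_SSID);

        /* inside, each advertised field is verified before use: */
        if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
                return -EINVAL;
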
@@ -1068,56 +1060,84 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1068 struct ieee80211_supported_band *sband; 1060 struct ieee80211_supported_band *sband;
1069 struct ieee80211_ht_info ht_conf; 1061 struct ieee80211_ht_info ht_conf;
1070 struct ieee80211_ht_bss_info ht_bss_conf; 1062 struct ieee80211_ht_bss_info ht_bss_conf;
1071 int i;
1072 u32 changed = 0; 1063 u32 changed = 0;
1064 int i;
1065 u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
1066 u8 tx_mcs_set_cap;
1073 1067
1074 sband = local->hw.wiphy->bands[conf->channel->band]; 1068 sband = local->hw.wiphy->bands[conf->channel->band];
1075 1069
1070 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
1071 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
1072
1076 /* HT is not supported */ 1073 /* HT is not supported */
1077 if (!sband->ht_info.ht_supported) { 1074 if (!sband->ht_info.ht_supported) {
1078 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1075 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1079 return 0; 1076 goto out;
1080 } 1077 }
1081 1078
1082 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info)); 1079 /* disable HT */
1083 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info)); 1080 if (!enable_ht) {
1084 1081 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1085 if (enable_ht) {
1086 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1087 changed |= BSS_CHANGED_HT; 1082 changed |= BSS_CHANGED_HT;
1083 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1084 conf->ht_conf.ht_supported = 0;
1085 goto out;
1086 }
1088 1087
1089 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1090 ht_conf.ht_supported = 1;
1091 1088
1092 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; 1089 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1093 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 1090 changed |= BSS_CHANGED_HT;
1094 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1095 1091
1096 for (i = 0; i < SUPP_MCS_SET_LEN; i++) 1092 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1097 ht_conf.supp_mcs_set[i] = 1093 ht_conf.ht_supported = 1;
1098 sband->ht_info.supp_mcs_set[i] &
1099 req_ht_cap->supp_mcs_set[i];
1100 1094
1101 ht_bss_conf.primary_channel = req_bss_cap->primary_channel; 1095 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1102 ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 1096 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
1103 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 1097 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1098 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1099 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1100 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
1104 1101
1105 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; 1102 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1106 ht_conf.ampdu_density = req_ht_cap->ampdu_density; 1103 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1107 1104
1108 /* if bss configuration changed store the new one */ 1105 /* Bits 96-100 */
1109 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) || 1106 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
1110 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) { 1107
 1111 changed |= BSS_CHANGED_HT; 1108 /* configure supported Tx MCS according to requested MCS
1112 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf)); 1109 * (based in most cases on Rx capabilities of peer) and self
1113 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf)); 1110 * Tx MCS capabilities (as defined by low level driver HW
1114 } 1111 * Tx capabilities) */
1115 } else { 1112 if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
1116 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) 1113 goto check_changed;
1117 changed |= BSS_CHANGED_HT;
1118 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1119 }
1120 1114
 1115 /* Counting from 0, therefore + 1 */
1116 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
1117 max_tx_streams = ((tx_mcs_set_cap &
1118 IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
1119
1120 for (i = 0; i < max_tx_streams; i++)
1121 ht_conf.supp_mcs_set[i] =
1122 sband->ht_info.supp_mcs_set[i] &
1123 req_ht_cap->supp_mcs_set[i];
1124
1125 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
1126 for (i = IEEE80211_SUPP_MCS_SET_UEQM;
1127 i < IEEE80211_SUPP_MCS_SET_LEN; i++)
1128 ht_conf.supp_mcs_set[i] =
1129 sband->ht_info.supp_mcs_set[i] &
1130 req_ht_cap->supp_mcs_set[i];
1131
1132check_changed:
1133 /* if bss configuration changed store the new one */
1134 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1135 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1136 changed |= BSS_CHANGED_HT;
1137 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1138 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1139 }
1140out:
1121 return changed; 1141 return changed;
1122} 1142}
1123 1143
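
The Tx MCS handling above decodes the 802.11n "Tx MCS set parameters" byte (byte 12, i.e. bits 96-103, of the supported-MCS set). Assuming the usual bit layout (bit 0 = Tx MCS set defined, bit 1 = Tx and Rx sets differ, bits 2-3 = max Tx spatial streams minus one), a worked example:

        u8 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];

        /* e.g. tx_mcs_set_cap == 0x0B: TX_DEFINED | TX_RX_DIFF, stream bits 10b */
        if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
                max_tx_streams = ((tx_mcs_set_cap &
                        IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
        /* (0x0B & 0x0C) >> 2 == 2, plus one == 3 spatial streams for Tx */
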
@@ -1136,50 +1156,30 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1136 changed); 1156 changed);
1137} 1157}
1138 1158
1139void ieee80211_reset_erp_info(struct net_device *dev) 1159u32 ieee80211_reset_erp_info(struct net_device *dev)
1140{ 1160{
1141 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1161 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1142 1162
1143 sdata->bss_conf.use_cts_prot = 0; 1163 sdata->bss_conf.use_cts_prot = 0;
1144 sdata->bss_conf.use_short_preamble = 0; 1164 sdata->bss_conf.use_short_preamble = 0;
1145 ieee80211_bss_info_change_notify(sdata, 1165 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
1146 BSS_CHANGED_ERP_CTS_PROT |
1147 BSS_CHANGED_ERP_PREAMBLE);
1148} 1166}
1149 1167
1150void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 1168void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1151 struct sk_buff *skb, 1169 struct sk_buff *skb)
1152 struct ieee80211_tx_status *status)
1153{ 1170{
1154 struct ieee80211_local *local = hw_to_local(hw); 1171 struct ieee80211_local *local = hw_to_local(hw);
1155 struct ieee80211_tx_status *saved; 1172 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1156 int tmp; 1173 int tmp;
1157 1174
1158 skb->dev = local->mdev; 1175 skb->dev = local->mdev;
1159 saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC);
1160 if (unlikely(!saved)) {
1161 if (net_ratelimit())
1162 printk(KERN_WARNING "%s: Not enough memory, "
1163 "dropping tx status", skb->dev->name);
1164 /* should be dev_kfree_skb_irq, but due to this function being
1165 * named _irqsafe instead of just _irq we can't be sure that
1166 * people won't call it from non-irq contexts */
1167 dev_kfree_skb_any(skb);
1168 return;
1169 }
1170 memcpy(saved, status, sizeof(struct ieee80211_tx_status));
1171 /* copy pointer to saved status into skb->cb for use by tasklet */
1172 memcpy(skb->cb, &saved, sizeof(saved));
1173
1174 skb->pkt_type = IEEE80211_TX_STATUS_MSG; 1176 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
1175 skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ? 1177 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
1176 &local->skb_queue : &local->skb_queue_unreliable, skb); 1178 &local->skb_queue : &local->skb_queue_unreliable, skb);
1177 tmp = skb_queue_len(&local->skb_queue) + 1179 tmp = skb_queue_len(&local->skb_queue) +
1178 skb_queue_len(&local->skb_queue_unreliable); 1180 skb_queue_len(&local->skb_queue_unreliable);
1179 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && 1181 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
1180 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 1182 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1181 memcpy(&saved, skb->cb, sizeof(saved));
1182 kfree(saved);
1183 dev_kfree_skb_irq(skb); 1183 dev_kfree_skb_irq(skb);
1184 tmp--; 1184 tmp--;
1185 I802_DEBUG_INC(local->tx_status_drop); 1185 I802_DEBUG_INC(local->tx_status_drop);
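
The payoff of the ieee80211_tx_info conversion shows here: TX status rides in the skb's own control buffer, so the irqsafe path no longer allocates a side structure that it might fail to get under memory pressure. IEEE80211_SKB_CB() is essentially a cast of skb->cb; a driver-side sketch (retries is a stand-in for whatever the hardware reported):

        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->flags |= IEEE80211_TX_STAT_ACK;    /* report status in place */
        info->status.retry_count = retries;      /* no kmalloc, no copy */
        ieee80211_tx_status_irqsafe(hw, skb);
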
@@ -1193,7 +1193,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
1193 struct ieee80211_local *local = (struct ieee80211_local *) data; 1193 struct ieee80211_local *local = (struct ieee80211_local *) data;
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 struct ieee80211_rx_status rx_status; 1195 struct ieee80211_rx_status rx_status;
1196 struct ieee80211_tx_status *tx_status;
1197 struct ieee80211_ra_tid *ra_tid; 1196 struct ieee80211_ra_tid *ra_tid;
1198 1197
1199 while ((skb = skb_dequeue(&local->skb_queue)) || 1198 while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -1208,12 +1207,8 @@ static void ieee80211_tasklet_handler(unsigned long data)
1208 __ieee80211_rx(local_to_hw(local), skb, &rx_status); 1207 __ieee80211_rx(local_to_hw(local), skb, &rx_status);
1209 break; 1208 break;
1210 case IEEE80211_TX_STATUS_MSG: 1209 case IEEE80211_TX_STATUS_MSG:
1211 /* get pointer to saved status out of skb->cb */
1212 memcpy(&tx_status, skb->cb, sizeof(tx_status));
1213 skb->pkt_type = 0; 1210 skb->pkt_type = 0;
1214 ieee80211_tx_status(local_to_hw(local), 1211 ieee80211_tx_status(local_to_hw(local), skb);
1215 skb, tx_status);
1216 kfree(tx_status);
1217 break; 1212 break;
1218 case IEEE80211_DELBA_MSG: 1213 case IEEE80211_DELBA_MSG:
1219 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 1214 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -1227,9 +1222,8 @@ static void ieee80211_tasklet_handler(unsigned long data)
1227 ra_tid->ra, ra_tid->tid); 1222 ra_tid->ra, ra_tid->tid);
1228 dev_kfree_skb(skb); 1223 dev_kfree_skb(skb);
1229 break ; 1224 break ;
1230 default: /* should never get here! */ 1225 default:
1231 printk(KERN_ERR "%s: Unknown message type (%d)\n", 1226 WARN_ON(1);
1232 wiphy_name(local->hw.wiphy), skb->pkt_type);
1233 dev_kfree_skb(skb); 1227 dev_kfree_skb(skb);
1234 break; 1228 break;
1235 } 1229 }
@@ -1242,24 +1236,15 @@ static void ieee80211_tasklet_handler(unsigned long data)
1242 * Also, tx_packet_data in cb is restored from tx_control. */ 1236 * Also, tx_packet_data in cb is restored from tx_control. */
1243static void ieee80211_remove_tx_extra(struct ieee80211_local *local, 1237static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 1238 struct ieee80211_key *key,
1245 struct sk_buff *skb, 1239 struct sk_buff *skb)
1246 struct ieee80211_tx_control *control)
1247{ 1240{
1248 int hdrlen, iv_len, mic_len; 1241 int hdrlen, iv_len, mic_len;
1249 struct ieee80211_tx_packet_data *pkt_data; 1242 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1250 1243
1251 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1244 info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
1252 pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex; 1245 IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
1253 pkt_data->flags = 0; 1246 IEEE80211_TX_CTL_REQUEUE |
1254 if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS) 1247 IEEE80211_TX_CTL_EAPOL_FRAME;
1255 pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
1256 if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)
1257 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
1258 if (control->flags & IEEE80211_TXCTL_REQUEUE)
1259 pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
1260 if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME)
1261 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME;
1262 pkt_data->queue = control->queue;
1263 1248
1264 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1249 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1265 1250
@@ -1306,9 +1291,10 @@ no_key:
1306 1291
1307static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, 1292static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1308 struct sta_info *sta, 1293 struct sta_info *sta,
1309 struct sk_buff *skb, 1294 struct sk_buff *skb)
1310 struct ieee80211_tx_status *status)
1311{ 1295{
1296 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1297
1312 sta->tx_filtered_count++; 1298 sta->tx_filtered_count++;
1313 1299
1314 /* 1300 /*
@@ -1316,7 +1302,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1316 * packet. If the STA went to power save mode, this will happen 1302 * packet. If the STA went to power save mode, this will happen
1317 * when it wakes up for the next time. 1303 * when it wakes up for the next time.
1318 */ 1304 */
1319 sta->flags |= WLAN_STA_CLEAR_PS_FILT; 1305 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
1320 1306
1321 /* 1307 /*
1322 * This code races in the following way: 1308 * This code races in the following way:
@@ -1348,84 +1334,89 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1348 * can be unknown, for example with different interrupt status 1334 * can be unknown, for example with different interrupt status
1349 * bits. 1335 * bits.
1350 */ 1336 */
1351 if (sta->flags & WLAN_STA_PS && 1337 if (test_sta_flags(sta, WLAN_STA_PS) &&
1352 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 1338 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1353 ieee80211_remove_tx_extra(local, sta->key, skb, 1339 ieee80211_remove_tx_extra(local, sta->key, skb);
1354 &status->control);
1355 skb_queue_tail(&sta->tx_filtered, skb); 1340 skb_queue_tail(&sta->tx_filtered, skb);
1356 return; 1341 return;
1357 } 1342 }
1358 1343
1359 if (!(sta->flags & WLAN_STA_PS) && 1344 if (!test_sta_flags(sta, WLAN_STA_PS) &&
1360 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { 1345 !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
1361 /* Software retry the packet once */ 1346 /* Software retry the packet once */
1362 status->control.flags |= IEEE80211_TXCTL_REQUEUE; 1347 info->flags |= IEEE80211_TX_CTL_REQUEUE;
1363 ieee80211_remove_tx_extra(local, sta->key, skb, 1348 ieee80211_remove_tx_extra(local, sta->key, skb);
1364 &status->control);
1365 dev_queue_xmit(skb); 1349 dev_queue_xmit(skb);
1366 return; 1350 return;
1367 } 1351 }
1368 1352
1353#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1369 if (net_ratelimit()) 1354 if (net_ratelimit())
1370 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 1355 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1371 "queue_len=%d PS=%d @%lu\n", 1356 "queue_len=%d PS=%d @%lu\n",
1372 wiphy_name(local->hw.wiphy), 1357 wiphy_name(local->hw.wiphy),
1373 skb_queue_len(&sta->tx_filtered), 1358 skb_queue_len(&sta->tx_filtered),
1374 !!(sta->flags & WLAN_STA_PS), jiffies); 1359 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
1360#endif
1375 dev_kfree_skb(skb); 1361 dev_kfree_skb(skb);
1376} 1362}
1377 1363
1378void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 1364void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1379 struct ieee80211_tx_status *status)
1380{ 1365{
1381 struct sk_buff *skb2; 1366 struct sk_buff *skb2;
1382 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1383 struct ieee80211_local *local = hw_to_local(hw); 1368 struct ieee80211_local *local = hw_to_local(hw);
1369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1384 u16 frag, type; 1370 u16 frag, type;
1371 __le16 fc;
1385 struct ieee80211_tx_status_rtap_hdr *rthdr; 1372 struct ieee80211_tx_status_rtap_hdr *rthdr;
1386 struct ieee80211_sub_if_data *sdata; 1373 struct ieee80211_sub_if_data *sdata;
1387 struct net_device *prev_dev = NULL; 1374 struct net_device *prev_dev = NULL;
1388 1375 struct sta_info *sta;
1389 if (!status) {
1390 printk(KERN_ERR
1391 "%s: ieee80211_tx_status called with NULL status\n",
1392 wiphy_name(local->hw.wiphy));
1393 dev_kfree_skb(skb);
1394 return;
1395 }
1396 1376
1397 rcu_read_lock(); 1377 rcu_read_lock();
1398 1378
1399 if (status->excessive_retries) { 1379 if (info->status.excessive_retries) {
1400 struct sta_info *sta;
1401 sta = sta_info_get(local, hdr->addr1); 1380 sta = sta_info_get(local, hdr->addr1);
1402 if (sta) { 1381 if (sta) {
1403 if (sta->flags & WLAN_STA_PS) { 1382 if (test_sta_flags(sta, WLAN_STA_PS)) {
1404 /* 1383 /*
1405 * The STA is in power save mode, so assume 1384 * The STA is in power save mode, so assume
1406 * that this TX packet failed because of that. 1385 * that this TX packet failed because of that.
1407 */ 1386 */
1408 status->excessive_retries = 0; 1387 ieee80211_handle_filtered_frame(local, sta, skb);
1409 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1410 ieee80211_handle_filtered_frame(local, sta,
1411 skb, status);
1412 rcu_read_unlock(); 1388 rcu_read_unlock();
1413 return; 1389 return;
1414 } 1390 }
1415 } 1391 }
1416 } 1392 }
1417 1393
1418 if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) { 1394 fc = hdr->frame_control;
1419 struct sta_info *sta; 1395
1396 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
1397 (ieee80211_is_data_qos(fc))) {
1398 u16 tid, ssn;
1399 u8 *qc;
1420 sta = sta_info_get(local, hdr->addr1); 1400 sta = sta_info_get(local, hdr->addr1);
1421 if (sta) { 1401 if (sta) {
1422 ieee80211_handle_filtered_frame(local, sta, skb, 1402 qc = ieee80211_get_qos_ctl(hdr);
1423 status); 1403 tid = qc[0] & 0xf;
1404 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1405 & IEEE80211_SCTL_SEQ);
1406 ieee80211_send_bar(sta->sdata->dev, hdr->addr1,
1407 tid, ssn);
1408 }
1409 }
1410
1411 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1412 sta = sta_info_get(local, hdr->addr1);
1413 if (sta) {
1414 ieee80211_handle_filtered_frame(local, sta, skb);
1424 rcu_read_unlock(); 1415 rcu_read_unlock();
1425 return; 1416 return;
1426 } 1417 }
1427 } else 1418 } else
1428 rate_control_tx_status(local->mdev, skb, status); 1419 rate_control_tx_status(local->mdev, skb);
1429 1420
1430 rcu_read_unlock(); 1421 rcu_read_unlock();
1431 1422
@@ -1439,14 +1430,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1439 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1430 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1440 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; 1431 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
1441 1432
1442 if (status->flags & IEEE80211_TX_STATUS_ACK) { 1433 if (info->flags & IEEE80211_TX_STAT_ACK) {
1443 if (frag == 0) { 1434 if (frag == 0) {
1444 local->dot11TransmittedFrameCount++; 1435 local->dot11TransmittedFrameCount++;
1445 if (is_multicast_ether_addr(hdr->addr1)) 1436 if (is_multicast_ether_addr(hdr->addr1))
1446 local->dot11MulticastTransmittedFrameCount++; 1437 local->dot11MulticastTransmittedFrameCount++;
1447 if (status->retry_count > 0) 1438 if (info->status.retry_count > 0)
1448 local->dot11RetryCount++; 1439 local->dot11RetryCount++;
1449 if (status->retry_count > 1) 1440 if (info->status.retry_count > 1)
1450 local->dot11MultipleRetryCount++; 1441 local->dot11MultipleRetryCount++;
1451 } 1442 }
1452 1443
@@ -1483,7 +1474,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1483 return; 1474 return;
1484 } 1475 }
1485 1476
1486 rthdr = (struct ieee80211_tx_status_rtap_hdr*) 1477 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
1487 skb_push(skb, sizeof(*rthdr)); 1478 skb_push(skb, sizeof(*rthdr));
1488 1479
1489 memset(rthdr, 0, sizeof(*rthdr)); 1480 memset(rthdr, 0, sizeof(*rthdr));
@@ -1492,17 +1483,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
 		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
 			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));

-	if (!(status->flags & IEEE80211_TX_STATUS_ACK) &&
+	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
 	    !is_multicast_ether_addr(hdr->addr1))
 		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);

-	if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) &&
-	    (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT))
+	if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
+	    (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
 		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
-	else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS)
+	else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
 		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);

-	rthdr->data_retries = status->retry_count;
+	rthdr->data_retries = info->status.retry_count;

 	/* XXX: is this sufficient for BPF? */
 	skb_set_mac_header(skb, 0);
@@ -1628,7 +1619,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	int result;
 	enum ieee80211_band band;
 	struct net_device *mdev;
-	struct ieee80211_sub_if_data *sdata;
+	struct wireless_dev *mwdev;

 	/*
 	 * generic code guarantees at least one band,
@@ -1652,19 +1643,30 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (result < 0)
 		return result;

-	/* for now, mdev needs sub_if_data :/ */
-	mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
-			    "wmaster%d", ether_setup);
+	/*
+	 * We use the number of queues for feature tests (QoS, HT) internally
+	 * so restrict them appropriately.
+	 */
+	if (hw->queues > IEEE80211_MAX_QUEUES)
+		hw->queues = IEEE80211_MAX_QUEUES;
+	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
+		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
+	if (hw->queues < 4)
+		hw->ampdu_queues = 0;
+
+	mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
+			       "wmaster%d", ether_setup,
+			       ieee80211_num_queues(hw));
 	if (!mdev)
 		goto fail_mdev_alloc;

-	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
-	mdev->ieee80211_ptr = &sdata->wdev;
-	sdata->wdev.wiphy = local->hw.wiphy;
+	mwdev = netdev_priv(mdev);
+	mdev->ieee80211_ptr = mwdev;
+	mwdev->wiphy = local->hw.wiphy;

 	local->mdev = mdev;

-	ieee80211_rx_bss_list_init(mdev);
+	ieee80211_rx_bss_list_init(local);

 	mdev->hard_start_xmit = ieee80211_master_start_xmit;
 	mdev->open = ieee80211_master_open;
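The master device now keeps only a struct wireless_dev behind it, retrieved with netdev_priv(). A toy userspace sketch of that single-allocation idiom (names are invented for illustration, not the kernel API, and the kernel additionally aligns the private area):

#include <stdio.h>
#include <stdlib.h>

struct toy_netdev {
	char name[16];
	/* priv_size bytes follow immediately after this struct */
};

struct toy_wireless_dev {
	void *wiphy;
};

static struct toy_netdev *toy_alloc_netdev(size_t priv_size)
{
	/* one allocation carries the device plus its private data */
	return calloc(1, sizeof(struct toy_netdev) + priv_size);
}

static void *toy_netdev_priv(struct toy_netdev *dev)
{
	return (char *)dev + sizeof(*dev);
}

int main(void)
{
	struct toy_netdev *mdev = toy_alloc_netdev(sizeof(struct toy_wireless_dev));
	struct toy_wireless_dev *mwdev;

	if (!mdev)
		return 1;
	mwdev = toy_netdev_priv(mdev);	/* plays the role of netdev_priv(mdev) */
	mwdev->wiphy = NULL;		/* would point at local->hw.wiphy */
	printf("private area sits %zu bytes into the allocation\n",
	       sizeof(struct toy_netdev));
	free(mdev);
	return 0;
}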
@@ -1673,18 +1675,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	mdev->header_ops = &ieee80211_header_ops;
 	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

-	sdata->vif.type = IEEE80211_IF_TYPE_AP;
-	sdata->dev = mdev;
-	sdata->local = local;
-	sdata->u.ap.force_unicast_rateidx = -1;
-	sdata->u.ap.max_ratectrl_rateidx = -1;
-	ieee80211_if_sdata_init(sdata);
-
-	/* no RCU needed since we're still during init phase */
-	list_add_tail(&sdata->list, &local->interfaces);
-
 	name = wiphy_dev(local->hw.wiphy)->driver->name;
-	local->hw.workqueue = create_singlethread_workqueue(name);
+	local->hw.workqueue = create_freezeable_workqueue(name);
 	if (!local->hw.workqueue) {
 		result = -ENOMEM;
 		goto fail_workqueue;
@@ -1700,15 +1692,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)

 	debugfs_hw_add(local);

-	local->hw.conf.beacon_int = 1000;
+	if (local->hw.conf.beacon_int < 10)
+		local->hw.conf.beacon_int = 100;

-	local->wstats_flags |= local->hw.max_rssi ?
-			       IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID;
-	local->wstats_flags |= local->hw.max_signal ?
-			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
-	local->wstats_flags |= local->hw.max_noise ?
-			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
-	if (local->hw.max_rssi < 0 || local->hw.max_noise < 0)
+	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
+						  IEEE80211_HW_SIGNAL_DB |
+						  IEEE80211_HW_SIGNAL_DBM) ?
+			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
+	local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
+			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
+	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
 		local->wstats_flags |= IW_QUAL_DBM;

 	result = sta_info_start(local);
@@ -1727,9 +1720,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (result < 0)
 		goto fail_dev;

-	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
-	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);
-
 	result = ieee80211_init_rate_ctrl_alg(local,
 					      hw->rate_control_algorithm);
 	if (result < 0) {
@@ -1746,16 +1736,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		goto fail_wep;
 	}

-	ieee80211_install_qdisc(local->mdev);
+	local->mdev->select_queue = ieee80211_select_queue;

 	/* add one default STA interface */
-	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
+	result = ieee80211_if_add(local, "wlan%d", NULL,
 				  IEEE80211_IF_TYPE_STA, NULL);
 	if (result)
 		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
 		       wiphy_name(local->hw.wiphy));

-	local->reg_state = IEEE80211_DEV_REGISTERED;
 	rtnl_unlock();

 	ieee80211_led_init(local);
@@ -1765,7 +1754,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 fail_wep:
 	rate_control_deinitialize(local);
 fail_rate:
-	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
 	unregister_netdevice(local->mdev);
 	local->mdev = NULL;
 fail_dev:
@@ -1775,10 +1763,8 @@ fail_sta_info:
 	debugfs_hw_del(local);
 	destroy_workqueue(local->hw.workqueue);
 fail_workqueue:
-	if (local->mdev != NULL) {
-		ieee80211_if_free(local->mdev);
-		local->mdev = NULL;
-	}
+	if (local->mdev)
+		free_netdev(local->mdev);
 fail_mdev_alloc:
 	wiphy_unregister(local->hw.wiphy);
 	return result;
@@ -1788,42 +1774,27 @@ EXPORT_SYMBOL(ieee80211_register_hw);
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata, *tmp;

 	tasklet_kill(&local->tx_pending_tasklet);
 	tasklet_kill(&local->tasklet);

 	rtnl_lock();

-	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);
-
-	local->reg_state = IEEE80211_DEV_UNREGISTERED;
-
 	/*
 	 * At this point, interface list manipulations are fine
 	 * because the driver cannot be handing us frames any
 	 * more and the tasklet is killed.
 	 */

-	/*
-	 * First, we remove all non-master interfaces. Do this because they
-	 * may have bss pointer dependency on the master, and when we free
-	 * the master these would be freed as well, breaking our list
-	 * iteration completely.
-	 */
-	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
-		if (sdata->dev == local->mdev)
-			continue;
-		list_del(&sdata->list);
-		__ieee80211_if_del(local, sdata);
-	}
+	/* First, we remove all virtual interfaces. */
+	ieee80211_remove_interfaces(local);

 	/* then, finally, remove the master interface */
-	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));
+	unregister_netdevice(local->mdev);

 	rtnl_unlock();

-	ieee80211_rx_bss_list_deinit(local->mdev);
+	ieee80211_rx_bss_list_deinit(local);
 	ieee80211_clear_tx_pending(local);
 	sta_info_stop(local);
 	rate_control_deinitialize(local);
@@ -1840,8 +1811,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 	wiphy_unregister(local->hw.wiphy);
 	ieee80211_wep_free(local);
 	ieee80211_led_exit(local);
-	ieee80211_if_free(local->mdev);
-	local->mdev = NULL;
+	free_netdev(local->mdev);
 }
 EXPORT_SYMBOL(ieee80211_unregister_hw);

@@ -1858,27 +1828,17 @@ static int __init ieee80211_init(void)
 	struct sk_buff *skb;
 	int ret;

-	BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));
+	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
+	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
+		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

 	ret = rc80211_pid_init();
 	if (ret)
-		goto out;
-
-	ret = ieee80211_wme_register();
-	if (ret) {
-		printk(KERN_DEBUG "ieee80211_init: failed to "
-		       "initialize WME (err=%d)\n", ret);
-		goto out_cleanup_pid;
-	}
+		return ret;

 	ieee80211_debugfs_netdev_init();

 	return 0;
-
- out_cleanup_pid:
-	rc80211_pid_exit();
- out:
-	return ret;
 }

 static void __exit ieee80211_exit(void)
@@ -1894,7 +1854,6 @@ static void __exit ieee80211_exit(void)
 	if (mesh_allocated)
 		ieee80211s_stop();

-	ieee80211_wme_unregister();
 	ieee80211_debugfs_netdev_exit();
 }

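The strengthened BUILD_BUG_ON() pair above pins two layout facts at compile time: the new ieee80211_tx_info must fit inside skb->cb (48 bytes in this era), and so must its driver_data tail. The same trick in plain C, using the classic negative-array-size assertion; the struct layout below is invented purely for illustration:

#include <stddef.h>

#define SKB_CB_SIZE 48
#define STATIC_ASSERT(cond, tag) typedef char tag[(cond) ? 1 : -1]

struct toy_tx_info {
	unsigned int flags;
	unsigned char band;
	unsigned char antenna;
	unsigned char driver_data[24];	/* hypothetical driver scratch area */
};

/* compilation fails if either assertion does not hold */
STATIC_ASSERT(sizeof(struct toy_tx_info) <= SKB_CB_SIZE,
	      tx_info_fits_in_cb);
STATIC_ASSERT(offsetof(struct toy_tx_info, driver_data) + 24 <= SKB_CB_SIZE,
	      driver_data_fits_in_cb);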
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 697ef67f96b6..b5933b271491 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -315,6 +315,13 @@ struct mesh_table *mesh_table_alloc(int size_order)
 	return newtbl;
 }

+static void __mesh_table_free(struct mesh_table *tbl)
+{
+	kfree(tbl->hash_buckets);
+	kfree(tbl->hashwlock);
+	kfree(tbl);
+}
+
 void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
@@ -330,9 +337,7 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 		}
 		spin_unlock(&tbl->hashwlock[i]);
 	}
-	kfree(tbl->hash_buckets);
-	kfree(tbl->hashwlock);
-	kfree(tbl);
+	__mesh_table_free(tbl);
 }

 static void ieee80211_mesh_path_timer(unsigned long data)
@@ -349,21 +354,16 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
 {
 	struct mesh_table *newtbl;
 	struct hlist_head *oldhash;
-	struct hlist_node *p;
-	int err = 0;
+	struct hlist_node *p, *q;
 	int i;

 	if (atomic_read(&tbl->entries)
-			< tbl->mean_chain_len * (tbl->hash_mask + 1)) {
-		err = -EPERM;
+			< tbl->mean_chain_len * (tbl->hash_mask + 1))
 		goto endgrow;
-	}

 	newtbl = mesh_table_alloc(tbl->size_order + 1);
-	if (!newtbl) {
-		err = -ENOMEM;
+	if (!newtbl)
 		goto endgrow;
-	}

 	newtbl->free_node = tbl->free_node;
 	newtbl->mean_chain_len = tbl->mean_chain_len;
@@ -373,13 +373,19 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
 	oldhash = tbl->hash_buckets;
 	for (i = 0; i <= tbl->hash_mask; i++)
 		hlist_for_each(p, &oldhash[i])
-			tbl->copy_node(p, newtbl);
+			if (tbl->copy_node(p, newtbl) < 0)
+				goto errcopy;

+	return newtbl;
+
+errcopy:
+	for (i = 0; i <= newtbl->hash_mask; i++) {
+		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
+			tbl->free_node(p, 0);
+	}
+	__mesh_table_free(tbl);
 endgrow:
-	if (err)
-		return NULL;
-	else
-		return newtbl;
+	return NULL;
 }

 /**
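The grow path above doubles size_order, copies every node, and on any copy failure tears the partial copy down and returns NULL, so callers simply keep the old table. A compilable toy version of that grow-and-unwind shape (types and names are illustrative, not the kernel's):

#include <stdlib.h>

struct toy_table {
	int order;		/* 1 << order buckets */
	void **buckets;
};

static struct toy_table *toy_alloc(int order)
{
	struct toy_table *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	t->order = order;
	t->buckets = calloc(1u << order, sizeof(void *));
	if (!t->buckets) {
		free(t);
		return NULL;
	}
	return t;
}

static void toy_free(struct toy_table *t)
{
	free(t->buckets);
	free(t);
}

/* Returns the doubled table, or NULL when growing failed; in the NULL
 * case the caller keeps using the old table untouched. */
static struct toy_table *toy_grow(struct toy_table *old,
				  int (*copy)(void *item, struct toy_table *dst))
{
	struct toy_table *new = toy_alloc(old->order + 1);
	unsigned int i;

	if (!new)
		return NULL;
	for (i = 0; i < (1u << old->order); i++) {
		if (old->buckets[i] && copy(old->buckets[i], new) < 0) {
			toy_free(new);	/* unwind the half-built copy */
			return NULL;
		}
	}
	return new;
}

int main(void) { (void)toy_grow; return 0; }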
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2e161f6d8288..669eafafe497 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -109,7 +109,7 @@ struct mesh_table {
 	__u32 hash_rnd;	/* Used for hash generation */
 	atomic_t entries;	/* Up to MAX_MESH_NEIGHBOURS */
 	void (*free_node) (struct hlist_node *p, bool free_leafs);
-	void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
+	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
 	int size_order;
 	int mean_chain_len;
 };
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index af0cd1e3e213..7fa149e230e6 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -26,7 +26,7 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
 {
 	if (ae)
 		offset += 6;
-	return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset)));
+	return get_unaligned_le32(preq_elem + offset);
 }

 /* HWMP IE processing macros */
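get_unaligned_le32() collapses the old le32_to_cpu(get_unaligned(...)) pair: it reads four bytes at any alignment and assembles them little-endian regardless of host byte order. A portable userspace equivalent, with a made-up IE buffer for the demonstration:

#include <assert.h>
#include <stdint.h>

/* Byte-wise loads sidestep both alignment traps and endianness, which is
 * exactly what the kernel helper provides to u32_field_get() above. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* field bytes as they appear on the air, least significant first */
	uint8_t ie[7] = { 0, 0, 0, 0x78, 0x56, 0x34, 0x12 };

	assert(load_le32(ie + 3) == 0x12345678);	/* offset 3: unaligned */
	return 0;
}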
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 99c2d360888e..5f88a2e6ee50 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;

+	err = -ENOMEM;
 	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
-	if (!new_mpath) {
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_mpath)
+		goto err_path_alloc;
+
 	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
-	if (!new_node) {
-		kfree(new_mpath);
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_node)
+		goto err_node_alloc;

 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev)

 	spin_lock(&mesh_paths->hashwlock[hash_idx]);

+	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
-				== 0) {
-			err = -EEXIST;
-			atomic_dec(&sdata->u.sta.mpaths);
-			kfree(new_node);
-			kfree(new_mpath);
-			goto endadd;
-		}
+		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
 	}

 	hlist_add_head_rcu(&new_node->list, bucket);
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	    mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
 		grow = 1;

-endadd:
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
 	read_unlock(&pathtbl_resize_lock);
-	if (!err && grow) {
+	if (grow) {
 		struct mesh_table *oldtbl, *newtbl;

 		write_lock(&pathtbl_resize_lock);
@@ -217,7 +206,7 @@ endadd:
 		newtbl = mesh_table_grow(mesh_paths);
 		if (!newtbl) {
 			write_unlock(&pathtbl_resize_lock);
-			return -ENOMEM;
+			return 0;
 		}
 		rcu_assign_pointer(mesh_paths, newtbl);
 		write_unlock(&pathtbl_resize_lock);
@@ -225,7 +214,16 @@ endadd:
 		synchronize_rcu();
 		mesh_table_free(oldtbl, false);
 	}
-endadd2:
+	return 0;
+
+err_exists:
+	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
+	atomic_dec(&sdata->u.sta.mpaths);
 	return err;
 }

@@ -264,7 +262,6 @@ void mesh_plink_broken(struct sta_info *sta)
 	}
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(mesh_plink_broken);

 /**
  * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
@@ -460,25 +457,28 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
 	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
 	mpath = node->mpath;
 	hlist_del_rcu(p);
-	synchronize_rcu();
 	if (free_leafs)
 		kfree(mpath);
 	kfree(node);
 }

-static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
+static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node, *new_node;
 	u32 hash_idx;

+	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+	if (new_node == NULL)
+		return -ENOMEM;
+
 	node = hlist_entry(p, struct mpath_node, list);
 	mpath = node->mpath;
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
 	new_node->mpath = mpath;
 	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
 	hlist_add_head(&new_node->list,
 			&newtbl->hash_buckets[hash_idx]);
+	return 0;
 }

 int mesh_pathtbl_init(void)
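mesh_path_add() now follows the conventional kernel goto ladder: set err before each failure point, and let each label undo exactly the steps that completed, in reverse order. The same shape reduced to a compilable toy (illustrative only; the real function also drops the per-interface path counter at the final label):

#include <errno.h>
#include <stdlib.h>

static int toy_add(int duplicate)
{
	void *path, *node;
	int err = -ENOMEM;

	path = malloc(64);
	if (!path)
		goto err_path_alloc;
	node = malloc(32);
	if (!node)
		goto err_node_alloc;

	err = -EEXIST;
	if (duplicate)
		goto err_exists;

	/* success: 'node' would now be linked into the table */
	return 0;

err_exists:
	free(node);
err_node_alloc:
	free(path);
err_path_alloc:
	return err;
}

int main(void)
{
	return toy_add(1) == -EEXIST ? 0 : 1;
}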
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 37f0c2b94ae7..9efeb1f07025 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -79,7 +79,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags |= WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED;
109 sta->supp_rates[local->hw.conf.channel->band] = rates; 109 sta->supp_rates[local->hw.conf.channel->band] = rates;
110 110
111 return sta; 111 return sta;
@@ -118,7 +118,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
  *
  * All mesh paths with this peer as next hop will be flushed
  *
- * Locking: the caller must hold sta->plink_lock
+ * Locking: the caller must hold sta->lock
  */
 static void __mesh_plink_deactivate(struct sta_info *sta)
 {
@@ -139,9 +139,9 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
139 */ 139 */
140void mesh_plink_deactivate(struct sta_info *sta) 140void mesh_plink_deactivate(struct sta_info *sta)
141{ 141{
142 spin_lock_bh(&sta->plink_lock); 142 spin_lock_bh(&sta->lock);
143 __mesh_plink_deactivate(sta); 143 __mesh_plink_deactivate(sta);
144 spin_unlock_bh(&sta->plink_lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct net_device *dev,
@@ -270,10 +270,10 @@ static void mesh_plink_timer(unsigned long data)
270 */ 270 */
271 sta = (struct sta_info *) data; 271 sta = (struct sta_info *) data;
272 272
273 spin_lock_bh(&sta->plink_lock); 273 spin_lock_bh(&sta->lock);
274 if (sta->ignore_plink_timer) { 274 if (sta->ignore_plink_timer) {
275 sta->ignore_plink_timer = false; 275 sta->ignore_plink_timer = false;
276 spin_unlock_bh(&sta->plink_lock); 276 spin_unlock_bh(&sta->lock);
277 return; 277 return;
278 } 278 }
279 mpl_dbg("Mesh plink timer for %s fired on state %d\n", 279 mpl_dbg("Mesh plink timer for %s fired on state %d\n",
@@ -298,7 +298,7 @@ static void mesh_plink_timer(unsigned long data)
298 rand % sta->plink_timeout; 298 rand % sta->plink_timeout;
299 ++sta->plink_retries; 299 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 300 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->plink_lock); 301 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
303 0, 0); 303 0, 0);
304 break; 304 break;
@@ -311,7 +311,7 @@ static void mesh_plink_timer(unsigned long data)
311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
312 sta->plink_state = PLINK_HOLDING; 312 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->plink_lock); 314 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid,
316 reason); 316 reason);
317 break; 317 break;
@@ -319,10 +319,10 @@ static void mesh_plink_timer(unsigned long data)
319 /* holding timer */ 319 /* holding timer */
320 del_timer(&sta->plink_timer); 320 del_timer(&sta->plink_timer);
321 mesh_plink_fsm_restart(sta); 321 mesh_plink_fsm_restart(sta);
322 spin_unlock_bh(&sta->plink_lock); 322 spin_unlock_bh(&sta->lock);
323 break; 323 break;
324 default: 324 default:
325 spin_unlock_bh(&sta->plink_lock); 325 spin_unlock_bh(&sta->lock);
326 break; 326 break;
327 } 327 }
328} 328}
@@ -344,16 +344,16 @@ int mesh_plink_open(struct sta_info *sta)
344 DECLARE_MAC_BUF(mac); 344 DECLARE_MAC_BUF(mac);
345#endif 345#endif
346 346
347 spin_lock_bh(&sta->plink_lock); 347 spin_lock_bh(&sta->lock);
348 get_random_bytes(&llid, 2); 348 get_random_bytes(&llid, 2);
349 sta->llid = llid; 349 sta->llid = llid;
350 if (sta->plink_state != PLINK_LISTEN) { 350 if (sta->plink_state != PLINK_LISTEN) {
351 spin_unlock_bh(&sta->plink_lock); 351 spin_unlock_bh(&sta->lock);
352 return -EBUSY; 352 return -EBUSY;
353 } 353 }
354 sta->plink_state = PLINK_OPN_SNT; 354 sta->plink_state = PLINK_OPN_SNT;
355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
356 spin_unlock_bh(&sta->plink_lock); 356 spin_unlock_bh(&sta->lock);
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 357 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 358 print_mac(mac, sta->addr));
359 359
@@ -367,10 +367,10 @@ void mesh_plink_block(struct sta_info *sta)
367 DECLARE_MAC_BUF(mac); 367 DECLARE_MAC_BUF(mac);
368#endif 368#endif
369 369
370 spin_lock_bh(&sta->plink_lock); 370 spin_lock_bh(&sta->lock);
371 __mesh_plink_deactivate(sta); 371 __mesh_plink_deactivate(sta);
372 sta->plink_state = PLINK_BLOCKED; 372 sta->plink_state = PLINK_BLOCKED;
373 spin_unlock_bh(&sta->plink_lock); 373 spin_unlock_bh(&sta->lock);
374} 374}
375 375
376int mesh_plink_close(struct sta_info *sta) 376int mesh_plink_close(struct sta_info *sta)
@@ -383,14 +383,14 @@ int mesh_plink_close(struct sta_info *sta)
383 383
384 mpl_dbg("Mesh plink: closing link with %s\n", 384 mpl_dbg("Mesh plink: closing link with %s\n",
385 print_mac(mac, sta->addr)); 385 print_mac(mac, sta->addr));
386 spin_lock_bh(&sta->plink_lock); 386 spin_lock_bh(&sta->lock);
387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); 387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
388 reason = sta->reason; 388 reason = sta->reason;
389 389
390 if (sta->plink_state == PLINK_LISTEN || 390 if (sta->plink_state == PLINK_LISTEN ||
391 sta->plink_state == PLINK_BLOCKED) { 391 sta->plink_state == PLINK_BLOCKED) {
392 mesh_plink_fsm_restart(sta); 392 mesh_plink_fsm_restart(sta);
393 spin_unlock_bh(&sta->plink_lock); 393 spin_unlock_bh(&sta->lock);
394 return 0; 394 return 0;
395 } else if (sta->plink_state == PLINK_ESTAB) { 395 } else if (sta->plink_state == PLINK_ESTAB) {
396 __mesh_plink_deactivate(sta); 396 __mesh_plink_deactivate(sta);
@@ -402,7 +402,7 @@ int mesh_plink_close(struct sta_info *sta)
402 sta->plink_state = PLINK_HOLDING; 402 sta->plink_state = PLINK_HOLDING;
403 llid = sta->llid; 403 llid = sta->llid;
404 plid = sta->plid; 404 plid = sta->plid;
405 spin_unlock_bh(&sta->plink_lock); 405 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid,
407 plid, reason); 407 plid, reason);
408 return 0; 408 return 0;
@@ -490,7 +490,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
490 /* avoid warning */ 490 /* avoid warning */
491 break; 491 break;
492 } 492 }
493 spin_lock_bh(&sta->plink_lock); 493 spin_lock_bh(&sta->lock);
494 } else if (!sta) { 494 } else if (!sta) {
495 /* ftype == PLINK_OPEN */ 495 /* ftype == PLINK_OPEN */
496 u64 rates; 496 u64 rates;
@@ -512,9 +512,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
512 return; 512 return;
513 } 513 }
514 event = OPN_ACPT; 514 event = OPN_ACPT;
515 spin_lock_bh(&sta->plink_lock); 515 spin_lock_bh(&sta->lock);
516 } else { 516 } else {
517 spin_lock_bh(&sta->plink_lock); 517 spin_lock_bh(&sta->lock);
518 switch (ftype) { 518 switch (ftype) {
519 case PLINK_OPEN: 519 case PLINK_OPEN:
520 if (!mesh_plink_free_count(sdata) || 520 if (!mesh_plink_free_count(sdata) ||
@@ -551,7 +551,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
551 break; 551 break;
552 default: 552 default:
553 mpl_dbg("Mesh plink: unknown frame subtype\n"); 553 mpl_dbg("Mesh plink: unknown frame subtype\n");
554 spin_unlock_bh(&sta->plink_lock); 554 spin_unlock_bh(&sta->lock);
555 rcu_read_unlock(); 555 rcu_read_unlock();
556 return; 556 return;
557 } 557 }
@@ -568,7 +568,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
568 switch (event) { 568 switch (event) {
569 case CLS_ACPT: 569 case CLS_ACPT:
570 mesh_plink_fsm_restart(sta); 570 mesh_plink_fsm_restart(sta);
571 spin_unlock_bh(&sta->plink_lock); 571 spin_unlock_bh(&sta->lock);
572 break; 572 break;
573 case OPN_ACPT: 573 case OPN_ACPT:
574 sta->plink_state = PLINK_OPN_RCVD; 574 sta->plink_state = PLINK_OPN_RCVD;
@@ -576,14 +576,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
576 get_random_bytes(&llid, 2); 576 get_random_bytes(&llid, 2);
577 sta->llid = llid; 577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->plink_lock); 579 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
581 0, 0); 581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr,
583 llid, plid, 0); 583 llid, plid, 0);
584 break; 584 break;
585 default: 585 default:
586 spin_unlock_bh(&sta->plink_lock); 586 spin_unlock_bh(&sta->lock);
587 break; 587 break;
588 } 588 }
589 break; 589 break;
@@ -603,7 +603,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
603 sta->ignore_plink_timer = true; 603 sta->ignore_plink_timer = true;
604 604
605 llid = sta->llid; 605 llid = sta->llid;
606 spin_unlock_bh(&sta->plink_lock); 606 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
608 plid, reason); 608 plid, reason);
609 break; 609 break;
@@ -612,7 +612,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
612 sta->plink_state = PLINK_OPN_RCVD; 612 sta->plink_state = PLINK_OPN_RCVD;
613 sta->plid = plid; 613 sta->plid = plid;
614 llid = sta->llid; 614 llid = sta->llid;
615 spin_unlock_bh(&sta->plink_lock); 615 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
617 plid, 0); 617 plid, 0);
618 break; 618 break;
@@ -622,10 +622,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
622 dot11MeshConfirmTimeout(sdata))) 622 dot11MeshConfirmTimeout(sdata)))
623 sta->ignore_plink_timer = true; 623 sta->ignore_plink_timer = true;
624 624
625 spin_unlock_bh(&sta->plink_lock); 625 spin_unlock_bh(&sta->lock);
626 break; 626 break;
627 default: 627 default:
628 spin_unlock_bh(&sta->plink_lock); 628 spin_unlock_bh(&sta->lock);
629 break; 629 break;
630 } 630 }
631 break; 631 break;
@@ -645,13 +645,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
645 sta->ignore_plink_timer = true; 645 sta->ignore_plink_timer = true;
646 646
647 llid = sta->llid; 647 llid = sta->llid;
648 spin_unlock_bh(&sta->plink_lock); 648 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
650 plid, reason); 650 plid, reason);
651 break; 651 break;
652 case OPN_ACPT: 652 case OPN_ACPT:
653 llid = sta->llid; 653 llid = sta->llid;
654 spin_unlock_bh(&sta->plink_lock); 654 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
656 plid, 0); 656 plid, 0);
657 break; 657 break;
@@ -659,12 +659,12 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
659 del_timer(&sta->plink_timer); 659 del_timer(&sta->plink_timer);
660 sta->plink_state = PLINK_ESTAB; 660 sta->plink_state = PLINK_ESTAB;
661 mesh_plink_inc_estab_count(sdata); 661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->plink_lock); 662 spin_unlock_bh(&sta->lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr)); 664 print_mac(mac, sta->addr));
665 break; 665 break;
666 default: 666 default:
667 spin_unlock_bh(&sta->plink_lock); 667 spin_unlock_bh(&sta->lock);
668 break; 668 break;
669 } 669 }
670 break; 670 break;
@@ -684,7 +684,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
684 sta->ignore_plink_timer = true; 684 sta->ignore_plink_timer = true;
685 685
686 llid = sta->llid; 686 llid = sta->llid;
687 spin_unlock_bh(&sta->plink_lock); 687 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
689 plid, reason); 689 plid, reason);
690 break; 690 break;
@@ -692,14 +692,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
692 del_timer(&sta->plink_timer); 692 del_timer(&sta->plink_timer);
693 sta->plink_state = PLINK_ESTAB; 693 sta->plink_state = PLINK_ESTAB;
694 mesh_plink_inc_estab_count(sdata); 694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->plink_lock); 695 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 697 print_mac(mac, sta->addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
699 plid, 0); 699 plid, 0);
700 break; 700 break;
701 default: 701 default:
702 spin_unlock_bh(&sta->plink_lock); 702 spin_unlock_bh(&sta->lock);
703 break; 703 break;
704 } 704 }
705 break; 705 break;
@@ -713,18 +713,18 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
713 sta->plink_state = PLINK_HOLDING; 713 sta->plink_state = PLINK_HOLDING;
714 llid = sta->llid; 714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->plink_lock); 716 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
718 plid, reason); 718 plid, reason);
719 break; 719 break;
720 case OPN_ACPT: 720 case OPN_ACPT:
721 llid = sta->llid; 721 llid = sta->llid;
722 spin_unlock_bh(&sta->plink_lock); 722 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
724 plid, 0); 724 plid, 0);
725 break; 725 break;
726 default: 726 default:
727 spin_unlock_bh(&sta->plink_lock); 727 spin_unlock_bh(&sta->lock);
728 break; 728 break;
729 } 729 }
730 break; 730 break;
@@ -734,7 +734,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
734 if (del_timer(&sta->plink_timer)) 734 if (del_timer(&sta->plink_timer))
735 sta->ignore_plink_timer = 1; 735 sta->ignore_plink_timer = 1;
736 mesh_plink_fsm_restart(sta); 736 mesh_plink_fsm_restart(sta);
737 spin_unlock_bh(&sta->plink_lock); 737 spin_unlock_bh(&sta->lock);
738 break; 738 break;
739 case OPN_ACPT: 739 case OPN_ACPT:
740 case CNF_ACPT: 740 case CNF_ACPT:
@@ -742,19 +742,19 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
742 case CNF_RJCT: 742 case CNF_RJCT:
743 llid = sta->llid; 743 llid = sta->llid;
744 reason = sta->reason; 744 reason = sta->reason;
745 spin_unlock_bh(&sta->plink_lock); 745 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
747 plid, reason); 747 plid, reason);
748 break; 748 break;
749 default: 749 default:
750 spin_unlock_bh(&sta->plink_lock); 750 spin_unlock_bh(&sta->lock);
751 } 751 }
752 break; 752 break;
753 default: 753 default:
754 /* should not get here, PLINK_BLOCKED is dealt with at the 754 /* should not get here, PLINK_BLOCKED is dealt with at the
755 * beggining of the function 755 * beggining of the function
756 */ 756 */
757 spin_unlock_bh(&sta->plink_lock); 757 spin_unlock_bh(&sta->lock);
758 break; 758 break;
759 } 759 }
760 760
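Every FSM branch in the plink changes above now snapshots what a frame needs (llid, plid, reason) under the consolidated sta->lock, drops the lock, and only then calls mesh_plink_frame_tx(), so frames are never built with a spinlock held. A sketch of that copy-then-unlock pattern, with a pthread mutex standing in for spin_lock_bh():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sta {
	pthread_mutex_t lock;
	uint16_t llid, plid, reason;
};

static void toy_plink_frame_tx(uint16_t llid, uint16_t plid, uint16_t reason)
{
	/* frame construction may block/allocate: must run unlocked */
	printf("PLINK_CLOSE llid=%u plid=%u reason=%u\n", llid, plid, reason);
}

static void toy_plink_close(struct toy_sta *sta)
{
	uint16_t llid, plid, reason;

	pthread_mutex_lock(&sta->lock);
	llid = sta->llid;	/* snapshot state under the lock */
	plid = sta->plid;
	reason = sta->reason;
	pthread_mutex_unlock(&sta->lock);	/* nothing shared touched below */

	toy_plink_frame_tx(llid, plid, reason);
}

int main(void)
{
	struct toy_sta sta = { PTHREAD_MUTEX_INITIALIZER, 1, 2, 3 };

	toy_plink_close(&sta);
	return 0;
}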
diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c
index 0f844f7895f1..408649bd4702 100644
--- a/net/mac80211/michael.c
+++ b/net/mac80211/michael.c
@@ -6,85 +6,68 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/ieee80211.h>
+#include <asm/unaligned.h>

 #include "michael.h"

-static inline u32 rotr(u32 val, int bits)
-{
-	return (val >> bits) | (val << (32 - bits));
-}
-
-
-static inline u32 rotl(u32 val, int bits)
-{
-	return (val << bits) | (val >> (32 - bits));
-}
-
-
-static inline u32 xswap(u32 val)
-{
-	return ((val & 0xff00ff00) >> 8) | ((val & 0x00ff00ff) << 8);
-}
-
-
-#define michael_block(l, r) \
-do { \
-	r ^= rotl(l, 17); \
-	l += r; \
-	r ^= xswap(l); \
-	l += r; \
-	r ^= rotl(l, 3); \
-	l += r; \
-	r ^= rotr(l, 2); \
-	l += r; \
-} while (0)
-
-
-static inline u32 michael_get32(u8 *data)
+static void michael_block(struct michael_mic_ctx *mctx, u32 val)
 {
-	return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
+	mctx->l ^= val;
+	mctx->r ^= rol32(mctx->l, 17);
+	mctx->l += mctx->r;
+	mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
+		   ((mctx->l & 0x00ff00ff) << 8);
+	mctx->l += mctx->r;
+	mctx->r ^= rol32(mctx->l, 3);
+	mctx->l += mctx->r;
+	mctx->r ^= ror32(mctx->l, 2);
+	mctx->l += mctx->r;
 }

-
-static inline void michael_put32(u32 val, u8 *data)
+static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key,
+			    struct ieee80211_hdr *hdr)
 {
-	data[0] = val & 0xff;
-	data[1] = (val >> 8) & 0xff;
-	data[2] = (val >> 16) & 0xff;
-	data[3] = (val >> 24) & 0xff;
+	u8 *da, *sa, tid;
+
+	da = ieee80211_get_DA(hdr);
+	sa = ieee80211_get_SA(hdr);
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+	else
+		tid = 0;
+
+	mctx->l = get_unaligned_le32(key);
+	mctx->r = get_unaligned_le32(key + 4);
+
+	/*
+	 * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
+	 * calculation, but it is _not_ transmitted
+	 */
+	michael_block(mctx, get_unaligned_le32(da));
+	michael_block(mctx, get_unaligned_le16(&da[4]) |
+			    (get_unaligned_le16(sa) << 16));
+	michael_block(mctx, get_unaligned_le32(&sa[2]));
+	michael_block(mctx, tid);
 }

-
-void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority,
-		 u8 *data, size_t data_len, u8 *mic)
+void michael_mic(const u8 *key, struct ieee80211_hdr *hdr,
+		 const u8 *data, size_t data_len, u8 *mic)
 {
-	u32 l, r, val;
+	u32 val;
 	size_t block, blocks, left;
+	struct michael_mic_ctx mctx;

-	l = michael_get32(key);
-	r = michael_get32(key + 4);
-
-	/* A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
-	 * calculation, but it is _not_ transmitted */
-	l ^= michael_get32(da);
-	michael_block(l, r);
-	l ^= da[4] | (da[5] << 8) | (sa[0] << 16) | (sa[1] << 24);
-	michael_block(l, r);
-	l ^= michael_get32(&sa[2]);
-	michael_block(l, r);
-	l ^= priority;
-	michael_block(l, r);
+	michael_mic_hdr(&mctx, key, hdr);

 	/* Real data */
 	blocks = data_len / 4;
 	left = data_len % 4;

-	for (block = 0; block < blocks; block++) {
-		l ^= michael_get32(&data[block * 4]);
-		michael_block(l, r);
-	}
+	for (block = 0; block < blocks; block++)
+		michael_block(&mctx, get_unaligned_le32(&data[block * 4]));

 	/* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make
 	 * total length a multiple of 4. */
@@ -94,11 +77,10 @@ void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority,
 		left--;
 		val |= data[blocks * 4 + left];
 	}
-	l ^= val;
-	michael_block(l, r);
-	/* last block is zero, so l ^ 0 = l */
-	michael_block(l, r);

-	michael_put32(l, mic);
-	michael_put32(r, mic + 4);
+	michael_block(&mctx, val);
+	michael_block(&mctx, 0);
+
+	put_unaligned_le32(mctx.l, mic);
+	put_unaligned_le32(mctx.r, mic + 4);
 }
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h
index 2e6aebabeea1..3b848dad9587 100644
--- a/net/mac80211/michael.h
+++ b/net/mac80211/michael.h
@@ -14,7 +14,11 @@

 #define MICHAEL_MIC_LEN 8

-void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority,
-		 u8 *data, size_t data_len, u8 *mic);
+struct michael_mic_ctx {
+	u32 l, r;
+};
+
+void michael_mic(const u8 *key, struct ieee80211_hdr *hdr,
+		 const u8 *data, size_t data_len, u8 *mic);

 #endif /* MICHAEL_H */
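The rewrite keeps the Michael mixing function itself unchanged, only moving it from a macro over two locals into a function over a context struct: two 32-bit halves combined with rotates, a byte swap within 16-bit lanes, and additions. A freestanding, compilable version of that block step (the kernel takes rol32/ror32 from <linux/bitops.h>; the main() inputs here are arbitrary, not test vectors):

#include <stdint.h>
#include <stdio.h>

struct mic_ctx { uint32_t l, r; };

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

/* one Michael block step, mirroring the rewritten michael_block() */
static void mic_block(struct mic_ctx *m, uint32_t val)
{
	m->l ^= val;
	m->r ^= rol32(m->l, 17);
	m->l += m->r;
	m->r ^= ((m->l & 0xff00ff00) >> 8) | ((m->l & 0x00ff00ff) << 8);
	m->l += m->r;
	m->r ^= rol32(m->l, 3);
	m->l += m->r;
	m->r ^= ror32(m->l, 2);
	m->l += m->r;
}

int main(void)
{
	/* key halves seed l and r; each 32-bit word of data is one block */
	struct mic_ctx m = { 0x01020304, 0x05060708 };

	mic_block(&m, 0xdeadbeef);
	mic_block(&m, 0);		/* trailing all-zero block */
	printf("mic words: %08x %08x\n", m.l, m.r);
	return 0;
}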
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b404537c0bcd..d7c371e36bf0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -78,7 +78,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
 static struct ieee80211_sta_bss *
 ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
 		     u8 *ssid, u8 ssid_len);
-static void ieee80211_rx_bss_put(struct net_device *dev,
+static void ieee80211_rx_bss_put(struct ieee80211_local *local,
 				 struct ieee80211_sta_bss *bss);
 static int ieee80211_sta_find_ibss(struct net_device *dev,
 				   struct ieee80211_if_sta *ifsta);
@@ -87,6 +87,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
 			u8 *ssid, size_t ssid_len);
 static int ieee80211_sta_config_auth(struct net_device *dev,
 				     struct ieee80211_if_sta *ifsta);
+static void sta_rx_agg_session_timer_expired(unsigned long data);


 void ieee802_11_parse_elems(u8 *start, size_t len,
@@ -203,6 +204,25 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
203 elems->perr = pos; 204 elems->perr = pos;
204 elems->perr_len = elen; 205 elems->perr_len = elen;
205 break; 206 break;
207 case WLAN_EID_CHANNEL_SWITCH:
208 elems->ch_switch_elem = pos;
209 elems->ch_switch_elem_len = elen;
210 break;
211 case WLAN_EID_QUIET:
212 if (!elems->quiet_elem) {
213 elems->quiet_elem = pos;
214 elems->quiet_elem_len = elen;
215 }
216 elems->num_of_quiet_elem++;
217 break;
218 case WLAN_EID_COUNTRY:
219 elems->country_elem = pos;
220 elems->country_elem_len = elen;
221 break;
222 case WLAN_EID_PWR_CONSTRAINT:
223 elems->pwr_constr_elem = pos;
224 elems->pwr_constr_elem_len = elen;
225 break;
206 default: 226 default:
207 break; 227 break;
208 } 228 }
@@ -256,19 +276,8 @@ static void ieee80211_sta_def_wmm_params(struct net_device *dev,
 		qparam.cw_max = 1023;
 		qparam.txop = 0;

-		for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++)
-			local->ops->conf_tx(local_to_hw(local),
-					   i + IEEE80211_TX_QUEUE_DATA0,
-					   &qparam);
-
-		if (ibss) {
-			/* IBSS uses different parameters for Beacon sending */
-			qparam.cw_min++;
-			qparam.cw_min *= 2;
-			qparam.cw_min--;
-			local->ops->conf_tx(local_to_hw(local),
-					IEEE80211_TX_QUEUE_BEACON, &qparam);
-		}
+		for (i = 0; i < local_to_hw(local)->queues; i++)
+			local->ops->conf_tx(local_to_hw(local), i, &qparam);
 	}
 }
274 283
@@ -282,6 +291,12 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
282 int count; 291 int count;
283 u8 *pos; 292 u8 *pos;
284 293
294 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
295 return;
296
297 if (!wmm_param)
298 return;
299
285 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 300 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
286 return; 301 return;
287 count = wmm_param[6] & 0x0f; 302 count = wmm_param[6] & 0x0f;
@@ -305,37 +320,33 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,

 		switch (aci) {
 		case 1:
-			queue = IEEE80211_TX_QUEUE_DATA3;
-			if (acm) {
+			queue = 3;
+			if (acm)
 				local->wmm_acm |= BIT(0) | BIT(3);
-			}
 			break;
 		case 2:
-			queue = IEEE80211_TX_QUEUE_DATA1;
-			if (acm) {
+			queue = 1;
+			if (acm)
 				local->wmm_acm |= BIT(4) | BIT(5);
-			}
 			break;
 		case 3:
-			queue = IEEE80211_TX_QUEUE_DATA0;
-			if (acm) {
+			queue = 0;
+			if (acm)
 				local->wmm_acm |= BIT(6) | BIT(7);
-			}
 			break;
 		case 0:
 		default:
-			queue = IEEE80211_TX_QUEUE_DATA2;
-			if (acm) {
+			queue = 2;
+			if (acm)
 				local->wmm_acm |= BIT(1) | BIT(2);
-			}
 			break;
 		}

 		params.aifs = pos[0] & 0x0f;
 		params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
 		params.cw_min = ecw2cw(pos[1] & 0x0f);
-		params.txop = pos[2] | (pos[3] << 8);
-#ifdef CONFIG_MAC80211_DEBUG
+		params.txop = get_unaligned_le16(pos + 2);
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
 		       "cWmin=%d cWmax=%d txop=%d\n",
 		       dev->name, queue, aci, acm, params.aifs, params.cw_min,
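For context on the parsing above: WMM carries contention windows in exponent form, so ecw2cw() expands ECW e to (2^e) - 1, and TXOP is a little-endian 16-bit field, which is why the byte pair becomes get_unaligned_le16(pos + 2). A worked example with an invented AC record:

#include <stdint.h>
#include <stdio.h>

/* CW = 2^ECW - 1, same formula as mac80211's ecw2cw() */
static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

int main(void)
{
	uint8_t ac[4] = { 0x03, 0xa4, 0x2f, 0x00 };	/* hypothetical record */
	int cw_min = ecw2cw(ac[1] & 0x0f);		/* ECW 4  -> 15   */
	int cw_max = ecw2cw((ac[1] & 0xf0) >> 4);	/* ECW 10 -> 1023 */
	int txop = ac[2] | (ac[3] << 8);		/* LE16   -> 47   */

	printf("cw_min=%d cw_max=%d txop=%d\n", cw_min, cw_max, txop);
	return 0;
}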
@@ -355,11 +366,14 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
 				       bool use_short_preamble)
 {
 	struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 	DECLARE_MAC_BUF(mac);
+#endif
 	u32 changed = 0;

 	if (use_protection != bss_conf->use_cts_prot) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		if (net_ratelimit()) {
 			printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
 			       "%s)\n",
@@ -367,11 +381,13 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
 			       use_protection ? "enabled" : "disabled",
 			       print_mac(mac, ifsta->bssid));
 		}
+#endif
 		bss_conf->use_cts_prot = use_protection;
 		changed |= BSS_CHANGED_ERP_CTS_PROT;
 	}

 	if (use_short_preamble != bss_conf->use_short_preamble) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		if (net_ratelimit()) {
 			printk(KERN_DEBUG "%s: switched to %s barker preamble"
 			       " (BSSID=%s)\n",
@@ -379,6 +395,7 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
 			       use_short_preamble ? "short" : "long",
 			       print_mac(mac, ifsta->bssid));
 		}
+#endif
 		bss_conf->use_short_preamble = use_short_preamble;
 		changed |= BSS_CHANGED_ERP_PREAMBLE;
 	}
@@ -537,7 +554,7 @@ static void ieee80211_set_associated(struct net_device *dev,
537 554
538 changed |= ieee80211_handle_bss_capability(sdata, bss); 555 changed |= ieee80211_handle_bss_capability(sdata, bss);
539 556
540 ieee80211_rx_bss_put(dev, bss); 557 ieee80211_rx_bss_put(local, bss);
541 } 558 }
542 559
543 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { 560 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
@@ -555,7 +572,7 @@ static void ieee80211_set_associated(struct net_device *dev,
555 netif_carrier_off(dev); 572 netif_carrier_off(dev);
556 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); 573 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid);
557 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 574 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
558 ieee80211_reset_erp_info(dev); 575 changed |= ieee80211_reset_erp_info(dev);
559 576
560 sdata->bss_conf.assoc_ht = 0; 577 sdata->bss_conf.assoc_ht = 0;
561 sdata->bss_conf.ht_conf = NULL; 578 sdata->bss_conf.ht_conf = NULL;
@@ -589,7 +606,7 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
 		      int encrypt)
 {
 	struct ieee80211_sub_if_data *sdata;
-	struct ieee80211_tx_packet_data *pkt_data;
+	struct ieee80211_tx_info *info;

 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	skb->dev = sdata->local->mdev;
@@ -597,11 +614,11 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
 	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, 0);

-	pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
-	memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data));
-	pkt_data->ifindex = sdata->dev->ifindex;
+	info = IEEE80211_SKB_CB(skb);
+	memset(info, 0, sizeof(struct ieee80211_tx_info));
+	info->control.ifindex = sdata->dev->ifindex;
 	if (!encrypt)
-		pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
+		info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;

 	dev_queue_xmit(skb);
 }
@@ -730,9 +747,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
 	if (bss) {
 		if (bss->capability & WLAN_CAPABILITY_PRIVACY)
 			capab |= WLAN_CAPABILITY_PRIVACY;
-		if (bss->wmm_ie) {
+		if (bss->wmm_ie)
 			wmm = 1;
-		}

 		/* get all rates supported by the device and the AP as
 		 * some APs don't like getting a superset of their rates
@@ -740,7 +756,11 @@ static void ieee80211_send_assoc(struct net_device *dev,
 		 * b-only mode) */
 		rates_len = ieee80211_compatible_rates(bss, sband, &rates);

-		ieee80211_rx_bss_put(dev, bss);
+		if ((bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
+		    (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+			capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+
+		ieee80211_rx_bss_put(local, bss);
 	} else {
 		rates = ~0;
 		rates_len = sband->n_bitrates;
@@ -807,6 +827,26 @@ static void ieee80211_send_assoc(struct net_device *dev,
807 } 827 }
808 } 828 }
809 829
830 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
831 /* 1. power capabilities */
832 pos = skb_put(skb, 4);
833 *pos++ = WLAN_EID_PWR_CAPABILITY;
834 *pos++ = 2;
835 *pos++ = 0; /* min tx power */
836 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
837
838 /* 2. supported channels */
839 /* TODO: get this in reg domain format */
840 pos = skb_put(skb, 2 * sband->n_channels + 2);
841 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
842 *pos++ = 2 * sband->n_channels;
843 for (i = 0; i < sband->n_channels; i++) {
844 *pos++ = ieee80211_frequency_to_channel(
845 sband->channels[i].center_freq);
846 *pos++ = 1; /* one channel in the subband*/
847 }
848 }
849
810 if (ifsta->extra_ie) { 850 if (ifsta->extra_ie) {
811 pos = skb_put(skb, ifsta->extra_ie_len); 851 pos = skb_put(skb, ifsta->extra_ie_len);
812 memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len); 852 memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len);
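The supported-channels IE added above converts each center frequency back to a channel number via ieee80211_frequency_to_channel(). A rough 2.4 GHz flavour of that mapping as a self-contained sketch (channel 14 sits off the 5 MHz grid, and 5 GHz channels use (freq - 5000) / 5 instead, so this is illustrative, not the full kernel helper):

#include <assert.h>

static int freq_to_chan_24ghz(int mhz)
{
	if (mhz == 2484)
		return 14;		/* the one off-grid 2.4 GHz channel */
	return (mhz - 2407) / 5;
}

int main(void)
{
	assert(freq_to_chan_24ghz(2412) == 1);
	assert(freq_to_chan_24ghz(2472) == 13);
	assert(freq_to_chan_24ghz(2484) == 14);
	return 0;
}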
@@ -824,9 +864,32 @@ static void ieee80211_send_assoc(struct net_device *dev,
 		*pos++ = 1; /* WME ver */
 		*pos++ = 0;
 	}
+
 	/* wmm support is a must to HT */
-	if (wmm && sband->ht_info.ht_supported) {
-		__le16 tmp = cpu_to_le16(sband->ht_info.cap);
+	if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
+	    sband->ht_info.ht_supported && bss->ht_add_ie) {
+		struct ieee80211_ht_addt_info *ht_add_info =
+			(struct ieee80211_ht_addt_info *)bss->ht_add_ie;
+		u16 cap = sband->ht_info.cap;
+		__le16 tmp;
+		u32 flags = local->hw.conf.channel->flags;
+
+		switch (ht_add_info->ht_param & IEEE80211_HT_IE_CHA_SEC_OFFSET) {
+		case IEEE80211_HT_IE_CHA_SEC_ABOVE:
+			if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) {
+				cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
+				cap &= ~IEEE80211_HT_CAP_SGI_40;
+			}
+			break;
+		case IEEE80211_HT_IE_CHA_SEC_BELOW:
+			if (flags & IEEE80211_CHAN_NO_FAT_BELOW) {
+				cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
+				cap &= ~IEEE80211_HT_CAP_SGI_40;
+			}
+			break;
+		}
+
+		tmp = cpu_to_le16(cap);
 		pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
 		*pos++ = WLAN_EID_HT_CAPABILITY;
 		*pos++ = sizeof(struct ieee80211_ht_cap);
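The masking above stops the association request from advertising 40 MHz width and 40 MHz short GI when the AP's secondary channel lies on a side the channel flags forbid. The same decision as a compact helper, with invented flag values standing in for the kernel constants and the three-way switch folded into one predicate:

#include <assert.h>
#include <stdint.h>

#define CAP_SUP_WIDTH		0x0002	/* 40 MHz width supported */
#define CAP_SGI_40		0x0040	/* short GI for 40 MHz */
#define CHAN_NO_FAT_ABOVE	0x0001
#define CHAN_NO_FAT_BELOW	0x0002

static uint16_t ht_cap_mask(uint16_t cap, int sec_chan_above, uint32_t chan_flags)
{
	uint32_t forbidden = sec_chan_above ? CHAN_NO_FAT_ABOVE
					    : CHAN_NO_FAT_BELOW;

	if (chan_flags & forbidden)
		cap &= ~(CAP_SUP_WIDTH | CAP_SGI_40);
	return cap;
}

int main(void)
{
	assert(ht_cap_mask(CAP_SUP_WIDTH | CAP_SGI_40, 1, CHAN_NO_FAT_ABOVE) == 0);
	assert(ht_cap_mask(CAP_SUP_WIDTH, 1, CHAN_NO_FAT_BELOW) == CAP_SUP_WIDTH);
	return 0;
}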
@@ -929,7 +992,7 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
929 wep_privacy = !!ieee80211_sta_wep_configured(dev); 992 wep_privacy = !!ieee80211_sta_wep_configured(dev);
930 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); 993 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
931 994
932 ieee80211_rx_bss_put(dev, bss); 995 ieee80211_rx_bss_put(local, bss);
933 996
934 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) 997 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
935 return 0; 998 return 0;
@@ -1121,14 +1184,10 @@ static void ieee80211_auth_challenge(struct net_device *dev,
 	u8 *pos;
 	struct ieee802_11_elems elems;

-	printk(KERN_DEBUG "%s: replying to auth challenge\n", dev->name);
 	pos = mgmt->u.auth.variable;
 	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
-	if (!elems.challenge) {
-		printk(KERN_DEBUG "%s: no challenge IE in shared key auth "
-		       "frame\n", dev->name);
+	if (!elems.challenge)
 		return;
-	}
 	ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2,
 			    elems.challenge_len + 2, 1);
 }
@@ -1144,8 +1203,8 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1144 struct ieee80211_mgmt *mgmt; 1203 struct ieee80211_mgmt *mgmt;
1145 u16 capab; 1204 u16 capab;
1146 1205
1147 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1206 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1148 sizeof(mgmt->u.action.u.addba_resp)); 1207
1149 if (!skb) { 1208 if (!skb) {
1150 printk(KERN_DEBUG "%s: failed to allocate buffer " 1209 printk(KERN_DEBUG "%s: failed to allocate buffer "
1151 "for addba resp frame\n", dev->name); 1210 "for addba resp frame\n", dev->name);
@@ -1193,9 +1252,7 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1193 struct ieee80211_mgmt *mgmt; 1252 struct ieee80211_mgmt *mgmt;
1194 u16 capab; 1253 u16 capab;
1195 1254
1196 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1255 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1197 sizeof(mgmt->u.action.u.addba_req));
1198
1199 1256
1200 if (!skb) { 1257 if (!skb) {
1201 printk(KERN_ERR "%s: failed to allocate buffer " 1258 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1296,7 +1353,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1296 1353
1297 1354
1298 /* examine state machine */ 1355 /* examine state machine */
1299 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1356 spin_lock_bh(&sta->lock);
1300 1357
1301 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 1358 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
1302#ifdef CONFIG_MAC80211_HT_DEBUG 1359#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -1312,9 +1369,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1312 sta->ampdu_mlme.tid_rx[tid] = 1369 sta->ampdu_mlme.tid_rx[tid] =
1313 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); 1370 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
1314 if (!sta->ampdu_mlme.tid_rx[tid]) { 1371 if (!sta->ampdu_mlme.tid_rx[tid]) {
1372#ifdef CONFIG_MAC80211_HT_DEBUG
1315 if (net_ratelimit()) 1373 if (net_ratelimit())
1316 printk(KERN_ERR "allocate rx mlme to tid %d failed\n", 1374 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
1317 tid); 1375 tid);
1376#endif
1318 goto end; 1377 goto end;
1319 } 1378 }
1320 /* rx timer */ 1379 /* rx timer */
@@ -1330,9 +1389,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1330 tid_agg_rx->reorder_buf = 1389 tid_agg_rx->reorder_buf =
1331 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); 1390 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
1332 if (!tid_agg_rx->reorder_buf) { 1391 if (!tid_agg_rx->reorder_buf) {
1392#ifdef CONFIG_MAC80211_HT_DEBUG
1333 if (net_ratelimit()) 1393 if (net_ratelimit())
1334 printk(KERN_ERR "can not allocate reordering buffer " 1394 printk(KERN_ERR "can not allocate reordering buffer "
1335 "to tid %d\n", tid); 1395 "to tid %d\n", tid);
1396#endif
1336 kfree(sta->ampdu_mlme.tid_rx[tid]); 1397 kfree(sta->ampdu_mlme.tid_rx[tid]);
1337 goto end; 1398 goto end;
1338 } 1399 }
@@ -1363,7 +1424,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1363 tid_agg_rx->stored_mpdu_num = 0; 1424 tid_agg_rx->stored_mpdu_num = 0;
1364 status = WLAN_STATUS_SUCCESS; 1425 status = WLAN_STATUS_SUCCESS;
1365end: 1426end:
1366 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1427 spin_unlock_bh(&sta->lock);
1367 1428
1368end_no_lock: 1429end_no_lock:
1369 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, 1430 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
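This hunk and the ones below replace the separate ampdu_mlme.ampdu_rx and ampdu_mlme.ampdu_tx spinlocks with the single per-station sta->lock, so the RX and TX A-MPDU state machines now serialize against each other as well as against themselves. The resulting locking pattern, sketched:

	/* one lock now guards both tid_state_rx[] and tid_state_tx[] */
	spin_lock_bh(&sta->lock);
	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
		spin_unlock_bh(&sta->lock);
		return;
	}
	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
	spin_unlock_bh(&sta->lock);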
@@ -1395,18 +1456,16 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1395 1456
1396 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1457 state = &sta->ampdu_mlme.tid_state_tx[tid];
1397 1458
1398 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1459 spin_lock_bh(&sta->lock);
1399 1460
1400 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1461 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1401 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1462 spin_unlock_bh(&sta->lock);
1402 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
1403 "%d\n", *state);
1404 goto addba_resp_exit; 1463 goto addba_resp_exit;
1405 } 1464 }
1406 1465
1407 if (mgmt->u.action.u.addba_resp.dialog_token != 1466 if (mgmt->u.action.u.addba_resp.dialog_token !=
1408 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 1467 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
1409 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1468 spin_unlock_bh(&sta->lock);
1410#ifdef CONFIG_MAC80211_HT_DEBUG 1469#ifdef CONFIG_MAC80211_HT_DEBUG
1411 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 1470 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1412#endif /* CONFIG_MAC80211_HT_DEBUG */ 1471#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -1419,26 +1478,18 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1419#endif /* CONFIG_MAC80211_HT_DEBUG */ 1478#endif /* CONFIG_MAC80211_HT_DEBUG */
1420 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 1479 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1421 == WLAN_STATUS_SUCCESS) { 1480 == WLAN_STATUS_SUCCESS) {
1422 if (*state & HT_ADDBA_RECEIVED_MSK)
1423 printk(KERN_DEBUG "double addBA response\n");
1424
1425 *state |= HT_ADDBA_RECEIVED_MSK; 1481 *state |= HT_ADDBA_RECEIVED_MSK;
1426 sta->ampdu_mlme.addba_req_num[tid] = 0; 1482 sta->ampdu_mlme.addba_req_num[tid] = 0;
1427 1483
1428 if (*state == HT_AGG_STATE_OPERATIONAL) { 1484 if (*state == HT_AGG_STATE_OPERATIONAL)
1429 printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
1430 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 1485 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1431 }
1432 1486
1433 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1487 spin_unlock_bh(&sta->lock);
1434 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
1435 } else { 1488 } else {
1436 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
1437
1438 sta->ampdu_mlme.addba_req_num[tid]++; 1489 sta->ampdu_mlme.addba_req_num[tid]++;
1439 /* this will allow the state check in stop_BA_session */ 1490 /* this will allow the state check in stop_BA_session */
1440 *state = HT_AGG_STATE_OPERATIONAL; 1491 *state = HT_AGG_STATE_OPERATIONAL;
1441 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1492 spin_unlock_bh(&sta->lock);
1442 ieee80211_stop_tx_ba_session(hw, sta->addr, tid, 1493 ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
1443 WLAN_BACK_INITIATOR); 1494 WLAN_BACK_INITIATOR);
1444 } 1495 }
@@ -1457,8 +1508,7 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1457 struct ieee80211_mgmt *mgmt; 1508 struct ieee80211_mgmt *mgmt;
1458 u16 params; 1509 u16 params;
1459 1510
1460 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1511 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1461 sizeof(mgmt->u.action.u.delba));
1462 1512
1463 if (!skb) { 1513 if (!skb) {
1464 printk(KERN_ERR "%s: failed to allocate buffer " 1514 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1491,6 +1541,35 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1491 ieee80211_sta_tx(dev, skb, 0); 1541 ieee80211_sta_tx(dev, skb, 0);
1492} 1542}
1493 1543
1544void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn)
1545{
1546 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1547 struct sk_buff *skb;
1548 struct ieee80211_bar *bar;
1549 u16 bar_control = 0;
1550
1551 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
1552 if (!skb) {
1553 printk(KERN_ERR "%s: failed to allocate buffer for "
1554 "bar frame\n", dev->name);
1555 return;
1556 }
1557 skb_reserve(skb, local->hw.extra_tx_headroom);
1558 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
1559 memset(bar, 0, sizeof(*bar));
1560 bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL,
1561 IEEE80211_STYPE_BACK_REQ);
1562 memcpy(bar->ra, ra, ETH_ALEN);
1563 memcpy(bar->ta, dev->dev_addr, ETH_ALEN);
1564 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
1565 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
1566 bar_control |= (u16)(tid << 12);
1567 bar->control = cpu_to_le16(bar_control);
1568 bar->start_seq_num = cpu_to_le16(ssn);
1569
1570 ieee80211_sta_tx(dev, skb, 0);
1571}
1572
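The BAR Control field packs the ack policy in bit 0, the Multi-TID and Compressed-Bitmap selectors in bits 1-2, and the TID in bits 12-15, which is where the tid << 12 above comes from. A worked value, assuming the usual constants of this era (ACK_POLICY_NORMAL == 0x0000, CBMTID_COMPRESSED_BA == 0x0004):

	/* tid = 5, compressed bitmap, normal ack policy:
	 *   0x0000 | 0x0004 | (5 << 12) == 0x5004,
	 * stored little-endian by the cpu_to_le16() above */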
1494void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, 1573void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1495 u16 initiator, u16 reason) 1574 u16 initiator, u16 reason)
1496{ 1575{
@@ -1509,17 +1588,17 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1509 } 1588 }
1510 1589
1511 /* check if TID is in operational state */ 1590 /* check if TID is in operational state */
1512 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1591 spin_lock_bh(&sta->lock);
1513 if (sta->ampdu_mlme.tid_state_rx[tid] 1592 if (sta->ampdu_mlme.tid_state_rx[tid]
1514 != HT_AGG_STATE_OPERATIONAL) { 1593 != HT_AGG_STATE_OPERATIONAL) {
1515 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1594 spin_unlock_bh(&sta->lock);
1516 rcu_read_unlock(); 1595 rcu_read_unlock();
1517 return; 1596 return;
1518 } 1597 }
1519 sta->ampdu_mlme.tid_state_rx[tid] = 1598 sta->ampdu_mlme.tid_state_rx[tid] =
1520 HT_AGG_STATE_REQ_STOP_BA_MSK | 1599 HT_AGG_STATE_REQ_STOP_BA_MSK |
1521 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 1600 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1522 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1601 spin_unlock_bh(&sta->lock);
1523 1602
1524 /* stop HW Rx aggregation. ampdu_action existence 1603 /* stop HW Rx aggregation. ampdu_action existence
1525 * was already verified in session init, hence the BUG_ON */ 1604 * was already verified in session init, hence the BUG_ON */
@@ -1534,7 +1613,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1534 ra, tid, NULL); 1613 ra, tid, NULL);
1535 if (ret) 1614 if (ret)
1536 printk(KERN_DEBUG "HW problem - can not stop rx " 1615 printk(KERN_DEBUG "HW problem - can not stop rx "
1537 "aggergation for tid %d\n", tid); 1616 "aggregation for tid %d\n", tid);
1538 1617
1539 /* shutdown timer has not expired */ 1618 /* shutdown timer has not expired */
1540 if (initiator != WLAN_BACK_TIMER) 1619 if (initiator != WLAN_BACK_TIMER)
@@ -1596,10 +1675,10 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1596 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1675 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1597 WLAN_BACK_INITIATOR, 0); 1676 WLAN_BACK_INITIATOR, 0);
1598 else { /* WLAN_BACK_RECIPIENT */ 1677 else { /* WLAN_BACK_RECIPIENT */
1599 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1678 spin_lock_bh(&sta->lock);
1600 sta->ampdu_mlme.tid_state_tx[tid] = 1679 sta->ampdu_mlme.tid_state_tx[tid] =
1601 HT_AGG_STATE_OPERATIONAL; 1680 HT_AGG_STATE_OPERATIONAL;
1602 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1681 spin_unlock_bh(&sta->lock);
1603 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, 1682 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1604 WLAN_BACK_RECIPIENT); 1683 WLAN_BACK_RECIPIENT);
1605 } 1684 }
@@ -1636,20 +1715,24 @@ void sta_addba_resp_timer_expired(unsigned long data)
1636 1715
1637 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1716 state = &sta->ampdu_mlme.tid_state_tx[tid];
1638 /* check if the TID waits for addBA response */ 1717 /* check if the TID waits for addBA response */
1639 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1718 spin_lock_bh(&sta->lock);
1640 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1719 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1641 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1720 spin_unlock_bh(&sta->lock);
1642 *state = HT_AGG_STATE_IDLE; 1721 *state = HT_AGG_STATE_IDLE;
1722#ifdef CONFIG_MAC80211_HT_DEBUG
1643 printk(KERN_DEBUG "timer expired on tid %d but we are not " 1723 printk(KERN_DEBUG "timer expired on tid %d but we are not "
1644 "expecting addBA response there", tid); 1724 "expecting addBA response there", tid);
1725#endif
1645 goto timer_expired_exit; 1726 goto timer_expired_exit;
1646 } 1727 }
1647 1728
1729#ifdef CONFIG_MAC80211_HT_DEBUG
1648 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); 1730 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1731#endif
1649 1732
1650 /* go through the state check in stop_BA_session */ 1733 /* go through the state check in stop_BA_session */
1651 *state = HT_AGG_STATE_OPERATIONAL; 1734 *state = HT_AGG_STATE_OPERATIONAL;
1652 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1735 spin_unlock_bh(&sta->lock);
1653 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, 1736 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1654 WLAN_BACK_INITIATOR); 1737 WLAN_BACK_INITIATOR);
1655 1738
@@ -1662,7 +1745,7 @@ timer_expired_exit:
1662 * resetting it after each frame that arrives from the originator. 1745 * resetting it after each frame that arrives from the originator.
1663 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 1746 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
1664 */ 1747 */
1665void sta_rx_agg_session_timer_expired(unsigned long data) 1748static void sta_rx_agg_session_timer_expired(unsigned long data)
1666{ 1749{
1667 /* not an elegant detour, but there is no choice as the timer passes 1750 /* not an elegant detour, but there is no choice as the timer passes
1668 * only one argument, and various sta_info are needed here, so init 1751 * only one argument, and various sta_info are needed here, so init
@@ -1673,7 +1756,9 @@ void sta_rx_agg_session_timer_expired(unsigned long data)
1673 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 1756 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
1674 timer_to_tid[0]); 1757 timer_to_tid[0]);
1675 1758
1759#ifdef CONFIG_MAC80211_HT_DEBUG
1676 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 1760 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1761#endif
1677 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 1762 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1678 (u16)*ptid, WLAN_BACK_TIMER, 1763 (u16)*ptid, WLAN_BACK_TIMER,
1679 WLAN_REASON_QSTA_TIMEOUT); 1764 WLAN_REASON_QSTA_TIMEOUT);
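The "detour" works because sta_info sets up timer_to_tid[i] == i: the timer's data argument points at one slot of that array, so subtracting the slot's own value rewinds to element zero, and container_of() recovers the owning sta_info. The recovery steps, reconstructed (the elided lines of the handler follow this shape):

	u8 *ptid = (u8 *)data;		/* == &sta->timer_to_tid[tid] */
	u8 *timer_to_id = ptid - *ptid;	/* back to timer_to_tid[0] */
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					    timer_to_tid[0]);
	u16 tid = *ptid;		/* each slot stores its own index */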
@@ -1693,6 +1778,71 @@ void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
1693 } 1778 }
1694} 1779}
1695 1780
1781static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1782 struct ieee80211_msrment_ie *request_ie,
1783 const u8 *da, const u8 *bssid,
1784 u8 dialog_token)
1785{
1786 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1787 struct sk_buff *skb;
1788 struct ieee80211_mgmt *msr_report;
1789
1790 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
1791 sizeof(struct ieee80211_msrment_ie));
1792
1793 if (!skb) {
1794 printk(KERN_ERR "%s: failed to allocate buffer for "
1795 "measurement report frame\n", dev->name);
1796 return;
1797 }
1798
1799 skb_reserve(skb, local->hw.extra_tx_headroom);
1800 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
1801 memset(msr_report, 0, 24);
1802 memcpy(msr_report->da, da, ETH_ALEN);
1803 memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN);
1804 memcpy(msr_report->bssid, bssid, ETH_ALEN);
1805 msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1806 IEEE80211_STYPE_ACTION);
1807
1808 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
1809 msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
1810 msr_report->u.action.u.measurement.action_code =
1811 WLAN_ACTION_SPCT_MSR_RPRT;
1812 msr_report->u.action.u.measurement.dialog_token = dialog_token;
1813
1814 msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
1815 msr_report->u.action.u.measurement.length =
1816 sizeof(struct ieee80211_msrment_ie);
1817
1818 memset(&msr_report->u.action.u.measurement.msr_elem, 0,
1819 sizeof(struct ieee80211_msrment_ie));
1820 msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
1821 msr_report->u.action.u.measurement.msr_elem.mode |=
1822 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
1823 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
1824
1825 ieee80211_sta_tx(dev, skb, 0);
1826}
1827
1828static void ieee80211_sta_process_measurement_req(struct net_device *dev,
1829 struct ieee80211_mgmt *mgmt,
1830 size_t len)
1831{
1832 /*
1833 * Ignoring a measurement request is a spec violation.
1834 * Mandatory measurements must be reported; optional
1835 * measurements might be refused or reported as incapable.
1836 * For now, just refuse.
1837 * TODO: Answer basic measurements as unmeasured.
1838 */
1839 ieee80211_send_refuse_measurement_request(dev,
1840 &mgmt->u.action.u.measurement.msr_elem,
1841 mgmt->sa, mgmt->bssid,
1842 mgmt->u.action.u.measurement.dialog_token);
1843}
1844
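The refusal is carried entirely in the Measurement Report element's mode field. Per 802.11h the low mode bits flag Late (bit 0), Incapable (bit 1), and Refused (bit 2), so IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED above is presumably 1 << 2, and the element needs no measurement body at all:

	/* Measurement Report element (EID 39) as built above:
	 *   token | mode | type	(report body omitted when refusing)
	 * mode: bit 0 = late, bit 1 = incapable, bit 2 = refused */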
1845
1696static void ieee80211_rx_mgmt_auth(struct net_device *dev, 1846static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1697 struct ieee80211_if_sta *ifsta, 1847 struct ieee80211_if_sta *ifsta,
1698 struct ieee80211_mgmt *mgmt, 1848 struct ieee80211_mgmt *mgmt,
@@ -1703,73 +1853,41 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1703 DECLARE_MAC_BUF(mac); 1853 DECLARE_MAC_BUF(mac);
1704 1854
1705 if (ifsta->state != IEEE80211_AUTHENTICATE && 1855 if (ifsta->state != IEEE80211_AUTHENTICATE &&
1706 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { 1856 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1707 printk(KERN_DEBUG "%s: authentication frame received from "
1708 "%s, but not in authenticate state - ignored\n",
1709 dev->name, print_mac(mac, mgmt->sa));
1710 return; 1857 return;
1711 }
1712 1858
1713 if (len < 24 + 6) { 1859 if (len < 24 + 6)
1714 printk(KERN_DEBUG "%s: too short (%zd) authentication frame "
1715 "received from %s - ignored\n",
1716 dev->name, len, print_mac(mac, mgmt->sa));
1717 return; 1860 return;
1718 }
1719 1861
1720 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1862 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
1721 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1863 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1722 printk(KERN_DEBUG "%s: authentication frame received from "
1723 "unknown AP (SA=%s BSSID=%s) - "
1724 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1725 print_mac(mac, mgmt->bssid));
1726 return; 1864 return;
1727 }
1728 1865
1729 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1866 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
1730 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) { 1867 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1731 printk(KERN_DEBUG "%s: authentication frame received from "
1732 "unknown BSSID (SA=%s BSSID=%s) - "
1733 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1734 print_mac(mac, mgmt->bssid));
1735 return; 1868 return;
1736 }
1737 1869
1738 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 1870 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1739 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 1871 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1740 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1872 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1741 1873
1742 printk(KERN_DEBUG "%s: RX authentication from %s (alg=%d "
1743 "transaction=%d status=%d)\n",
1744 dev->name, print_mac(mac, mgmt->sa), auth_alg,
1745 auth_transaction, status_code);
1746
1747 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1874 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
1748 /* IEEE 802.11 standard does not require authentication in IBSS 1875 /*
1876 * IEEE 802.11 standard does not require authentication in IBSS
1749 * networks and most implementations do not seem to use it. 1877 * networks and most implementations do not seem to use it.
1750 * However, try to reply to authentication attempts if someone 1878 * However, try to reply to authentication attempts if someone
1751 * has actually implemented this. 1879 * has actually implemented this.
1752 * TODO: Could implement shared key authentication. */ 1880 */
1753 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) { 1881 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1754 printk(KERN_DEBUG "%s: unexpected IBSS authentication "
1755 "frame (alg=%d transaction=%d)\n",
1756 dev->name, auth_alg, auth_transaction);
1757 return; 1882 return;
1758 }
1759 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); 1883 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0);
1760 } 1884 }
1761 1885
1762 if (auth_alg != ifsta->auth_alg || 1886 if (auth_alg != ifsta->auth_alg ||
1763 auth_transaction != ifsta->auth_transaction) { 1887 auth_transaction != ifsta->auth_transaction)
1764 printk(KERN_DEBUG "%s: unexpected authentication frame "
1765 "(alg=%d transaction=%d)\n",
1766 dev->name, auth_alg, auth_transaction);
1767 return; 1888 return;
1768 }
1769 1889
1770 if (status_code != WLAN_STATUS_SUCCESS) { 1890 if (status_code != WLAN_STATUS_SUCCESS) {
1771 printk(KERN_DEBUG "%s: AP denied authentication (auth_alg=%d "
1772 "code=%d)\n", dev->name, ifsta->auth_alg, status_code);
1773 if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { 1891 if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
1774 u8 algs[3]; 1892 u8 algs[3];
1775 const int num_algs = ARRAY_SIZE(algs); 1893 const int num_algs = ARRAY_SIZE(algs);
@@ -1798,9 +1916,6 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1798 !ieee80211_sta_wep_configured(dev)) 1916 !ieee80211_sta_wep_configured(dev))
1799 continue; 1917 continue;
1800 ifsta->auth_alg = algs[pos]; 1918 ifsta->auth_alg = algs[pos];
1801 printk(KERN_DEBUG "%s: set auth_alg=%d for "
1802 "next try\n",
1803 dev->name, ifsta->auth_alg);
1804 break; 1919 break;
1805 } 1920 }
1806 } 1921 }
@@ -1830,30 +1945,16 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1830 u16 reason_code; 1945 u16 reason_code;
1831 DECLARE_MAC_BUF(mac); 1946 DECLARE_MAC_BUF(mac);
1832 1947
1833 if (len < 24 + 2) { 1948 if (len < 24 + 2)
1834 printk(KERN_DEBUG "%s: too short (%zd) deauthentication frame "
1835 "received from %s - ignored\n",
1836 dev->name, len, print_mac(mac, mgmt->sa));
1837 return; 1949 return;
1838 }
1839 1950
1840 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1951 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN))
1841 printk(KERN_DEBUG "%s: deauthentication frame received from "
1842 "unknown AP (SA=%s BSSID=%s) - "
1843 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1844 print_mac(mac, mgmt->bssid));
1845 return; 1952 return;
1846 }
1847 1953
1848 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1954 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1849 1955
1850 printk(KERN_DEBUG "%s: RX deauthentication from %s" 1956 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1851 " (reason=%d)\n",
1852 dev->name, print_mac(mac, mgmt->sa), reason_code);
1853
1854 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) {
1855 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1957 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name);
1856 }
1857 1958
1858 if (ifsta->state == IEEE80211_AUTHENTICATE || 1959 if (ifsta->state == IEEE80211_AUTHENTICATE ||
1859 ifsta->state == IEEE80211_ASSOCIATE || 1960 ifsta->state == IEEE80211_ASSOCIATE ||
@@ -1876,27 +1977,14 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
1876 u16 reason_code; 1977 u16 reason_code;
1877 DECLARE_MAC_BUF(mac); 1978 DECLARE_MAC_BUF(mac);
1878 1979
1879 if (len < 24 + 2) { 1980 if (len < 24 + 2)
1880 printk(KERN_DEBUG "%s: too short (%zd) disassociation frame "
1881 "received from %s - ignored\n",
1882 dev->name, len, print_mac(mac, mgmt->sa));
1883 return; 1981 return;
1884 }
1885 1982
1886 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1983 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN))
1887 printk(KERN_DEBUG "%s: disassociation frame received from "
1888 "unknown AP (SA=%s BSSID=%s) - "
1889 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1890 print_mac(mac, mgmt->bssid));
1891 return; 1984 return;
1892 }
1893 1985
1894 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1986 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1895 1987
1896 printk(KERN_DEBUG "%s: RX disassociation from %s"
1897 " (reason=%d)\n",
1898 dev->name, print_mac(mac, mgmt->sa), reason_code);
1899
1900 if (ifsta->flags & IEEE80211_STA_ASSOCIATED) 1988 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
1901 printk(KERN_DEBUG "%s: disassociated\n", dev->name); 1989 printk(KERN_DEBUG "%s: disassociated\n", dev->name);
1902 1990
@@ -1932,27 +2020,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1932 /* AssocResp and ReassocResp have identical structure, so process both 2020 /* AssocResp and ReassocResp have identical structure, so process both
1933 * of them in this function. */ 2021 * of them in this function. */
1934 2022
1935 if (ifsta->state != IEEE80211_ASSOCIATE) { 2023 if (ifsta->state != IEEE80211_ASSOCIATE)
1936 printk(KERN_DEBUG "%s: association frame received from "
1937 "%s, but not in associate state - ignored\n",
1938 dev->name, print_mac(mac, mgmt->sa));
1939 return; 2024 return;
1940 }
1941 2025
1942 if (len < 24 + 6) { 2026 if (len < 24 + 6)
1943 printk(KERN_DEBUG "%s: too short (%zd) association frame "
1944 "received from %s - ignored\n",
1945 dev->name, len, print_mac(mac, mgmt->sa));
1946 return; 2027 return;
1947 }
1948 2028
1949 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 2029 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1950 printk(KERN_DEBUG "%s: association frame received from "
1951 "unknown AP (SA=%s BSSID=%s) - "
1952 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1953 print_mac(mac, mgmt->bssid));
1954 return; 2030 return;
1955 }
1956 2031
1957 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 2032 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1958 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 2033 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -2016,10 +2091,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2016 local->hw.conf.channel->center_freq, 2091 local->hw.conf.channel->center_freq,
2017 ifsta->ssid, ifsta->ssid_len); 2092 ifsta->ssid, ifsta->ssid_len);
2018 if (bss) { 2093 if (bss) {
2019 sta->last_rssi = bss->rssi;
2020 sta->last_signal = bss->signal; 2094 sta->last_signal = bss->signal;
2095 sta->last_qual = bss->qual;
2021 sta->last_noise = bss->noise; 2096 sta->last_noise = bss->noise;
2022 ieee80211_rx_bss_put(dev, bss); 2097 ieee80211_rx_bss_put(local, bss);
2023 } 2098 }
2024 2099
2025 err = sta_info_insert(sta); 2100 err = sta_info_insert(sta);
@@ -2041,8 +2116,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2041 * to between the sta_info_alloc() and sta_info_insert() above. 2116 * to between the sta_info_alloc() and sta_info_insert() above.
2042 */ 2117 */
2043 2118
2044 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | 2119 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
2045 WLAN_STA_AUTHORIZED; 2120 WLAN_STA_AUTHORIZED);
2046 2121
2047 rates = 0; 2122 rates = 0;
2048 basic_rates = 0; 2123 basic_rates = 0;
@@ -2086,7 +2161,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2086 else 2161 else
2087 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 2162 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2088 2163
2089 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) { 2164 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
2165 (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2090 struct ieee80211_ht_bss_info bss_info; 2166 struct ieee80211_ht_bss_info bss_info;
2091 ieee80211_ht_cap_ie_to_ht_info( 2167 ieee80211_ht_cap_ie_to_ht_info(
2092 (struct ieee80211_ht_cap *) 2168 (struct ieee80211_ht_cap *)
@@ -2099,8 +2175,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2099 2175
2100 rate_control_rate_init(sta, local); 2176 rate_control_rate_init(sta, local);
2101 2177
2102 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { 2178 if (elems.wmm_param) {
2103 sta->flags |= WLAN_STA_WME; 2179 set_sta_flags(sta, WLAN_STA_WME);
2104 rcu_read_unlock(); 2180 rcu_read_unlock();
2105 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2181 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2106 elems.wmm_param_len); 2182 elems.wmm_param_len);
@@ -2136,10 +2212,9 @@ static void __ieee80211_rx_bss_hash_add(struct net_device *dev,
2136 2212
2137 2213
2138/* Caller must hold local->sta_bss_lock */ 2214/* Caller must hold local->sta_bss_lock */
2139static void __ieee80211_rx_bss_hash_del(struct net_device *dev, 2215static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
2140 struct ieee80211_sta_bss *bss) 2216 struct ieee80211_sta_bss *bss)
2141{ 2217{
2142 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2143 struct ieee80211_sta_bss *b, *prev = NULL; 2218 struct ieee80211_sta_bss *b, *prev = NULL;
2144 b = local->sta_bss_hash[STA_HASH(bss->bssid)]; 2219 b = local->sta_bss_hash[STA_HASH(bss->bssid)];
2145 while (b) { 2220 while (b) {
@@ -2284,45 +2359,42 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
2284 kfree(bss->rsn_ie); 2359 kfree(bss->rsn_ie);
2285 kfree(bss->wmm_ie); 2360 kfree(bss->wmm_ie);
2286 kfree(bss->ht_ie); 2361 kfree(bss->ht_ie);
2362 kfree(bss->ht_add_ie);
2287 kfree(bss_mesh_id(bss)); 2363 kfree(bss_mesh_id(bss));
2288 kfree(bss_mesh_cfg(bss)); 2364 kfree(bss_mesh_cfg(bss));
2289 kfree(bss); 2365 kfree(bss);
2290} 2366}
2291 2367
2292 2368
2293static void ieee80211_rx_bss_put(struct net_device *dev, 2369static void ieee80211_rx_bss_put(struct ieee80211_local *local,
2294 struct ieee80211_sta_bss *bss) 2370 struct ieee80211_sta_bss *bss)
2295{ 2371{
2296 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2297
2298 local_bh_disable(); 2372 local_bh_disable();
2299 if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { 2373 if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) {
2300 local_bh_enable(); 2374 local_bh_enable();
2301 return; 2375 return;
2302 } 2376 }
2303 2377
2304 __ieee80211_rx_bss_hash_del(dev, bss); 2378 __ieee80211_rx_bss_hash_del(local, bss);
2305 list_del(&bss->list); 2379 list_del(&bss->list);
2306 spin_unlock_bh(&local->sta_bss_lock); 2380 spin_unlock_bh(&local->sta_bss_lock);
2307 ieee80211_rx_bss_free(bss); 2381 ieee80211_rx_bss_free(bss);
2308} 2382}
2309 2383
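ieee80211_rx_bss_put() is the classic atomic_dec_and_lock() teardown: every put except the last stays lock-free, and the final put already holds sta_bss_lock across the unhash and list removal, so no concurrent lookup can revive a dying entry. A generic sketch of the shape (names hypothetical):

	local_bh_disable();
	if (!atomic_dec_and_lock(&obj->users, &table->lock)) {
		local_bh_enable();
		return;			/* not the last reference */
	}
	/* last reference: table->lock held, BHs still disabled */
	__unhash(table, obj);
	list_del(&obj->list);
	spin_unlock_bh(&table->lock);	/* re-enables BHs too */
	free_obj(obj);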
2310 2384
2311void ieee80211_rx_bss_list_init(struct net_device *dev) 2385void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
2312{ 2386{
2313 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2314 spin_lock_init(&local->sta_bss_lock); 2387 spin_lock_init(&local->sta_bss_lock);
2315 INIT_LIST_HEAD(&local->sta_bss_list); 2388 INIT_LIST_HEAD(&local->sta_bss_list);
2316} 2389}
2317 2390
2318 2391
2319void ieee80211_rx_bss_list_deinit(struct net_device *dev) 2392void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
2320{ 2393{
2321 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2322 struct ieee80211_sta_bss *bss, *tmp; 2394 struct ieee80211_sta_bss *bss, *tmp;
2323 2395
2324 list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) 2396 list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list)
2325 ieee80211_rx_bss_put(dev, bss); 2397 ieee80211_rx_bss_put(local, bss);
2326} 2398}
2327 2399
2328 2400
@@ -2334,8 +2406,6 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2334 int res, rates, i, j; 2406 int res, rates, i, j;
2335 struct sk_buff *skb; 2407 struct sk_buff *skb;
2336 struct ieee80211_mgmt *mgmt; 2408 struct ieee80211_mgmt *mgmt;
2337 struct ieee80211_tx_control control;
2338 struct rate_selection ratesel;
2339 u8 *pos; 2409 u8 *pos;
2340 struct ieee80211_sub_if_data *sdata; 2410 struct ieee80211_sub_if_data *sdata;
2341 struct ieee80211_supported_band *sband; 2411 struct ieee80211_supported_band *sband;
@@ -2353,7 +2423,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2353 local->ops->reset_tsf(local_to_hw(local)); 2423 local->ops->reset_tsf(local_to_hw(local));
2354 } 2424 }
2355 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN); 2425 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN);
2356 res = ieee80211_if_config(dev); 2426 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
2357 if (res) 2427 if (res)
2358 return res; 2428 return res;
2359 2429
@@ -2367,24 +2437,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2367 if (res) 2437 if (res)
2368 return res; 2438 return res;
2369 2439
2370 /* Set beacon template */ 2440 /* Build IBSS probe response */
2371 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 2441 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
2372 do { 2442 if (skb) {
2373 if (!skb)
2374 break;
2375
2376 skb_reserve(skb, local->hw.extra_tx_headroom); 2443 skb_reserve(skb, local->hw.extra_tx_headroom);
2377 2444
2378 mgmt = (struct ieee80211_mgmt *) 2445 mgmt = (struct ieee80211_mgmt *)
2379 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 2446 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2380 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 2447 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2381 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 2448 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2382 IEEE80211_STYPE_BEACON); 2449 IEEE80211_STYPE_PROBE_RESP);
2383 memset(mgmt->da, 0xff, ETH_ALEN); 2450 memset(mgmt->da, 0xff, ETH_ALEN);
2384 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 2451 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
2385 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 2452 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2386 mgmt->u.beacon.beacon_int = 2453 mgmt->u.beacon.beacon_int =
2387 cpu_to_le16(local->hw.conf.beacon_int); 2454 cpu_to_le16(local->hw.conf.beacon_int);
2455 mgmt->u.beacon.timestamp = cpu_to_le64(bss->timestamp);
2388 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); 2456 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2389 2457
2390 pos = skb_put(skb, 2 + ifsta->ssid_len); 2458 pos = skb_put(skb, 2 + ifsta->ssid_len);
@@ -2422,60 +2490,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2422 memcpy(pos, &bss->supp_rates[8], rates); 2490 memcpy(pos, &bss->supp_rates[8], rates);
2423 } 2491 }
2424 2492
2425 memset(&control, 0, sizeof(control)); 2493 ifsta->probe_resp = skb;
2426 rate_control_get_rate(dev, sband, skb, &ratesel);
2427 if (!ratesel.rate) {
2428 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2429 "for IBSS beacon\n", dev->name);
2430 break;
2431 }
2432 control.vif = &sdata->vif;
2433 control.tx_rate = ratesel.rate;
2434 if (sdata->bss_conf.use_short_preamble &&
2435 ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
2436 control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
2437 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2438 control.flags |= IEEE80211_TXCTL_NO_ACK;
2439 control.retry_limit = 1;
2440
2441 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2442 if (ifsta->probe_resp) {
2443 mgmt = (struct ieee80211_mgmt *)
2444 ifsta->probe_resp->data;
2445 mgmt->frame_control =
2446 IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2447 IEEE80211_STYPE_PROBE_RESP);
2448 } else {
2449 printk(KERN_DEBUG "%s: Could not allocate ProbeResp "
2450 "template for IBSS\n", dev->name);
2451 }
2452
2453 if (local->ops->beacon_update &&
2454 local->ops->beacon_update(local_to_hw(local),
2455 skb, &control) == 0) {
2456 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2457 "template\n", dev->name);
2458 skb = NULL;
2459 }
2460
2461 rates = 0;
2462 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2463 for (i = 0; i < bss->supp_rates_len; i++) {
2464 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2465 for (j = 0; j < sband->n_bitrates; j++)
2466 if (sband->bitrates[j].bitrate == bitrate)
2467 rates |= BIT(j);
2468 }
2469 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2470 2494
2471 ieee80211_sta_def_wmm_params(dev, bss, 1); 2495 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
2472 } while (0); 2496 }
2473 2497
2474 if (skb) { 2498 rates = 0;
2475 printk(KERN_DEBUG "%s: Failed to configure IBSS beacon " 2499 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2476 "template\n", dev->name); 2500 for (i = 0; i < bss->supp_rates_len; i++) {
2477 dev_kfree_skb(skb); 2501 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2502 for (j = 0; j < sband->n_bitrates; j++)
2503 if (sband->bitrates[j].bitrate == bitrate)
2504 rates |= BIT(j);
2478 } 2505 }
2506 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2507
2508 ieee80211_sta_def_wmm_params(dev, bss, 1);
2479 2509
2480 ifsta->state = IEEE80211_IBSS_JOINED; 2510 ifsta->state = IEEE80211_IBSS_JOINED;
2481 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 2511 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
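The rate loop converts Supported Rates IE octets (bit 7 = basic-rate flag, bits 0-6 = rate in 500 kb/s units) into the 100 kb/s units used by the bitrate tables, hence (octet & 0x7f) * 5. A worked octet:

	/* IE octet 0x96: basic-rate flag set, rate field 0x16 == 22
	 *   (0x96 & 0x7f) * 5 == 110  ->  the 11 Mb/s entry,
	 * whose bit lands in ifsta->supp_rates_bits[band] */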
@@ -2528,11 +2558,10 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2528 struct ieee80211_mgmt *mgmt, 2558 struct ieee80211_mgmt *mgmt,
2529 size_t len, 2559 size_t len,
2530 struct ieee80211_rx_status *rx_status, 2560 struct ieee80211_rx_status *rx_status,
2561 struct ieee802_11_elems *elems,
2531 int beacon) 2562 int beacon)
2532{ 2563{
2533 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2564 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2534 struct ieee802_11_elems elems;
2535 size_t baselen;
2536 int freq, clen; 2565 int freq, clen;
2537 struct ieee80211_sta_bss *bss; 2566 struct ieee80211_sta_bss *bss;
2538 struct sta_info *sta; 2567 struct sta_info *sta;
@@ -2545,35 +2574,24 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2545 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) 2574 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
2546 return; /* ignore ProbeResp to foreign address */ 2575 return; /* ignore ProbeResp to foreign address */
2547 2576
2548#if 0
2549 printk(KERN_DEBUG "%s: RX %s from %s to %s\n",
2550 dev->name, beacon ? "Beacon" : "Probe Response",
2551 print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da));
2552#endif
2553
2554 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2555 if (baselen > len)
2556 return;
2557
2558 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); 2577 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2559 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2560 2578
2561 if (ieee80211_vif_is_mesh(&sdata->vif) && elems.mesh_id && 2579 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
2562 elems.mesh_config && mesh_matches_local(&elems, dev)) { 2580 elems->mesh_config && mesh_matches_local(elems, dev)) {
2563 u64 rates = ieee80211_sta_get_rates(local, &elems, 2581 u64 rates = ieee80211_sta_get_rates(local, elems,
2564 rx_status->band); 2582 rx_status->band);
2565 2583
2566 mesh_neighbour_update(mgmt->sa, rates, dev, 2584 mesh_neighbour_update(mgmt->sa, rates, dev,
2567 mesh_peer_accepts_plinks(&elems, dev)); 2585 mesh_peer_accepts_plinks(elems, dev));
2568 } 2586 }
2569 2587
2570 rcu_read_lock(); 2588 rcu_read_lock();
2571 2589
2572 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && 2590 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
2573 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && 2591 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
2574 (sta = sta_info_get(local, mgmt->sa))) { 2592 (sta = sta_info_get(local, mgmt->sa))) {
2575 u64 prev_rates; 2593 u64 prev_rates;
2576 u64 supp_rates = ieee80211_sta_get_rates(local, &elems, 2594 u64 supp_rates = ieee80211_sta_get_rates(local, elems,
2577 rx_status->band); 2595 rx_status->band);
2578 2596
2579 prev_rates = sta->supp_rates[rx_status->band]; 2597 prev_rates = sta->supp_rates[rx_status->band];
@@ -2585,21 +2603,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2585 sta->supp_rates[rx_status->band] = 2603 sta->supp_rates[rx_status->band] =
2586 sdata->u.sta.supp_rates_bits[rx_status->band]; 2604 sdata->u.sta.supp_rates_bits[rx_status->band];
2587 } 2605 }
2588 if (sta->supp_rates[rx_status->band] != prev_rates) {
2589 printk(KERN_DEBUG "%s: updated supp_rates set for "
2590 "%s based on beacon info (0x%llx & 0x%llx -> "
2591 "0x%llx)\n",
2592 dev->name, print_mac(mac, sta->addr),
2593 (unsigned long long) prev_rates,
2594 (unsigned long long) supp_rates,
2595 (unsigned long long) sta->supp_rates[rx_status->band]);
2596 }
2597 } 2606 }
2598 2607
2599 rcu_read_unlock(); 2608 rcu_read_unlock();
2600 2609
2601 if (elems.ds_params && elems.ds_params_len == 1) 2610 if (elems->ds_params && elems->ds_params_len == 1)
2602 freq = ieee80211_channel_to_frequency(elems.ds_params[0]); 2611 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
2603 else 2612 else
2604 freq = rx_status->freq; 2613 freq = rx_status->freq;
2605 2614
@@ -2609,23 +2618,23 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2609 return; 2618 return;
2610 2619
2611#ifdef CONFIG_MAC80211_MESH 2620#ifdef CONFIG_MAC80211_MESH
2612 if (elems.mesh_config) 2621 if (elems->mesh_config)
2613 bss = ieee80211_rx_mesh_bss_get(dev, elems.mesh_id, 2622 bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id,
2614 elems.mesh_id_len, elems.mesh_config, freq); 2623 elems->mesh_id_len, elems->mesh_config, freq);
2615 else 2624 else
2616#endif 2625#endif
2617 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, 2626 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
2618 elems.ssid, elems.ssid_len); 2627 elems->ssid, elems->ssid_len);
2619 if (!bss) { 2628 if (!bss) {
2620#ifdef CONFIG_MAC80211_MESH 2629#ifdef CONFIG_MAC80211_MESH
2621 if (elems.mesh_config) 2630 if (elems->mesh_config)
2622 bss = ieee80211_rx_mesh_bss_add(dev, elems.mesh_id, 2631 bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id,
2623 elems.mesh_id_len, elems.mesh_config, 2632 elems->mesh_id_len, elems->mesh_config,
2624 elems.mesh_config_len, freq); 2633 elems->mesh_config_len, freq);
2625 else 2634 else
2626#endif 2635#endif
2627 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, 2636 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
2628 elems.ssid, elems.ssid_len); 2637 elems->ssid, elems->ssid_len);
2629 if (!bss) 2638 if (!bss)
2630 return; 2639 return;
2631 } else { 2640 } else {
@@ -2638,46 +2647,66 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2638 } 2647 }
2639 2648
2640 /* save the ERP value so that it is available at association time */ 2649 /* save the ERP value so that it is available at association time */
2641 if (elems.erp_info && elems.erp_info_len >= 1) { 2650 if (elems->erp_info && elems->erp_info_len >= 1) {
2642 bss->erp_value = elems.erp_info[0]; 2651 bss->erp_value = elems->erp_info[0];
2643 bss->has_erp_value = 1; 2652 bss->has_erp_value = 1;
2644 } 2653 }
2645 2654
2646 if (elems.ht_cap_elem && 2655 if (elems->ht_cap_elem &&
2647 (!bss->ht_ie || bss->ht_ie_len != elems.ht_cap_elem_len || 2656 (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len ||
2648 memcmp(bss->ht_ie, elems.ht_cap_elem, elems.ht_cap_elem_len))) { 2657 memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) {
2649 kfree(bss->ht_ie); 2658 kfree(bss->ht_ie);
2650 bss->ht_ie = kmalloc(elems.ht_cap_elem_len + 2, GFP_ATOMIC); 2659 bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
2651 if (bss->ht_ie) { 2660 if (bss->ht_ie) {
2652 memcpy(bss->ht_ie, elems.ht_cap_elem - 2, 2661 memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
2653 elems.ht_cap_elem_len + 2); 2662 elems->ht_cap_elem_len + 2);
2654 bss->ht_ie_len = elems.ht_cap_elem_len + 2; 2663 bss->ht_ie_len = elems->ht_cap_elem_len + 2;
2655 } else 2664 } else
2656 bss->ht_ie_len = 0; 2665 bss->ht_ie_len = 0;
2657 } else if (!elems.ht_cap_elem && bss->ht_ie) { 2666 } else if (!elems->ht_cap_elem && bss->ht_ie) {
2658 kfree(bss->ht_ie); 2667 kfree(bss->ht_ie);
2659 bss->ht_ie = NULL; 2668 bss->ht_ie = NULL;
2660 bss->ht_ie_len = 0; 2669 bss->ht_ie_len = 0;
2661 } 2670 }
2662 2671
2672 if (elems->ht_info_elem &&
2673 (!bss->ht_add_ie ||
2674 bss->ht_add_ie_len != elems->ht_info_elem_len ||
2675 memcmp(bss->ht_add_ie, elems->ht_info_elem,
2676 elems->ht_info_elem_len))) {
2677 kfree(bss->ht_add_ie);
2678 bss->ht_add_ie =
2679 kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
2680 if (bss->ht_add_ie) {
2681 memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
2682 elems->ht_info_elem_len + 2);
2683 bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
2684 } else
2685 bss->ht_add_ie_len = 0;
2686 } else if (!elems->ht_info_elem && bss->ht_add_ie) {
2687 kfree(bss->ht_add_ie);
2688 bss->ht_add_ie = NULL;
2689 bss->ht_add_ie_len = 0;
2690 }
2691
2663 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 2692 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
2664 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 2693 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
2665 2694
2666 bss->supp_rates_len = 0; 2695 bss->supp_rates_len = 0;
2667 if (elems.supp_rates) { 2696 if (elems->supp_rates) {
2668 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 2697 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2669 if (clen > elems.supp_rates_len) 2698 if (clen > elems->supp_rates_len)
2670 clen = elems.supp_rates_len; 2699 clen = elems->supp_rates_len;
2671 memcpy(&bss->supp_rates[bss->supp_rates_len], elems.supp_rates, 2700 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
2672 clen); 2701 clen);
2673 bss->supp_rates_len += clen; 2702 bss->supp_rates_len += clen;
2674 } 2703 }
2675 if (elems.ext_supp_rates) { 2704 if (elems->ext_supp_rates) {
2676 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 2705 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2677 if (clen > elems.ext_supp_rates_len) 2706 if (clen > elems->ext_supp_rates_len)
2678 clen = elems.ext_supp_rates_len; 2707 clen = elems->ext_supp_rates_len;
2679 memcpy(&bss->supp_rates[bss->supp_rates_len], 2708 memcpy(&bss->supp_rates[bss->supp_rates_len],
2680 elems.ext_supp_rates, clen); 2709 elems->ext_supp_rates, clen);
2681 bss->supp_rates_len += clen; 2710 bss->supp_rates_len += clen;
2682 } 2711 }
2683 2712
@@ -2685,9 +2714,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2685 2714
2686 bss->timestamp = beacon_timestamp; 2715 bss->timestamp = beacon_timestamp;
2687 bss->last_update = jiffies; 2716 bss->last_update = jiffies;
2688 bss->rssi = rx_status->ssi;
2689 bss->signal = rx_status->signal; 2717 bss->signal = rx_status->signal;
2690 bss->noise = rx_status->noise; 2718 bss->noise = rx_status->noise;
2719 bss->qual = rx_status->qual;
2691 if (!beacon && !bss->probe_resp) 2720 if (!beacon && !bss->probe_resp)
2692 bss->probe_resp = true; 2721 bss->probe_resp = true;
2693 2722
@@ -2697,37 +2726,37 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2697 */ 2726 */
2698 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 2727 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
2699 bss->probe_resp && beacon) { 2728 bss->probe_resp && beacon) {
2700 ieee80211_rx_bss_put(dev, bss); 2729 ieee80211_rx_bss_put(local, bss);
2701 return; 2730 return;
2702 } 2731 }
2703 2732
2704 if (elems.wpa && 2733 if (elems->wpa &&
2705 (!bss->wpa_ie || bss->wpa_ie_len != elems.wpa_len || 2734 (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len ||
2706 memcmp(bss->wpa_ie, elems.wpa, elems.wpa_len))) { 2735 memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) {
2707 kfree(bss->wpa_ie); 2736 kfree(bss->wpa_ie);
2708 bss->wpa_ie = kmalloc(elems.wpa_len + 2, GFP_ATOMIC); 2737 bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
2709 if (bss->wpa_ie) { 2738 if (bss->wpa_ie) {
2710 memcpy(bss->wpa_ie, elems.wpa - 2, elems.wpa_len + 2); 2739 memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
2711 bss->wpa_ie_len = elems.wpa_len + 2; 2740 bss->wpa_ie_len = elems->wpa_len + 2;
2712 } else 2741 } else
2713 bss->wpa_ie_len = 0; 2742 bss->wpa_ie_len = 0;
2714 } else if (!elems.wpa && bss->wpa_ie) { 2743 } else if (!elems->wpa && bss->wpa_ie) {
2715 kfree(bss->wpa_ie); 2744 kfree(bss->wpa_ie);
2716 bss->wpa_ie = NULL; 2745 bss->wpa_ie = NULL;
2717 bss->wpa_ie_len = 0; 2746 bss->wpa_ie_len = 0;
2718 } 2747 }
2719 2748
2720 if (elems.rsn && 2749 if (elems->rsn &&
2721 (!bss->rsn_ie || bss->rsn_ie_len != elems.rsn_len || 2750 (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
2722 memcmp(bss->rsn_ie, elems.rsn, elems.rsn_len))) { 2751 memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
2723 kfree(bss->rsn_ie); 2752 kfree(bss->rsn_ie);
2724 bss->rsn_ie = kmalloc(elems.rsn_len + 2, GFP_ATOMIC); 2753 bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
2725 if (bss->rsn_ie) { 2754 if (bss->rsn_ie) {
2726 memcpy(bss->rsn_ie, elems.rsn - 2, elems.rsn_len + 2); 2755 memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
2727 bss->rsn_ie_len = elems.rsn_len + 2; 2756 bss->rsn_ie_len = elems->rsn_len + 2;
2728 } else 2757 } else
2729 bss->rsn_ie_len = 0; 2758 bss->rsn_ie_len = 0;
2730 } else if (!elems.rsn && bss->rsn_ie) { 2759 } else if (!elems->rsn && bss->rsn_ie) {
2731 kfree(bss->rsn_ie); 2760 kfree(bss->rsn_ie);
2732 bss->rsn_ie = NULL; 2761 bss->rsn_ie = NULL;
2733 bss->rsn_ie_len = 0; 2762 bss->rsn_ie_len = 0;
@@ -2747,20 +2776,21 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2747 * inclusion of the WMM Parameters in beacons, however, is optional. 2776 * inclusion of the WMM Parameters in beacons, however, is optional.
2748 */ 2777 */
2749 2778
2750 if (elems.wmm_param && 2779 if (elems->wmm_param &&
2751 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_param_len || 2780 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
2752 memcmp(bss->wmm_ie, elems.wmm_param, elems.wmm_param_len))) { 2781 memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
2753 kfree(bss->wmm_ie); 2782 kfree(bss->wmm_ie);
2754 bss->wmm_ie = kmalloc(elems.wmm_param_len + 2, GFP_ATOMIC); 2783 bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
2755 if (bss->wmm_ie) { 2784 if (bss->wmm_ie) {
2756 memcpy(bss->wmm_ie, elems.wmm_param - 2, 2785 memcpy(bss->wmm_ie, elems->wmm_param - 2,
2757 elems.wmm_param_len + 2); 2786 elems->wmm_param_len + 2);
2758 bss->wmm_ie_len = elems.wmm_param_len + 2; 2787 bss->wmm_ie_len = elems->wmm_param_len + 2;
2759 } else 2788 } else
2760 bss->wmm_ie_len = 0; 2789 bss->wmm_ie_len = 0;
2761 } else if (elems.wmm_info && 2790 } else if (elems->wmm_info &&
2762 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_info_len || 2791 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
2763 memcmp(bss->wmm_ie, elems.wmm_info, elems.wmm_info_len))) { 2792 memcmp(bss->wmm_ie, elems->wmm_info,
2793 elems->wmm_info_len))) {
2764 /* For certain APs, the fifth bit is not set in the WMM IE of 2794 /* For certain APs, the fifth bit is not set in the WMM IE of
2765 * beacon frames. So while parsing the beacon frame, the 2795 * beacon frames. So while parsing the beacon frame, the
2766 * wmm_info structure is used instead of wmm_param. 2796 * wmm_info structure is used instead of wmm_param.
@@ -2770,14 +2800,14 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2770 * n-band association. 2800 * n-band association.
2771 */ 2801 */
2772 kfree(bss->wmm_ie); 2802 kfree(bss->wmm_ie);
2773 bss->wmm_ie = kmalloc(elems.wmm_info_len + 2, GFP_ATOMIC); 2803 bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
2774 if (bss->wmm_ie) { 2804 if (bss->wmm_ie) {
2775 memcpy(bss->wmm_ie, elems.wmm_info - 2, 2805 memcpy(bss->wmm_ie, elems->wmm_info - 2,
2776 elems.wmm_info_len + 2); 2806 elems->wmm_info_len + 2);
2777 bss->wmm_ie_len = elems.wmm_info_len + 2; 2807 bss->wmm_ie_len = elems->wmm_info_len + 2;
2778 } else 2808 } else
2779 bss->wmm_ie_len = 0; 2809 bss->wmm_ie_len = 0;
2780 } else if (!elems.wmm_param && !elems.wmm_info && bss->wmm_ie) { 2810 } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
2781 kfree(bss->wmm_ie); 2811 kfree(bss->wmm_ie);
2782 bss->wmm_ie = NULL; 2812 bss->wmm_ie = NULL;
2783 bss->wmm_ie_len = 0; 2813 bss->wmm_ie_len = 0;
@@ -2788,8 +2818,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2788 !local->sta_sw_scanning && !local->sta_hw_scanning && 2818 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2789 bss->capability & WLAN_CAPABILITY_IBSS && 2819 bss->capability & WLAN_CAPABILITY_IBSS &&
2790 bss->freq == local->oper_channel->center_freq && 2820 bss->freq == local->oper_channel->center_freq &&
2791 elems.ssid_len == sdata->u.sta.ssid_len && 2821 elems->ssid_len == sdata->u.sta.ssid_len &&
2792 memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) { 2822 memcmp(elems->ssid, sdata->u.sta.ssid,
2823 sdata->u.sta.ssid_len) == 0) {
2793 if (rx_status->flag & RX_FLAG_TSFT) { 2824 if (rx_status->flag & RX_FLAG_TSFT) {
2794 /* in order for correct IBSS merging we need mactime 2825 /* in order for correct IBSS merging we need mactime
2795 * 2826 *
@@ -2827,18 +2858,18 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2827#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2858#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2828 if (beacon_timestamp > rx_timestamp) { 2859 if (beacon_timestamp > rx_timestamp) {
2829#ifndef CONFIG_MAC80211_IBSS_DEBUG 2860#ifndef CONFIG_MAC80211_IBSS_DEBUG
2830 if (net_ratelimit()) 2861 printk(KERN_DEBUG "%s: beacon TSF higher than "
2862 "local TSF - IBSS merge with BSSID %s\n",
2863 dev->name, print_mac(mac, mgmt->bssid));
2831#endif 2864#endif
2832 printk(KERN_DEBUG "%s: beacon TSF higher than "
2833 "local TSF - IBSS merge with BSSID %s\n",
2834 dev->name, print_mac(mac, mgmt->bssid));
2835 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); 2865 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss);
2836 ieee80211_ibss_add_sta(dev, NULL, 2866 ieee80211_ibss_add_sta(dev, NULL,
2837 mgmt->bssid, mgmt->sa); 2867 mgmt->bssid, mgmt->sa,
2868 BIT(rx_status->rate_idx));
2838 } 2869 }
2839 } 2870 }
2840 2871
2841 ieee80211_rx_bss_put(dev, bss); 2872 ieee80211_rx_bss_put(local, bss);
2842} 2873}
2843 2874
2844 2875
@@ -2847,7 +2878,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
2847 size_t len, 2878 size_t len,
2848 struct ieee80211_rx_status *rx_status) 2879 struct ieee80211_rx_status *rx_status)
2849{ 2880{
2850 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 0); 2881 size_t baselen;
2882 struct ieee802_11_elems elems;
2883
2884 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
2885 if (baselen > len)
2886 return;
2887
2888 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2889 &elems);
2890
2891 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0);
2851} 2892}
2852 2893
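baselen is the offset of the first IE inside the frame: the 24-byte management header plus the fixed ProbeResp fields (8-byte timestamp, 2-byte beacon interval, 2-byte capability), i.e. 36 bytes; anything shorter cannot carry IEs and is dropped before parsing. The same arithmetic, spelled out:

	/* baselen = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt
	 *         = 24 (hdr) + 8 (timestamp) + 2 (beacon int) + 2 (capab)
	 *         = 36 bytes */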
2853 2894
@@ -2864,7 +2905,14 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2864 struct ieee80211_conf *conf = &local->hw.conf; 2905 struct ieee80211_conf *conf = &local->hw.conf;
2865 u32 changed = 0; 2906 u32 changed = 0;
2866 2907
2867 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 1); 2908 /* Process beacon from the current BSS */
2909 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2910 if (baselen > len)
2911 return;
2912
2913 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2914
2915 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1);
2868 2916
2869 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2917 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2870 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2918 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
@@ -2875,17 +2923,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2875 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 2923 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
2876 return; 2924 return;
2877 2925
2878 /* Process beacon from the current BSS */ 2926 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2879 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2927 elems.wmm_param_len);
2880 if (baselen > len)
2881 return;
2882
2883 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2884
2885 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2886 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2887 elems.wmm_param_len);
2888 }
2889 2928
2890 /* Do not send changes to driver if we are scanning. This removes 2929 /* Do not send changes to driver if we are scanning. This removes
2891 * requirement that driver's bss_info_changed function needs to be 2930 * requirement that driver's bss_info_changed function needs to be
@@ -2962,11 +3001,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
 	pos = mgmt->u.probe_req.variable;
 	if (pos[0] != WLAN_EID_SSID ||
 	    pos + 2 + pos[1] > end) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
-			       "from %s\n",
-			       dev->name, print_mac(mac, mgmt->sa));
-		}
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+		printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
+		       "from %s\n",
+		       dev->name, print_mac(mac, mgmt->sa));
+#endif
 		return;
 	}
 	if (pos[1] != 0 &&
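
Note: the `pos + 2 + pos[1] > end` test checks a single information element (ID byte, length byte, payload) against the buffer end before the payload is touched. The same pattern generalizes to walking a whole IE list; a sketch assuming a flat buffer, with a hypothetical callback:

    #include <stdint.h>

    /* Each element is ID, length, then `length` bytes of payload;
     * validate the length byte before trusting the payload. */
    void walk_ies(const uint8_t *pos, const uint8_t *end,
                  void (*cb)(uint8_t id, const uint8_t *data, uint8_t len))
    {
        while (end - pos >= 2 && end - pos >= 2 + pos[1]) {
            cb(pos[0], pos + 2, pos[1]);
            pos += 2 + pos[1];
        }
    }
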
@@ -2997,11 +3036,24 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
 				     struct ieee80211_rx_status *rx_status)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 
 	if (len < IEEE80211_MIN_ACTION_SIZE)
 		return;
 
 	switch (mgmt->u.action.category) {
+	case WLAN_CATEGORY_SPECTRUM_MGMT:
+		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+			break;
+		switch (mgmt->u.action.u.chan_switch.action_code) {
+		case WLAN_ACTION_SPCT_MSR_REQ:
+			if (len < (IEEE80211_MIN_ACTION_SIZE +
+				   sizeof(mgmt->u.action.u.measurement)))
+				break;
+			ieee80211_sta_process_measurement_req(dev, mgmt, len);
+			break;
+		}
+		break;
 	case WLAN_CATEGORY_BACK:
 		switch (mgmt->u.action.u.addba_req.action_code) {
 		case WLAN_ACTION_ADDBA_REQ:
@@ -3022,11 +3074,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
 				break;
 			ieee80211_sta_process_delba(dev, mgmt, len);
 			break;
-		default:
-			if (net_ratelimit())
-				printk(KERN_DEBUG "%s: Rx unknown A-MPDU action\n",
-				       dev->name);
-			break;
 		}
 		break;
 	case PLINK_CATEGORY:
@@ -3037,11 +3084,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
 		if (ieee80211_vif_is_mesh(&sdata->vif))
 			mesh_rx_path_sel_frame(dev, mgmt, len);
 		break;
-	default:
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: Rx unknown action frame - "
-			       "category=%d\n", dev->name, mgmt->u.action.category);
-		break;
 	}
 }
 
@@ -3077,11 +3119,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
 		skb_queue_tail(&ifsta->skb_queue, skb);
 		queue_work(local->hw.workqueue, &ifsta->work);
 		return;
-	default:
-		printk(KERN_DEBUG "%s: received unknown management frame - "
-		       "stype=%d\n", dev->name,
-		       (fc & IEEE80211_FCTL_STYPE) >> 4);
-		break;
 	}
 
  fail:
@@ -3145,33 +3182,32 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
 		    struct ieee80211_rx_status *rx_status)
 {
 	struct ieee80211_mgmt *mgmt;
-	u16 fc;
+	__le16 fc;
 
 	if (skb->len < 2)
 		return RX_DROP_UNUSABLE;
 
 	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = le16_to_cpu(mgmt->frame_control);
+	fc = mgmt->frame_control;
 
-	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
+	if (ieee80211_is_ctl(fc))
 		return RX_CONTINUE;
 
 	if (skb->len < 24)
 		return RX_DROP_MONITOR;
 
-	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
-		if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) {
-			ieee80211_rx_mgmt_probe_resp(dev, mgmt,
-						     skb->len, rx_status);
-			dev_kfree_skb(skb);
-			return RX_QUEUED;
-		} else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) {
-			ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len,
-						 rx_status);
-			dev_kfree_skb(skb);
-			return RX_QUEUED;
-		}
-	}
+	if (ieee80211_is_probe_resp(fc)) {
+		ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
+		dev_kfree_skb(skb);
+		return RX_QUEUED;
+	}
+
+	if (ieee80211_is_beacon(fc)) {
+		ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
+		dev_kfree_skb(skb);
+		return RX_QUEUED;
+	}
 
 	return RX_CONTINUE;
 }
 
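
Note: keeping fc as __le16 lets the type checker catch missed byte swaps, and the ieee80211_is_*() helpers compare against constants converted to little-endian at compile time, so no per-frame conversion is needed. A rough user-space illustration of the idea; the le16 typedef and constants below are stand-ins and assume a little-endian host (the kernel helpers use cpu_to_le16() on the constants so they work everywhere):

    #include <stdint.h>

    typedef uint16_t le16;            /* stand-in for the kernel __le16 */
    #define FCTL_FTYPE   0x000c
    #define FCTL_STYPE   0x00f0
    #define FTYPE_MGMT   0x0000
    #define STYPE_BEACON 0x0080

    /* One masked compare per test, no byte swap on the hot path. */
    static inline int is_beacon(le16 fc)
    {
        return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | STYPE_BEACON);
    }
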
@@ -3211,8 +3247,10 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
 	spin_lock_irqsave(&local->sta_lock, flags);
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
 		if (time_after(jiffies, sta->last_rx + exp_time)) {
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
 			printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
 			       dev->name, print_mac(mac, sta->addr));
+#endif
 			__sta_info_unlink(&sta);
 			if (sta)
 				list_add(&sta->list, &tmp_list);
@@ -3251,7 +3289,7 @@ static void ieee80211_mesh_housekeeping(struct net_device *dev,
 
 	free_plinks = mesh_plink_availables(sdata);
 	if (free_plinks != sdata->u.sta.accepting_plinks)
-		ieee80211_if_config_beacon(dev);
+		ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
 
 	mod_timer(&ifsta->timer, jiffies +
 			IEEE80211_MESH_HOUSEKEEPING_INTERVAL);
@@ -3295,13 +3333,10 @@ void ieee80211_sta_work(struct work_struct *work)
 	if (local->sta_sw_scanning || local->sta_hw_scanning)
 		return;
 
-	if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
-	    sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) {
-		printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface "
-		       "(type=%d)\n", dev->name, sdata->vif.type);
+	if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA &&
+		    sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
+		    sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
 		return;
-	}
 	ifsta = &sdata->u.sta;
 
 	while ((skb = skb_dequeue(&ifsta->skb_queue)))
@@ -3355,8 +3390,7 @@ void ieee80211_sta_work(struct work_struct *work)
 		break;
 #endif
 	default:
-		printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n",
-		       ifsta->state);
+		WARN_ON(1);
 		break;
 	}
 
@@ -3391,8 +3425,6 @@ static void ieee80211_sta_reset_auth(struct net_device *dev,
 		ifsta->auth_alg = WLAN_AUTH_LEAP;
 	else
 		ifsta->auth_alg = WLAN_AUTH_OPEN;
-	printk(KERN_DEBUG "%s: Initial auth_alg=%d\n", dev->name,
-	       ifsta->auth_alg);
 	ifsta->auth_transaction = -1;
 	ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
 	ifsta->auth_tries = ifsta->assoc_tries = 0;
@@ -3481,9 +3513,9 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
 		    !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
 			continue;
 
-		if (!selected || top_rssi < bss->rssi) {
+		if (!selected || top_rssi < bss->signal) {
 			selected = bss;
-			top_rssi = bss->rssi;
+			top_rssi = bss->signal;
 		}
 	}
 	if (selected)
@@ -3497,7 +3529,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
 			       selected->ssid_len);
 		ieee80211_sta_set_bssid(dev, selected->bssid);
 		ieee80211_sta_def_wmm_params(dev, selected, 0);
-		ieee80211_rx_bss_put(dev, selected);
+		ieee80211_rx_bss_put(local, selected);
 		ifsta->state = IEEE80211_AUTHENTICATE;
 		ieee80211_sta_reset_auth(dev, ifsta);
 		return 0;
@@ -3556,14 +3588,16 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
 	sband = local->hw.wiphy->bands[bss->band];
 
 	if (local->hw.conf.beacon_int == 0)
-		local->hw.conf.beacon_int = 10000;
+		local->hw.conf.beacon_int = 100;
 	bss->beacon_int = local->hw.conf.beacon_int;
 	bss->last_update = jiffies;
 	bss->capability = WLAN_CAPABILITY_IBSS;
-	if (sdata->default_key) {
+
+	if (sdata->default_key)
 		bss->capability |= WLAN_CAPABILITY_PRIVACY;
-	} else
+	else
 		sdata->drop_unencrypted = 0;
+
 	bss->supp_rates_len = sband->n_bitrates;
 	pos = bss->supp_rates;
 	for (i = 0; i < sband->n_bitrates; i++) {
@@ -3572,7 +3606,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
 	}
 
 	ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
-	ieee80211_rx_bss_put(dev, bss);
+	ieee80211_rx_bss_put(local, bss);
 	return ret;
 }
 
@@ -3628,7 +3662,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
 		       " based on configured SSID\n",
 		       dev->name, print_mac(mac, bssid));
 		ret = ieee80211_sta_join_ibss(dev, ifsta, bss);
-		ieee80211_rx_bss_put(dev, bss);
+		ieee80211_rx_bss_put(local, bss);
 		return ret;
 	}
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -3679,28 +3713,45 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta;
+	int res;
 
 	if (len > IEEE80211_MAX_SSID_LEN)
 		return -EINVAL;
 
 	ifsta = &sdata->u.sta;
 
-	if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0)
+	if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) {
+		memset(ifsta->ssid, 0, sizeof(ifsta->ssid));
+		memcpy(ifsta->ssid, ssid, len);
+		ifsta->ssid_len = len;
 		ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
-	memcpy(ifsta->ssid, ssid, len);
-	memset(ifsta->ssid + len, 0, IEEE80211_MAX_SSID_LEN - len);
-	ifsta->ssid_len = len;
+
+		res = 0;
+		/*
+		 * Hack! MLME code needs to be cleaned up to have different
+		 * entry points for configuration and internal selection change
+		 */
+		if (netif_running(sdata->dev))
+			res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
+		if (res) {
+			printk(KERN_DEBUG "%s: Failed to config new SSID to "
+			       "the low-level driver\n", dev->name);
+			return res;
+		}
+	}
 
 	if (len)
 		ifsta->flags |= IEEE80211_STA_SSID_SET;
 	else
 		ifsta->flags &= ~IEEE80211_STA_SSID_SET;
+
 	if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
 	    !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
 		ifsta->ibss_join_req = jiffies;
 		ifsta->state = IEEE80211_IBSS_SEARCH;
 		return ieee80211_sta_find_ibss(dev, ifsta);
 	}
+
 	return 0;
 }
 
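
Note: ieee80211_if_config() now takes a "what changed" argument instead of per-item entry points, so the driver sees one callback per reconfiguration. A sketch assuming the IEEE80211_IFCC_* constants are single-bit flags that may be OR'ed together (which the BSSID/SSID/BEACON call sites in this patch suggest, but is not shown here); all names below are hypothetical stand-ins:

    /* Hypothetical mirror of the IEEE80211_IFCC_* change flags. */
    enum if_conf_change {
        IFCC_BSSID  = 1 << 0,
        IFCC_SSID   = 1 << 1,
        IFCC_BEACON = 1 << 2,
    };

    int if_config(void *sdata, unsigned int changed); /* stand-in */

    /* Push two changes with a single driver round trip. */
    int reconfigure(void *sdata)
    {
        return if_config(sdata, IFCC_BSSID | IFCC_SSID);
    }
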
@@ -3726,7 +3777,12 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid)
 
 	if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
 		memcpy(ifsta->bssid, bssid, ETH_ALEN);
-		res = ieee80211_if_config(dev);
+		res = 0;
+		/*
+		 * Hack! See also ieee80211_sta_set_ssid.
+		 */
+		if (netif_running(sdata->dev))
+			res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
 		if (res) {
 			printk(KERN_DEBUG "%s: Failed to config new BSSID to "
 			       "the low-level driver\n", dev->name);
@@ -3749,7 +3805,7 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr *nullfunc;
-	u16 fc;
+	__le16 fc;
 
 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
 	if (!skb) {
@@ -3761,11 +3817,11 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
 
 	nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
 	memset(nullfunc, 0, 24);
-	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
-	     IEEE80211_FCTL_TODS;
+	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
+			 IEEE80211_FCTL_TODS);
 	if (powersave)
-		fc |= IEEE80211_FCTL_PM;
-	nullfunc->frame_control = cpu_to_le16(fc);
+		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
+	nullfunc->frame_control = fc;
 	memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
 	memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
 	memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
@@ -3813,6 +3869,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
 
 
 	netif_tx_lock_bh(local->mdev);
+	netif_addr_lock(local->mdev);
 	local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
 	local->ops->configure_filter(local_to_hw(local),
 				     FIF_BCN_PRBRESP_PROMISC,
@@ -3820,15 +3877,11 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
 				     local->mdev->mc_count,
 				     local->mdev->mc_list);
 
+	netif_addr_unlock(local->mdev);
 	netif_tx_unlock_bh(local->mdev);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-
-		/* No need to wake the master device. */
-		if (sdata->dev == local->mdev)
-			continue;
-
 		/* Tell AP we're back */
 		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
 		    sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)
@@ -3994,12 +4047,6 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-
-		/* Don't stop the master interface, otherwise we can't transmit
-		 * probes! */
-		if (sdata->dev == local->mdev)
-			continue;
-
 		netif_stop_queue(sdata->dev);
 		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
 		    (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED))
@@ -4017,14 +4064,14 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
 	local->scan_band = IEEE80211_BAND_2GHZ;
 	local->scan_dev = dev;
 
-	netif_tx_lock_bh(local->mdev);
+	netif_addr_lock_bh(local->mdev);
 	local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
 	local->ops->configure_filter(local_to_hw(local),
 				     FIF_BCN_PRBRESP_PROMISC,
 				     &local->filter_flags,
 				     local->mdev->mc_count,
 				     local->mdev->mc_list);
-	netif_tx_unlock_bh(local->mdev);
+	netif_addr_unlock_bh(local->mdev);
 
 	/* TODO: start scan as soon as all nullfunc frames are ACKed */
 	queue_delayed_work(local->hw.workqueue, &local->scan_work,
@@ -4059,6 +4106,7 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
 
 static char *
 ieee80211_sta_scan_result(struct net_device *dev,
+			  struct iw_request_info *info,
 			  struct ieee80211_sta_bss *bss,
 			  char *current_ev, char *end_buf)
 {
@@ -4073,7 +4121,7 @@ ieee80211_sta_scan_result(struct net_device *dev,
 	iwe.cmd = SIOCGIWAP;
 	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
 	memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
-	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
 					  IW_EV_ADDR_LEN);
 
 	memset(&iwe, 0, sizeof(iwe));
@@ -4081,13 +4129,13 @@ ieee80211_sta_scan_result(struct net_device *dev,
 	if (bss_mesh_cfg(bss)) {
 		iwe.u.data.length = bss_mesh_id_len(bss);
 		iwe.u.data.flags = 1;
-		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
-						  bss_mesh_id(bss));
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss_mesh_id(bss));
 	} else {
 		iwe.u.data.length = bss->ssid_len;
 		iwe.u.data.flags = 1;
-		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
-						  bss->ssid);
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss->ssid);
 	}
 
 	if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
@@ -4100,30 +4148,30 @@ ieee80211_sta_scan_result(struct net_device *dev,
 			iwe.u.mode = IW_MODE_MASTER;
 		else
 			iwe.u.mode = IW_MODE_ADHOC;
-		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
-						  IW_EV_UINT_LEN);
+		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
+						  &iwe, IW_EV_UINT_LEN);
 	}
 
 	memset(&iwe, 0, sizeof(iwe));
 	iwe.cmd = SIOCGIWFREQ;
 	iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
 	iwe.u.freq.e = 0;
-	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
 					  IW_EV_FREQ_LEN);
 
 	memset(&iwe, 0, sizeof(iwe));
 	iwe.cmd = SIOCGIWFREQ;
 	iwe.u.freq.m = bss->freq;
 	iwe.u.freq.e = 6;
-	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
 					  IW_EV_FREQ_LEN);
 	memset(&iwe, 0, sizeof(iwe));
 	iwe.cmd = IWEVQUAL;
-	iwe.u.qual.qual = bss->signal;
-	iwe.u.qual.level = bss->rssi;
+	iwe.u.qual.qual = bss->qual;
+	iwe.u.qual.level = bss->signal;
 	iwe.u.qual.noise = bss->noise;
 	iwe.u.qual.updated = local->wstats_flags;
-	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+	current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
 					  IW_EV_QUAL_LEN);
 
 	memset(&iwe, 0, sizeof(iwe));
@@ -4133,27 +4181,36 @@ ieee80211_sta_scan_result(struct net_device *dev,
 	else
 		iwe.u.data.flags = IW_ENCODE_DISABLED;
 	iwe.u.data.length = 0;
-	current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
+	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+					  &iwe, "");
 
 	if (bss && bss->wpa_ie) {
 		memset(&iwe, 0, sizeof(iwe));
 		iwe.cmd = IWEVGENIE;
 		iwe.u.data.length = bss->wpa_ie_len;
-		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
-						  bss->wpa_ie);
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss->wpa_ie);
 	}
 
 	if (bss && bss->rsn_ie) {
 		memset(&iwe, 0, sizeof(iwe));
 		iwe.cmd = IWEVGENIE;
 		iwe.u.data.length = bss->rsn_ie_len;
-		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
-						  bss->rsn_ie);
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss->rsn_ie);
+	}
+
+	if (bss && bss->ht_ie) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = bss->ht_ie_len;
+		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
+						  &iwe, bss->ht_ie);
 	}
 
 	if (bss && bss->supp_rates_len > 0) {
 		/* display all supported rates in readable format */
-		char *p = current_ev + IW_EV_LCP_LEN;
+		char *p = current_ev + iwe_stream_lcp_len(info);
 		int i;
 
 		memset(&iwe, 0, sizeof(iwe));
@@ -4164,7 +4221,7 @@ ieee80211_sta_scan_result(struct net_device *dev,
 		for (i = 0; i < bss->supp_rates_len; i++) {
 			iwe.u.bitrate.value = ((bss->supp_rates[i] &
 						0x7f) * 500000);
-			p = iwe_stream_add_value(current_ev, p,
+			p = iwe_stream_add_value(info, current_ev, p,
 						 end_buf, &iwe, IW_EV_PARAM_LEN);
 		}
 		current_ev = p;
@@ -4178,8 +4235,16 @@ ieee80211_sta_scan_result(struct net_device *dev,
 		iwe.cmd = IWEVCUSTOM;
 		sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		sprintf(buf, " Last beacon: %dms ago",
+			jiffies_to_msecs(jiffies - bss->last_update));
+		iwe.u.data.length = strlen(buf);
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf, &iwe, buf);
 		kfree(buf);
 		}
 	}
@@ -4193,31 +4258,36 @@ ieee80211_sta_scan_result(struct net_device *dev,
 		iwe.cmd = IWEVCUSTOM;
 		sprintf(buf, "Mesh network (version %d)", cfg[0]);
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
 		sprintf(buf, "Path Selection Protocol ID: "
 			"0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
 			cfg[4]);
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
 		sprintf(buf, "Path Selection Metric ID: "
 			"0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
 			cfg[8]);
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
 		sprintf(buf, "Congestion Control Mode ID: "
 			"0x%02X%02X%02X%02X", cfg[9], cfg[10],
 			cfg[11], cfg[12]);
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
 		sprintf(buf, "Channel Precedence: "
 			"0x%02X%02X%02X%02X", cfg[13], cfg[14],
 			cfg[15], cfg[16]);
 		iwe.u.data.length = strlen(buf);
-		current_ev = iwe_stream_add_point(current_ev, end_buf,
+		current_ev = iwe_stream_add_point(info, current_ev,
+						  end_buf,
 						  &iwe, buf);
 		kfree(buf);
 	}
@@ -4227,7 +4297,9 @@ ieee80211_sta_scan_result(struct net_device *dev,
 }
 
 
-int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len)
+int ieee80211_sta_scan_results(struct net_device *dev,
+			       struct iw_request_info *info,
+			       char *buf, size_t len)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	char *current_ev = buf;
@@ -4240,8 +4312,8 @@ int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len)
 			spin_unlock_bh(&local->sta_bss_lock);
 			return -E2BIG;
 		}
-		current_ev = ieee80211_sta_scan_result(dev, bss, current_ev,
-						       end_buf);
+		current_ev = ieee80211_sta_scan_result(dev, info, bss,
+						       current_ev, end_buf);
 	}
 	spin_unlock_bh(&local->sta_bss_lock);
 	return current_ev - buf;
@@ -4252,6 +4324,7 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+
 	kfree(ifsta->extra_ie);
 	if (len == 0) {
 		ifsta->extra_ie = NULL;
@@ -4269,14 +4342,15 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
 }
 
 
-struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
-					 struct sk_buff *skb, u8 *bssid,
-					 u8 *addr)
+struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
+					struct sk_buff *skb, u8 *bssid,
+					u8 *addr, u64 supp_rates)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	DECLARE_MAC_BUF(mac);
+	int band = local->hw.conf.channel->band;
 
 	/* TODO: Could consider removing the least recently used entry and
 	 * allow new one to be added. */
@@ -4288,17 +4362,24 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
 		return NULL;
 	}
 
+	if (compare_ether_addr(bssid, sdata->u.sta.bssid))
+		return NULL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
 	       wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
+#endif
 
 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
 	if (!sta)
 		return NULL;
 
-	sta->flags |= WLAN_STA_AUTHORIZED;
+	set_sta_flags(sta, WLAN_STA_AUTHORIZED);
 
-	sta->supp_rates[local->hw.conf.channel->band] =
-		sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band];
+	if (supp_rates)
+		sta->supp_rates[band] = supp_rates;
+	else
+		sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
 
 	rate_control_rate_init(sta, local);
 
@@ -4314,7 +4395,7 @@ int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason)
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 
-	printk(KERN_DEBUG "%s: deauthenticate(reason=%d)\n",
+	printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
 	       dev->name, reason);
 
 	if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
@@ -4332,7 +4413,7 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
 
-	printk(KERN_DEBUG "%s: disassociate(reason=%d)\n",
+	printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
 	       dev->name, reason);
 
 	if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
@@ -4356,12 +4437,10 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw,
 	case IEEE80211_NOTIFY_RE_ASSOC:
 		rcu_read_lock();
 		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+			if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
+				continue;
 
-			if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
-				ieee80211_sta_req_auth(sdata->dev,
-						       &sdata->u.sta);
-			}
-
+			ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta);
 		}
 		rcu_read_unlock();
 		break;
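
Note: ieee80211_ibss_add_sta() now takes a u64 supported-rates bitmap and is seeded with BIT(rx_status->rate_idx), the rate the peer was actually heard on, instead of assuming the full configured rate set; later frames can OR further bits in. A sketch of that bookkeeping with hypothetical types:

    #include <stdint.h>

    struct peer {
        uint64_t supp_rates;  /* one bit per index into the band's rate table */
    };

    /* Record that the peer was heard transmitting at rate_idx. */
    static void peer_heard_at(struct peer *p, unsigned int rate_idx)
    {
        p->supp_rates |= (uint64_t)1 << rate_idx;
    }
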
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 841df93807fc..0388c090dfe9 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -176,20 +176,24 @@ void rate_control_get_rate(struct net_device *dev,
 	rcu_read_lock();
 	sta = sta_info_get(local, hdr->addr1);
 
-	memset(sel, 0, sizeof(struct rate_selection));
+	sel->rate_idx = -1;
+	sel->nonerp_idx = -1;
+	sel->probe_idx = -1;
 
 	ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
 
+	BUG_ON(sel->rate_idx < 0);
+
 	/* Select a non-ERP backup rate. */
-	if (!sel->nonerp) {
+	if (sel->nonerp_idx < 0) {
 		for (i = 0; i < sband->n_bitrates; i++) {
 			struct ieee80211_rate *rate = &sband->bitrates[i];
-			if (sel->rate->bitrate < rate->bitrate)
+			if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate)
 				break;
 
 			if (rate_supported(sta, sband->band, i) &&
 			    !(rate->flags & IEEE80211_RATE_ERP_G))
-				sel->nonerp = rate;
+				sel->nonerp_idx = i;
 		}
 	}
 
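
Note: rate_selection now carries s8 indices into sband->bitrates (-1 meaning "none") instead of struct ieee80211_rate pointers, which is cheaper to store and cannot dangle across band changes. Translating an index back to a rate is a bounds-checked table lookup; a self-contained sketch with stand-in types:

    #include <stddef.h>
    #include <stdint.h>

    struct bitrate { int kbps; };

    /* -1 (or any out-of-range index) maps to NULL, mirroring the
     * "no rate selected" convention of the new s8 fields. */
    static const struct bitrate *rate_from_idx(const struct bitrate *tbl,
                                               size_t n, int8_t idx)
    {
        if (idx < 0 || (size_t)idx >= n)
            return NULL;
        return &tbl[idx];
    }
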
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 5b45f33cb766..ede7ab56f65b 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -19,22 +19,22 @@
 #include "ieee80211_i.h"
 #include "sta_info.h"
 
-/* TODO: kdoc */
+/**
+ * struct rate_selection - rate selection for rate control algorithms
+ * @rate_idx: selected transmission rate index
+ * @nonerp_idx: non-ERP rate to use instead if the ERP rate cannot be used
+ * @probe_idx: rate to use for probing, or -1 for no probing
+ */
 struct rate_selection {
-	/* Selected transmission rate */
-	struct ieee80211_rate *rate;
-	/* Non-ERP rate to use if mac80211 decides it cannot use an ERP rate */
-	struct ieee80211_rate *nonerp;
-	/* probe with this rate, or NULL for no probing */
-	struct ieee80211_rate *probe;
+	s8 rate_idx, nonerp_idx, probe_idx;
 };
 
 struct rate_control_ops {
 	struct module *module;
 	const char *name;
 	void (*tx_status)(void *priv, struct net_device *dev,
-			  struct sk_buff *skb,
-			  struct ieee80211_tx_status *status);
+			  struct sk_buff *skb);
 	void (*get_rate)(void *priv, struct net_device *dev,
 			 struct ieee80211_supported_band *band,
 			 struct sk_buff *skb,
@@ -76,13 +76,12 @@ struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
 void rate_control_put(struct rate_control_ref *ref);
 
 static inline void rate_control_tx_status(struct net_device *dev,
-					  struct sk_buff *skb,
-					  struct ieee80211_tx_status *status)
+					  struct sk_buff *skb)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct rate_control_ref *ref = local->rate_ctrl;
 
-	ref->ops->tx_status(ref->priv, dev, skb, status);
+	ref->ops->tx_status(ref->priv, dev, skb);
 }
 
 
@@ -138,7 +137,7 @@ static inline int rate_supported(struct sta_info *sta,
 	return (sta == NULL || sta->supp_rates[band] & BIT(index));
 }
 
-static inline int
+static inline s8
 rate_lowest_index(struct ieee80211_local *local,
 		  struct ieee80211_supported_band *sband,
 		  struct sta_info *sta)
@@ -155,14 +154,6 @@ rate_lowest_index(struct ieee80211_local *local,
 	return 0;
 }
 
-static inline struct ieee80211_rate *
-rate_lowest(struct ieee80211_local *local,
-	    struct ieee80211_supported_band *sband,
-	    struct sta_info *sta)
-{
-	return &sband->bitrates[rate_lowest_index(local, sband, sta)];
-}
-
 
 /* functions for rate control related to a device */
 int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
@@ -171,9 +162,7 @@ void rate_control_deinitialize(struct ieee80211_local *local);
 
 
 /* Rate control algorithms */
-#if defined(RC80211_PID_COMPILE) || \
-    (defined(CONFIG_MAC80211_RC_PID) && \
-     !defined(CONFIG_MAC80211_RC_PID_MODULE))
+#ifdef CONFIG_MAC80211_RC_PID
 extern int rc80211_pid_init(void);
 extern void rc80211_pid_exit(void);
 #else
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 4ea7b97d1af1..0a9135b974b5 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -61,7 +61,7 @@ enum rc_pid_event_type {
 union rc_pid_event_data {
 	/* RC_PID_EVENT_TX_STATUS */
 	struct {
-		struct ieee80211_tx_status tx_status;
+		struct ieee80211_tx_info tx_status;
 	};
 	/* RC_PID_EVENT_TYPE_RATE_CHANGE */
 	/* RC_PID_EVENT_TYPE_TX_RATE */
@@ -156,7 +156,7 @@ struct rc_pid_debugfs_entries {
 };
 
 void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
-				      struct ieee80211_tx_status *stat);
+				      struct ieee80211_tx_info *stat);
 
 void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
 					int index, int rate);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index bcd27c1d7594..a914ba73ccf5 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -237,8 +237,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
 }
 
 static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
-				       struct sk_buff *skb,
-				       struct ieee80211_tx_status *status)
+				       struct sk_buff *skb)
 {
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -248,6 +247,7 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
 	struct rc_pid_sta_info *spinfo;
 	unsigned long period;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	rcu_read_lock();
 
@@ -259,35 +259,35 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
 
 	/* Don't update the state if we're not controlling the rate. */
 	sdata = sta->sdata;
-	if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
-		sta->txrate_idx = sdata->bss->max_ratectrl_rateidx;
+	if (sdata->force_unicast_rateidx > -1) {
+		sta->txrate_idx = sdata->max_ratectrl_rateidx;
 		goto unlock;
 	}
 
 	/* Ignore all frames that were sent with a different rate than the rate
 	 * we currently advise mac80211 to use. */
-	if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx])
+	if (info->tx_rate_idx != sta->txrate_idx)
 		goto unlock;
 
 	spinfo = sta->rate_ctrl_priv;
 	spinfo->tx_num_xmit++;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-	rate_control_pid_event_tx_status(&spinfo->events, status);
+	rate_control_pid_event_tx_status(&spinfo->events, info);
 #endif
 
 	/* We count frames that totally failed to be transmitted as two bad
 	 * frames, those that made it out but had some retries as one good and
 	 * one bad frame. */
-	if (status->excessive_retries) {
+	if (info->status.excessive_retries) {
 		spinfo->tx_num_failed += 2;
 		spinfo->tx_num_xmit++;
-	} else if (status->retry_count) {
+	} else if (info->status.retry_count) {
 		spinfo->tx_num_failed++;
 		spinfo->tx_num_xmit++;
 	}
 
-	if (status->excessive_retries) {
+	if (info->status.excessive_retries) {
 		sta->tx_retry_failed++;
 		sta->tx_num_consecutive_failures++;
 		sta->tx_num_mpdu_fail++;
@@ -295,8 +295,8 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
 		sta->tx_num_consecutive_failures = 0;
 		sta->tx_num_mpdu_ok++;
 	}
-	sta->tx_retry_count += status->retry_count;
-	sta->tx_num_mpdu_fail += status->retry_count;
+	sta->tx_retry_count += info->status.retry_count;
+	sta->tx_num_mpdu_fail += info->status.retry_count;
 
 	/* Update PID controller state. */
 	period = (HZ * pinfo->sampling_period + 500) / 1000;
@@ -330,15 +330,15 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
 	fc = le16_to_cpu(hdr->frame_control);
 	if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
 	    is_multicast_ether_addr(hdr->addr1) || !sta) {
-		sel->rate = rate_lowest(local, sband, sta);
+		sel->rate_idx = rate_lowest_index(local, sband, sta);
 		rcu_read_unlock();
 		return;
 	}
 
 	/* If a forced rate is in effect, select it. */
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	if (sdata->bss && sdata->bss->force_unicast_rateidx > -1)
-		sta->txrate_idx = sdata->bss->force_unicast_rateidx;
+	if (sdata->force_unicast_rateidx > -1)
+		sta->txrate_idx = sdata->force_unicast_rateidx;
 
 	rateidx = sta->txrate_idx;
 
@@ -349,7 +349,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
 
 	rcu_read_unlock();
 
-	sel->rate = &sband->bitrates[rateidx];
+	sel->rate_idx = rateidx;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	rate_control_pid_event_tx_rate(
@@ -535,11 +535,6 @@ static struct rate_control_ops mac80211_rcpid = {
 #endif
 };
 
-MODULE_DESCRIPTION("PID controller based rate control algorithm");
-MODULE_AUTHOR("Stefano Brivio");
-MODULE_AUTHOR("Mattias Nissler");
-MODULE_LICENSE("GPL");
-
 int __init rc80211_pid_init(void)
 {
 	return ieee80211_rate_control_register(&mac80211_rcpid);
@@ -549,8 +544,3 @@ void rc80211_pid_exit(void)
 {
 	ieee80211_rate_control_unregister(&mac80211_rcpid);
 }
-
-#ifdef CONFIG_MAC80211_RC_PID_MODULE
-module_init(rc80211_pid_init);
-module_exit(rc80211_pid_exit);
-#endif
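
Note: with the MODULE_* boilerplate and module_init/exit removed, rc80211_pid is always built in, and its TX status now arrives through the skb control buffer (IEEE80211_SKB_CB) rather than a separate argument. A sketch of the control-buffer pattern; the structures below are hypothetical stand-ins sized like skb->cb, not the kernel's definitions:

    /* Hypothetical stand-ins; in mac80211 this is IEEE80211_SKB_CB(). */
    struct tx_info { unsigned int retry_count, excessive_retries; };
    struct sk_buff { unsigned char cb[48]; };

    /* The status rides inside the skb itself, so every layer that can
     * see the skb can read it without extra function parameters. */
    static inline struct tx_info *skb_tx_info(struct sk_buff *skb)
    {
        return (struct tx_info *)skb->cb;
    }
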
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index ff5c380f3c13..8121d3bc6835 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -39,11 +39,11 @@ static void rate_control_pid_event(struct rc_pid_event_buffer *buf,
 }
 
 void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
-				      struct ieee80211_tx_status *stat)
+				      struct ieee80211_tx_info *stat)
 {
 	union rc_pid_event_data evd;
 
-	memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_status));
+	memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info));
 	rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd);
 }
 
@@ -167,8 +167,8 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
 		switch (ev->type) {
 		case RC_PID_EVENT_TYPE_TX_STATUS:
 			p += snprintf(pb + p, length - p, "tx_status %u %u",
-				      ev->data.tx_status.excessive_retries,
-				      ev->data.tx_status.retry_count);
+				      ev->data.tx_status.status.excessive_retries,
+				      ev->data.tx_status.status.retry_count);
 			break;
 		case RC_PID_EVENT_TYPE_RATE_CHANGE:
 			p += snprintf(pb + p, length - p, "rate_change %d %d",
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0941e5d6a522..6d9ae67c27ca 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -61,22 +61,147 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status,
 				 int present_fcs_len,
 				 int radiotap_len)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 		return 1;
 	if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
 		return 1;
-	if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
-	     cpu_to_le16(IEEE80211_FTYPE_CTL)) &&
-	    ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
-	     cpu_to_le16(IEEE80211_STYPE_PSPOLL)) &&
-	    ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
-	     cpu_to_le16(IEEE80211_STYPE_BACK_REQ)))
+	if (ieee80211_is_ctl(hdr->frame_control) &&
+	    !ieee80211_is_pspoll(hdr->frame_control) &&
+	    !ieee80211_is_back_req(hdr->frame_control))
 		return 1;
 	return 0;
 }
 
+static int
+ieee80211_rx_radiotap_len(struct ieee80211_local *local,
+			  struct ieee80211_rx_status *status)
+{
+	int len;
+
+	/* always present fields */
+	len = sizeof(struct ieee80211_radiotap_header) + 9;
+
+	if (status->flag & RX_FLAG_TSFT)
+		len += 8;
+	if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
+	    local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+		len += 1;
+	if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
+		len += 1;
+
+	if (len & 1) /* padding for RX_FLAGS if necessary */
+		len++;
+
+	/* make sure radiotap starts at a naturally aligned address */
+	if (len % 8)
+		len = roundup(len, 8);
+
+	return len;
+}
+
+/**
+ * ieee80211_add_rx_radiotap_header - add radiotap header
+ *
+ * add a radiotap header containing all the fields which the hardware provided.
+ */
+static void
+ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
+				 struct sk_buff *skb,
+				 struct ieee80211_rx_status *status,
+				 struct ieee80211_rate *rate,
+				 int rtap_len)
+{
+	struct ieee80211_radiotap_header *rthdr;
+	unsigned char *pos;
+
+	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
+	memset(rthdr, 0, rtap_len);
+
+	/* radiotap header, set always present flags */
+	rthdr->it_present =
+		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
+			    (1 << IEEE80211_RADIOTAP_RATE) |
+			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
+			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
+			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
+	rthdr->it_len = cpu_to_le16(rtap_len);
+
+	pos = (unsigned char *)(rthdr+1);
+
+	/* the order of the following fields is important */
+
+	/* IEEE80211_RADIOTAP_TSFT */
+	if (status->flag & RX_FLAG_TSFT) {
+		*(__le64 *)pos = cpu_to_le64(status->mactime);
+		rthdr->it_present |=
+			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
+		pos += 8;
+	}
+
+	/* IEEE80211_RADIOTAP_FLAGS */
+	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+		*pos |= IEEE80211_RADIOTAP_F_FCS;
+	pos++;
+
+	/* IEEE80211_RADIOTAP_RATE */
+	*pos = rate->bitrate / 5;
+	pos++;
+
+	/* IEEE80211_RADIOTAP_CHANNEL */
+	*(__le16 *)pos = cpu_to_le16(status->freq);
+	pos += 2;
+	if (status->band == IEEE80211_BAND_5GHZ)
+		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
+					     IEEE80211_CHAN_5GHZ);
+	else
+		*(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN |
+					     IEEE80211_CHAN_2GHZ);
+	pos += 2;
+
+	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
+	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+		*pos = status->signal;
+		rthdr->it_present |=
+			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+		pos++;
+	}
+
+	/* IEEE80211_RADIOTAP_DBM_ANTNOISE */
+	if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
+		*pos = status->noise;
+		rthdr->it_present |=
+			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
+		pos++;
+	}
+
+	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
+
+	/* IEEE80211_RADIOTAP_ANTENNA */
+	*pos = status->antenna;
+	pos++;
+
+	/* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
+	if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
+		*pos = status->signal;
+		rthdr->it_present |=
+			cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
+		pos++;
+	}
+
+	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
+
+	/* IEEE80211_RADIOTAP_RX_FLAGS */
+	/* ensure 2 byte alignment for the 2 byte field as required */
+	if ((pos - (unsigned char *)rthdr) & 1)
+		pos++;
+	/* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
+	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+		*(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
+	pos += 2;
+}
+
 /*
  * This function copies a received frame to all monitor interfaces and
  * returns a cleaned-up SKB that no longer includes the FCS nor the
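
Note: the radiotap header is now variable length, sized up front by ieee80211_rx_radiotap_len() and padded for natural alignment. Consumers must never hard-code the size; they read it_len, the little-endian u16 at byte offset 2 of the header. A user-space sketch of that lookup (it mirrors what ieee80211_get_radiotap_len() does; the function name here is a stand-in):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns the number of bytes to skip to reach the 802.11 frame,
     * or 0 if the buffer cannot hold a valid radiotap header. */
    static size_t radiotap_skip(const uint8_t *buf, size_t buflen)
    {
        size_t it_len;

        if (buflen < 4 || buf[0] != 0)            /* it_version must be 0 */
            return 0;
        it_len = buf[2] | ((size_t)buf[3] << 8);  /* le16 at offset 2 */
        return it_len <= buflen ? it_len : 0;
    }
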
@@ -89,17 +214,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 {
 	struct ieee80211_sub_if_data *sdata;
 	int needed_headroom = 0;
-	struct ieee80211_radiotap_header *rthdr;
-	__le64 *rttsft = NULL;
-	struct ieee80211_rtap_fixed_data {
-		u8 flags;
-		u8 rate;
-		__le16 chan_freq;
-		__le16 chan_flags;
-		u8 antsignal;
-		u8 padding_for_rxflags;
-		__le16 rx_flags;
-	} __attribute__ ((packed)) *rtfixed;
 	struct sk_buff *skb, *skb2;
 	struct net_device *prev_dev = NULL;
 	int present_fcs_len = 0;
@@ -116,8 +230,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 	if (status->flag & RX_FLAG_RADIOTAP)
 		rtap_len = ieee80211_get_radiotap_len(origskb->data);
 	else
-		/* room for radiotap header, always present fields and TSFT */
-		needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8;
+		/* room for the radiotap header based on driver features */
+		needed_headroom = ieee80211_rx_radiotap_len(local, status);
 
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
 		present_fcs_len = FCS_LEN;
@@ -163,55 +277,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
163 } 277 }
164 278
165 /* if necessary, prepend radiotap information */ 279 /* if necessary, prepend radiotap information */
166 if (!(status->flag & RX_FLAG_RADIOTAP)) { 280 if (!(status->flag & RX_FLAG_RADIOTAP))
167 rtfixed = (void *) skb_push(skb, sizeof(*rtfixed)); 281 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
168 rtap_len = sizeof(*rthdr) + sizeof(*rtfixed); 282 needed_headroom);
169 if (status->flag & RX_FLAG_TSFT) {
170 rttsft = (void *) skb_push(skb, sizeof(*rttsft));
171 rtap_len += 8;
172 }
173 rthdr = (void *) skb_push(skb, sizeof(*rthdr));
174 memset(rthdr, 0, sizeof(*rthdr));
175 memset(rtfixed, 0, sizeof(*rtfixed));
176 rthdr->it_present =
177 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
178 (1 << IEEE80211_RADIOTAP_RATE) |
179 (1 << IEEE80211_RADIOTAP_CHANNEL) |
180 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |
181 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
182 rtfixed->flags = 0;
183 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
184 rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS;
185
186 if (rttsft) {
187 *rttsft = cpu_to_le64(status->mactime);
188 rthdr->it_present |=
189 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
190 }
191
192 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
193 rtfixed->rx_flags = 0;
194 if (status->flag &
195 (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
196 rtfixed->rx_flags |=
197 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
198
199 rtfixed->rate = rate->bitrate / 5;
200
201 rtfixed->chan_freq = cpu_to_le16(status->freq);
202
203 if (status->band == IEEE80211_BAND_5GHZ)
204 rtfixed->chan_flags =
205 cpu_to_le16(IEEE80211_CHAN_OFDM |
206 IEEE80211_CHAN_5GHZ);
207 else
208 rtfixed->chan_flags =
209 cpu_to_le16(IEEE80211_CHAN_DYN |
210 IEEE80211_CHAN_2GHZ);
211
212 rtfixed->antsignal = status->ssi;
213 rthdr->it_len = cpu_to_le16(rtap_len);
214 }
215 283
216 skb_reset_mac_header(skb); 284 skb_reset_mac_header(skb);
217 skb->ip_summed = CHECKSUM_UNNECESSARY; 285 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -253,33 +321,33 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
253 321
254static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 322static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
255{ 323{
256 u8 *data = rx->skb->data; 324 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
257 int tid; 325 int tid;
258 326
259 /* does the frame have a qos control field? */ 327 /* does the frame have a qos control field? */
260 if (WLAN_FC_IS_QOS_DATA(rx->fc)) { 328 if (ieee80211_is_data_qos(hdr->frame_control)) {
261 u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN; 329 u8 *qc = ieee80211_get_qos_ctl(hdr);
262 /* frame has qos control */ 330 /* frame has qos control */
263 tid = qc[0] & QOS_CONTROL_TID_MASK; 331 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
264 if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 332 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
265 rx->flags |= IEEE80211_RX_AMSDU; 333 rx->flags |= IEEE80211_RX_AMSDU;
266 else 334 else
267 rx->flags &= ~IEEE80211_RX_AMSDU; 335 rx->flags &= ~IEEE80211_RX_AMSDU;
268 } else { 336 } else {
269 if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { 337 /*
270 /* Separate TID for management frames */ 338 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
271 tid = NUM_RX_DATA_QUEUES - 1; 339 *
272 } else { 340 * Sequence numbers for management frames, QoS data
273 /* no qos control present */ 341 * frames with a broadcast/multicast address in the
274 tid = 0; /* 802.1d - Best Effort */ 342 * Address 1 field, and all non-QoS data frames sent
275 } 343 * by QoS STAs are assigned using an additional single
344 * modulo-4096 counter, [...]
345 *
346 * We also use that counter for non-QoS STAs.
347 */
348 tid = NUM_RX_DATA_QUEUES - 1;
276 } 349 }
277 350
278 I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
279 /* only a debug counter, sta might not be assigned properly yet */
280 if (rx->sta)
281 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
282
283 rx->queue = tid; 351 rx->queue = tid;
284 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 352 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
285 * For now, set skb->priority to 0 for other cases. */ 353 * For now, set skb->priority to 0 for other cases. */
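A compact sketch of the TID selection above: QoS data frames carry the TID in the low four bits of the QoS control field, while everything else shares the extra modulo-4096 queue. The queue count is an assumption for the sketch; is_qos_data stands in for ieee80211_is_data_qos().

#include <stdint.h>
#include <stdbool.h>

#define QOS_CTL_TID_MASK	0x0f
#define NUM_RX_QUEUES		17	/* assumed: 16 TIDs + 1 extra */

static int select_rx_tid(bool is_qos_data, uint8_t qos_ctl)
{
	if (is_qos_data)
		return qos_ctl & QOS_CTL_TID_MASK;	/* TID 0..15 */
	/* management and non-QoS data share the single extra counter */
	return NUM_RX_QUEUES - 1;
}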
@@ -289,9 +357,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
289static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx) 357static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
290{ 358{
291#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 359#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
360 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
292 int hdrlen; 361 int hdrlen;
293 362
294 if (!WLAN_FC_DATA_PRESENT(rx->fc)) 363 if (!ieee80211_is_data_present(hdr->frame_control))
295 return; 364 return;
296 365
297 /* 366 /*
@@ -313,7 +382,7 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
313 * header and the payload is not supported, the driver is required 382 * header and the payload is not supported, the driver is required
314 * to move the 802.11 header further back in that case. 383 * to move the 802.11 header further back in that case.
315 */ 384 */
316 hdrlen = ieee80211_get_hdrlen(rx->fc); 385 hdrlen = ieee80211_hdrlen(hdr->frame_control);
317 if (rx->flags & IEEE80211_RX_AMSDU) 386 if (rx->flags & IEEE80211_RX_AMSDU)
318 hdrlen += ETH_HLEN; 387 hdrlen += ETH_HLEN;
319 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); 388 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
@@ -321,51 +390,9 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
321} 390}
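The invariant that WARN_ON_ONCE() checks, restated as a standalone predicate: the first payload byte (header length in, plus ETH_HLEN when an A-MSDU subframe header follows) must land on a 4-byte boundary so the IP stack sees aligned headers. A minimal sketch:

#include <stdint.h>
#include <stdbool.h>

static bool payload_is_aligned(const uint8_t *data, unsigned int hdrlen,
			       bool amsdu)
{
	if (amsdu)
		hdrlen += 14;	/* ETH_HLEN: subframe header follows */
	return ((uintptr_t)(data + hdrlen) & 3) == 0;
}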
322 391
323 392
324static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
325 struct sk_buff *skb,
326 struct ieee80211_rx_status *status,
327 struct ieee80211_rate *rate)
328{
329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
330 u32 load = 0, hdrtime;
331
332 /* Estimate total channel use caused by this frame */
333
334 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
335 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
336
337 if (status->band == IEEE80211_BAND_5GHZ ||
338 (status->band == IEEE80211_BAND_5GHZ &&
339 rate->flags & IEEE80211_RATE_ERP_G))
340 hdrtime = CHAN_UTIL_HDR_SHORT;
341 else
342 hdrtime = CHAN_UTIL_HDR_LONG;
343
344 load = hdrtime;
345 if (!is_multicast_ether_addr(hdr->addr1))
346 load += hdrtime;
347
348 /* TODO: optimise again */
349 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
350
351 /* Divide channel_use by 8 to avoid wrapping around the counter */
352 load >>= CHAN_UTIL_SHIFT;
353
354 return load;
355}
356
357/* rx handlers */ 393/* rx handlers */
358 394
359static ieee80211_rx_result 395static ieee80211_rx_result debug_noinline
360ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx)
361{
362 if (rx->sta)
363 rx->sta->channel_use_raw += rx->load;
364 rx->sdata->channel_use_raw += rx->load;
365 return RX_CONTINUE;
366}
367
368static ieee80211_rx_result
369ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) 396ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
370{ 397{
371 struct ieee80211_local *local = rx->local; 398 struct ieee80211_local *local = rx->local;
@@ -394,14 +421,11 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
394static ieee80211_rx_result 421static ieee80211_rx_result
395ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 422ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
396{ 423{
397 int hdrlen = ieee80211_get_hdrlen(rx->fc); 424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
398 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 425 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
399 426
400#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) 427 if (ieee80211_is_data(hdr->frame_control)) {
401 428 if (!ieee80211_has_a4(hdr->frame_control))
402 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
403 if (!((rx->fc & IEEE80211_FCTL_FROMDS) &&
404 (rx->fc & IEEE80211_FCTL_TODS)))
405 return RX_DROP_MONITOR; 429 return RX_DROP_MONITOR;
406 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) 430 if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
407 return RX_DROP_MONITOR; 431 return RX_DROP_MONITOR;
@@ -414,27 +438,30 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
414 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { 438 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
415 struct ieee80211_mgmt *mgmt; 439 struct ieee80211_mgmt *mgmt;
416 440
417 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) 441 if (!ieee80211_is_mgmt(hdr->frame_control))
418 return RX_DROP_MONITOR; 442 return RX_DROP_MONITOR;
419 443
420 switch (rx->fc & IEEE80211_FCTL_STYPE) { 444 if (ieee80211_is_action(hdr->frame_control)) {
421 case IEEE80211_STYPE_ACTION:
422 mgmt = (struct ieee80211_mgmt *)hdr; 445 mgmt = (struct ieee80211_mgmt *)hdr;
423 if (mgmt->u.action.category != PLINK_CATEGORY) 446 if (mgmt->u.action.category != PLINK_CATEGORY)
424 return RX_DROP_MONITOR; 447 return RX_DROP_MONITOR;
425 /* fall through on else */
426 case IEEE80211_STYPE_PROBE_REQ:
427 case IEEE80211_STYPE_PROBE_RESP:
428 case IEEE80211_STYPE_BEACON:
429 return RX_CONTINUE; 448 return RX_CONTINUE;
430 break;
431 default:
432 return RX_DROP_MONITOR;
433 } 449 }
434 450
435 } else if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 451 if (ieee80211_is_probe_req(hdr->frame_control) ||
436 is_multicast_ether_addr(hdr->addr1) && 452 ieee80211_is_probe_resp(hdr->frame_control) ||
437 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) 453 ieee80211_is_beacon(hdr->frame_control))
454 return RX_CONTINUE;
455
456 return RX_DROP_MONITOR;
457
458 }
459
460#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
461
462 if (ieee80211_is_data(hdr->frame_control) &&
463 is_multicast_ether_addr(hdr->addr1) &&
464 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev))
438 return RX_DROP_MONITOR; 465 return RX_DROP_MONITOR;
439#undef msh_h_get 466#undef msh_h_get
440 467
@@ -442,16 +469,14 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
442} 469}
443 470
444 471
445static ieee80211_rx_result 472static ieee80211_rx_result debug_noinline
446ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 473ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
447{ 474{
448 struct ieee80211_hdr *hdr; 475 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
449
450 hdr = (struct ieee80211_hdr *) rx->skb->data;
451 476
452 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ 477 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
453 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 478 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
454 if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && 479 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
455 rx->sta->last_seq_ctrl[rx->queue] == 480 rx->sta->last_seq_ctrl[rx->queue] ==
456 hdr->seq_ctrl)) { 481 hdr->seq_ctrl)) {
457 if (rx->flags & IEEE80211_RX_RA_MATCH) { 482 if (rx->flags & IEEE80211_RX_RA_MATCH) {
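The duplicate filter above keys on two things: the retry bit and an unchanged sequence-control word for this STA and queue. A sketch of that rule with a hypothetical per-queue cache:

#include <stdint.h>
#include <stdbool.h>

#define FCTL_RETRY 0x0800

static bool is_dup_retransmission(uint16_t fc, uint16_t seq_ctrl,
				  uint16_t *last_seq_ctrl)
{
	if ((fc & FCTL_RETRY) && seq_ctrl == *last_seq_ctrl)
		return true;		/* retransmission: drop */
	*last_seq_ctrl = seq_ctrl;	/* remember for the next frame */
	return false;
}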
@@ -480,15 +505,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
480 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 505 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
481 return ieee80211_rx_mesh_check(rx); 506 return ieee80211_rx_mesh_check(rx);
482 507
483 if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || 508 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
484 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && 509 ieee80211_is_pspoll(hdr->frame_control)) &&
485 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
486 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 510 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
487 (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { 511 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
488 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && 512 if ((!ieee80211_has_fromds(hdr->frame_control) &&
489 !(rx->fc & IEEE80211_FCTL_TODS) && 513 !ieee80211_has_tods(hdr->frame_control) &&
490 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 514 ieee80211_is_data(hdr->frame_control)) ||
491 || !(rx->flags & IEEE80211_RX_RA_MATCH)) { 515 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
492 /* Drop IBSS frames and frames for other hosts 516 /* Drop IBSS frames and frames for other hosts
493 * silently. */ 517 * silently. */
494 return RX_DROP_MONITOR; 518 return RX_DROP_MONITOR;
@@ -501,10 +525,10 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
501} 525}
502 526
503 527
504static ieee80211_rx_result 528static ieee80211_rx_result debug_noinline
505ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 529ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
506{ 530{
507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
508 int keyidx; 532 int keyidx;
509 int hdrlen; 533 int hdrlen;
510 ieee80211_rx_result result = RX_DROP_UNUSABLE; 534 ieee80211_rx_result result = RX_DROP_UNUSABLE;
@@ -536,7 +560,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
536 * possible. 560 * possible.
537 */ 561 */
538 562
539 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) 563 if (!ieee80211_has_protected(hdr->frame_control))
540 return RX_CONTINUE; 564 return RX_CONTINUE;
541 565
542 /* 566 /*
@@ -565,7 +589,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
565 (rx->status->flag & RX_FLAG_IV_STRIPPED)) 589 (rx->status->flag & RX_FLAG_IV_STRIPPED))
566 return RX_CONTINUE; 590 return RX_CONTINUE;
567 591
568 hdrlen = ieee80211_get_hdrlen(rx->fc); 592 hdrlen = ieee80211_hdrlen(hdr->frame_control);
569 593
570 if (rx->skb->len < 8 + hdrlen) 594 if (rx->skb->len < 8 + hdrlen)
571 return RX_DROP_UNUSABLE; /* TODO: count this? */ 595 return RX_DROP_UNUSABLE; /* TODO: count this? */
@@ -592,17 +616,12 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
592 rx->key->tx_rx_count++; 616 rx->key->tx_rx_count++;
593 /* TODO: add threshold stuff again */ 617 /* TODO: add threshold stuff again */
594 } else { 618 } else {
595#ifdef CONFIG_MAC80211_DEBUG
596 if (net_ratelimit())
597 printk(KERN_DEBUG "%s: RX protected frame,"
598 " but have no key\n", rx->dev->name);
599#endif /* CONFIG_MAC80211_DEBUG */
600 return RX_DROP_MONITOR; 619 return RX_DROP_MONITOR;
601 } 620 }
602 621
603 /* Check for weak IVs if possible */ 622 /* Check for weak IVs if possible */
604 if (rx->sta && rx->key->conf.alg == ALG_WEP && 623 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
605 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 624 ieee80211_is_data(hdr->frame_control) &&
606 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) || 625 (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
607 !(rx->status->flag & RX_FLAG_DECRYPTED)) && 626 !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
608 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 627 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
@@ -633,10 +652,8 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
633 652
634 sdata = sta->sdata; 653 sdata = sta->sdata;
635 654
636 if (sdata->bss) 655 atomic_inc(&sdata->bss->num_sta_ps);
637 atomic_inc(&sdata->bss->num_sta_ps); 656 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
638 sta->flags |= WLAN_STA_PS;
639 sta->flags &= ~WLAN_STA_PSPOLL;
640#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 657#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
641 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 658 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
642 dev->name, print_mac(mac, sta->addr), sta->aid); 659 dev->name, print_mac(mac, sta->addr), sta->aid);
@@ -649,15 +666,14 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
649 struct sk_buff *skb; 666 struct sk_buff *skb;
650 int sent = 0; 667 int sent = 0;
651 struct ieee80211_sub_if_data *sdata; 668 struct ieee80211_sub_if_data *sdata;
652 struct ieee80211_tx_packet_data *pkt_data; 669 struct ieee80211_tx_info *info;
653 DECLARE_MAC_BUF(mac); 670 DECLARE_MAC_BUF(mac);
654 671
655 sdata = sta->sdata; 672 sdata = sta->sdata;
656 673
657 if (sdata->bss) 674 atomic_dec(&sdata->bss->num_sta_ps);
658 atomic_dec(&sdata->bss->num_sta_ps);
659 675
660 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL); 676 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
661 677
662 if (!skb_queue_empty(&sta->ps_tx_buf)) 678 if (!skb_queue_empty(&sta->ps_tx_buf))
663 sta_info_clear_tim_bit(sta); 679 sta_info_clear_tim_bit(sta);
@@ -669,13 +685,13 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
669 685
670 /* Send all buffered frames to the station */ 686 /* Send all buffered frames to the station */
671 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 687 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
672 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 688 info = IEEE80211_SKB_CB(skb);
673 sent++; 689 sent++;
674 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 690 info->flags |= IEEE80211_TX_CTL_REQUEUE;
675 dev_queue_xmit(skb); 691 dev_queue_xmit(skb);
676 } 692 }
677 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 693 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
678 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 694 info = IEEE80211_SKB_CB(skb);
679 local->total_ps_buffered--; 695 local->total_ps_buffered--;
680 sent++; 696 sent++;
681#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 697#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -683,19 +699,19 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
683 "since STA not sleeping anymore\n", dev->name, 699 "since STA not sleeping anymore\n", dev->name,
684 print_mac(mac, sta->addr), sta->aid); 700 print_mac(mac, sta->addr), sta->aid);
685#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 701#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
686 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 702 info->flags |= IEEE80211_TX_CTL_REQUEUE;
687 dev_queue_xmit(skb); 703 dev_queue_xmit(skb);
688 } 704 }
689 705
690 return sent; 706 return sent;
691} 707}
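Both loops above follow one drain pattern: pop each buffered frame, tag it IEEE80211_TX_CTL_REQUEUE so the TX path resubmits it rather than buffering it again, and count it. A self-contained sketch with a hypothetical singly-linked queue:

#include <stddef.h>

#define CTL_REQUEUE 0x1	/* stands in for IEEE80211_TX_CTL_REQUEUE */

struct frame {
	struct frame *next;
	unsigned int flags;
};

static int drain_ps_queue(struct frame **head)
{
	int sent = 0;

	while (*head != NULL) {
		struct frame *f = *head;

		*head = f->next;
		f->flags |= CTL_REQUEUE;	/* resubmit, don't re-buffer */
		/* the real code hands f to dev_queue_xmit() here */
		sent++;
	}
	return sent;
}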
692 708
693static ieee80211_rx_result 709static ieee80211_rx_result debug_noinline
694ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 710ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
695{ 711{
696 struct sta_info *sta = rx->sta; 712 struct sta_info *sta = rx->sta;
697 struct net_device *dev = rx->dev; 713 struct net_device *dev = rx->dev;
698 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
699 715
700 if (!sta) 716 if (!sta)
701 return RX_CONTINUE; 717 return RX_CONTINUE;
@@ -725,24 +741,26 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
725 741
726 sta->rx_fragments++; 742 sta->rx_fragments++;
727 sta->rx_bytes += rx->skb->len; 743 sta->rx_bytes += rx->skb->len;
728 sta->last_rssi = rx->status->ssi;
729 sta->last_signal = rx->status->signal; 744 sta->last_signal = rx->status->signal;
745 sta->last_qual = rx->status->qual;
730 sta->last_noise = rx->status->noise; 746 sta->last_noise = rx->status->noise;
731 747
732 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { 748 if (!ieee80211_has_morefrags(hdr->frame_control) &&
749 (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP ||
750 rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) {
733 /* Change STA power saving mode only at the end of a frame 751
734 * exchange sequence */ 752 * exchange sequence */
735 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) 753 if (test_sta_flags(sta, WLAN_STA_PS) &&
754 !ieee80211_has_pm(hdr->frame_control))
736 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); 755 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
737 else if (!(sta->flags & WLAN_STA_PS) && 756 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
738 (rx->fc & IEEE80211_FCTL_PM)) 757 ieee80211_has_pm(hdr->frame_control))
739 ap_sta_ps_start(dev, sta); 758 ap_sta_ps_start(dev, sta);
740 } 759 }
741 760
742 /* Drop data::nullfunc frames silently, since they are used only to 761 /* Drop data::nullfunc frames silently, since they are used only to
743 * control station power saving mode. */ 762 * control station power saving mode. */
744 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 763 if (ieee80211_is_nullfunc(hdr->frame_control)) {
745 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) {
746 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 764 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
747 /* Update counter and free packet here to avoid counting this 765 /* Update counter and free packet here to avoid counting this
748 * as a dropped packet. */ 766
@@ -768,7 +786,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
768 sdata->fragment_next = 0; 786 sdata->fragment_next = 0;
769 787
770 if (!skb_queue_empty(&entry->skb_list)) { 788 if (!skb_queue_empty(&entry->skb_list)) {
771#ifdef CONFIG_MAC80211_DEBUG 789#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
772 struct ieee80211_hdr *hdr = 790 struct ieee80211_hdr *hdr =
773 (struct ieee80211_hdr *) entry->skb_list.next->data; 791 (struct ieee80211_hdr *) entry->skb_list.next->data;
774 DECLARE_MAC_BUF(mac); 792 DECLARE_MAC_BUF(mac);
@@ -780,7 +798,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
780 jiffies - entry->first_frag_time, entry->seq, 798 jiffies - entry->first_frag_time, entry->seq,
781 entry->last_frag, print_mac(mac, hdr->addr1), 799 entry->last_frag, print_mac(mac, hdr->addr1),
782 print_mac(mac2, hdr->addr2)); 800 print_mac(mac2, hdr->addr2));
783#endif /* CONFIG_MAC80211_DEBUG */ 801#endif
784 __skb_queue_purge(&entry->skb_list); 802 __skb_queue_purge(&entry->skb_list);
785 } 803 }
786 804
@@ -837,7 +855,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
837 return NULL; 855 return NULL;
838} 856}
839 857
840static ieee80211_rx_result 858static ieee80211_rx_result debug_noinline
841ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 859ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
842{ 860{
843 struct ieee80211_hdr *hdr; 861 struct ieee80211_hdr *hdr;
@@ -901,18 +919,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
901 break; 919 break;
902 } 920 }
903 rpn = rx->key->u.ccmp.rx_pn[rx->queue]; 921 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
904 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { 922 if (memcmp(pn, rpn, CCMP_PN_LEN))
905 if (net_ratelimit())
906 printk(KERN_DEBUG "%s: defrag: CCMP PN not "
907 "sequential A2=%s"
908 " PN=%02x%02x%02x%02x%02x%02x "
909 "(expected %02x%02x%02x%02x%02x%02x)\n",
910 rx->dev->name, print_mac(mac, hdr->addr2),
911 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
912 rpn[5], pn[0], pn[1], pn[2], pn[3],
913 pn[4], pn[5]);
914 return RX_DROP_UNUSABLE; 923 return RX_DROP_UNUSABLE;
915 }
916 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 924 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
917 } 925 }
918 926
@@ -953,7 +961,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
953 return RX_CONTINUE; 961 return RX_CONTINUE;
954} 962}
955 963
956static ieee80211_rx_result 964static ieee80211_rx_result debug_noinline
957ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) 965ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
958{ 966{
959 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 967 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
@@ -988,7 +996,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
988 * Tell TX path to send one frame even though the STA may 996 * Tell TX path to send one frame even though the STA may
990 * still remain in PS mode after this frame exchange. 998
990 */ 998 */
991 rx->sta->flags |= WLAN_STA_PSPOLL; 999 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
992 1000
993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1001#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
994 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 1002 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
@@ -1016,7 +1024,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1016 * have nothing buffered for it? 1024 * have nothing buffered for it?
1017 */ 1025 */
1018 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 1026 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1019 "though there is no buffered frames for it\n", 1027 "though there are no buffered frames for it\n",
1020 rx->dev->name, print_mac(mac, rx->sta->addr)); 1028 rx->dev->name, print_mac(mac, rx->sta->addr));
1021#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1022 } 1030 }
@@ -1028,22 +1036,22 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1028 return RX_QUEUED; 1036 return RX_QUEUED;
1029} 1037}
1030 1038
1031static ieee80211_rx_result 1039static ieee80211_rx_result debug_noinline
1032ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) 1040ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1033{ 1041{
1034 u16 fc = rx->fc;
1035 u8 *data = rx->skb->data; 1042 u8 *data = rx->skb->data;
1036 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; 1043 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1037 1044
1038 if (!WLAN_FC_IS_QOS_DATA(fc)) 1045 if (!ieee80211_is_data_qos(hdr->frame_control))
1039 return RX_CONTINUE; 1046 return RX_CONTINUE;
1040 1047
1041 /* remove the qos control field, update frame type and meta-data */ 1048 /* remove the qos control field, update frame type and meta-data */
1042 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); 1049 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1043 hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2); 1050 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1051 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1044 /* change frame type to non QOS */ 1052 /* change frame type to non QOS */
1045 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; 1053 rx->fc &= ~IEEE80211_STYPE_QOS_DATA;
1046 hdr->frame_control = cpu_to_le16(fc); 1054 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1047 1055
1048 return RX_CONTINUE; 1056 return RX_CONTINUE;
1049} 1057}
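Stripping the QoS control field means sliding the rest of the 802.11 header forward over those two bytes and then shrinking the buffer from the front. A plain-C stand-in for the memmove()+skb_pull() pair above (hdrlen includes the QoS control field):

#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define QOS_CTL_LEN 2

static uint8_t *strip_qos_ctl(uint8_t *data, size_t hdrlen)
{
	/* move hdrlen-2 header bytes right by 2, overwriting the QoS
	 * control field, which sits in the last two bytes of the header */
	memmove(data + QOS_CTL_LEN, data, hdrlen - QOS_CTL_LEN);
	return data + QOS_CTL_LEN;	/* new start of the frame */
}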
@@ -1051,14 +1059,9 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1051static int 1059static int
1052ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1060ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1053{ 1061{
1054 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) { 1062 if (unlikely(!rx->sta ||
1055#ifdef CONFIG_MAC80211_DEBUG 1063 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1056 if (net_ratelimit())
1057 printk(KERN_DEBUG "%s: dropped frame "
1058 "(unauthorized port)\n", rx->dev->name);
1059#endif /* CONFIG_MAC80211_DEBUG */
1060 return -EACCES; 1064 return -EACCES;
1061 }
1062 1065
1063 return 0; 1066 return 0;
1064} 1067}
@@ -1138,16 +1141,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1138 memcpy(src, hdr->addr2, ETH_ALEN); 1141 memcpy(src, hdr->addr2, ETH_ALEN);
1139 1142
1140 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && 1143 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1141 sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) { 1144 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1142 if (net_ratelimit())
1143 printk(KERN_DEBUG "%s: dropped ToDS frame "
1144 "(BSSID=%s SA=%s DA=%s)\n",
1145 dev->name,
1146 print_mac(mac, hdr->addr1),
1147 print_mac(mac2, hdr->addr2),
1148 print_mac(mac3, hdr->addr3));
1149 return -1; 1145 return -1;
1150 }
1151 break; 1146 break;
1152 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1147 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1153 /* RA TA DA SA */ 1148 /* RA TA DA SA */
@@ -1155,17 +1150,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1155 memcpy(src, hdr->addr4, ETH_ALEN); 1150 memcpy(src, hdr->addr4, ETH_ALEN);
1156 1151
1157 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && 1152 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1158 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) { 1153 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1159 if (net_ratelimit())
1160 printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
1161 "frame (RA=%s TA=%s DA=%s SA=%s)\n",
1162 rx->dev->name,
1163 print_mac(mac, hdr->addr1),
1164 print_mac(mac2, hdr->addr2),
1165 print_mac(mac3, hdr->addr3),
1166 print_mac(mac4, hdr->addr4));
1167 return -1; 1154 return -1;
1168 }
1169 break; 1155 break;
1170 case IEEE80211_FCTL_FROMDS: 1156 case IEEE80211_FCTL_FROMDS:
1171 /* DA BSSID SA */ 1157 /* DA BSSID SA */
@@ -1182,27 +1168,13 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1182 memcpy(dst, hdr->addr1, ETH_ALEN); 1168 memcpy(dst, hdr->addr1, ETH_ALEN);
1183 memcpy(src, hdr->addr2, ETH_ALEN); 1169 memcpy(src, hdr->addr2, ETH_ALEN);
1184 1170
1185 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { 1171 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1186 if (net_ratelimit()) {
1187 printk(KERN_DEBUG "%s: dropped IBSS frame "
1188 "(DA=%s SA=%s BSSID=%s)\n",
1189 dev->name,
1190 print_mac(mac, hdr->addr1),
1191 print_mac(mac2, hdr->addr2),
1192 print_mac(mac3, hdr->addr3));
1193 }
1194 return -1; 1172 return -1;
1195 }
1196 break; 1173 break;
1197 } 1174 }
1198 1175
1199 if (unlikely(skb->len - hdrlen < 8)) { 1176 if (unlikely(skb->len - hdrlen < 8))
1200 if (net_ratelimit()) {
1201 printk(KERN_DEBUG "%s: RX too short data frame "
1202 "payload\n", dev->name);
1203 }
1204 return -1; 1177 return -1;
1205 }
1206 1178
1207 payload = skb->data + hdrlen; 1179 payload = skb->data + hdrlen;
1208 ethertype = (payload[6] << 8) | payload[7]; 1180 ethertype = (payload[6] << 8) | payload[7];
@@ -1345,7 +1317,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1345 } 1317 }
1346} 1318}
1347 1319
1348static ieee80211_rx_result 1320static ieee80211_rx_result debug_noinline
1349ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1321ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1350{ 1322{
1351 struct net_device *dev = rx->dev; 1323 struct net_device *dev = rx->dev;
@@ -1394,10 +1366,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1394 1366
1395 padding = ((4 - subframe_len) & 0x3); 1367 padding = ((4 - subframe_len) & 0x3);
1396 /* the last MSDU has no padding */ 1368 /* the last MSDU has no padding */
1397 if (subframe_len > remaining) { 1369 if (subframe_len > remaining)
1398 printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name);
1399 return RX_DROP_UNUSABLE; 1370 return RX_DROP_UNUSABLE;
1400 }
1401 1371
1402 skb_pull(skb, sizeof(struct ethhdr)); 1372 skb_pull(skb, sizeof(struct ethhdr));
1403 /* if last subframe reuse skb */ 1373 /* if last subframe reuse skb */
@@ -1418,8 +1388,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1418 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1388 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1419 padding); 1389 padding);
1420 if (!eth) { 1390 if (!eth) {
1421 printk(KERN_DEBUG "%s: wrong buffer size\n",
1422 dev->name);
1423 dev_kfree_skb(frame); 1391 dev_kfree_skb(frame);
1424 return RX_DROP_UNUSABLE; 1392 return RX_DROP_UNUSABLE;
1425 } 1393 }
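Each A-MSDU subframe except the last is padded so the next one starts on a 4-byte boundary; (4 - subframe_len) & 0x3 is the usual (-len) mod 4 in disguise. A one-line sketch with worked examples:

static unsigned int amsdu_pad(unsigned int subframe_len)
{
	return (4 - subframe_len) & 0x3;	/* 0..3 padding bytes */
}

/* e.g. amsdu_pad(46) == 2, amsdu_pad(48) == 0 */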
@@ -1462,7 +1430,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1462 return RX_QUEUED; 1430 return RX_QUEUED;
1463} 1431}
1464 1432
1465static ieee80211_rx_result 1433static ieee80211_rx_result debug_noinline
1466ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1434ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1467{ 1435{
1468 struct net_device *dev = rx->dev; 1436 struct net_device *dev = rx->dev;
@@ -1493,21 +1461,21 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1493 return RX_QUEUED; 1461 return RX_QUEUED;
1494} 1462}
1495 1463
1496static ieee80211_rx_result 1464static ieee80211_rx_result debug_noinline
1497ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 1465ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1498{ 1466{
1499 struct ieee80211_local *local = rx->local; 1467 struct ieee80211_local *local = rx->local;
1500 struct ieee80211_hw *hw = &local->hw; 1468 struct ieee80211_hw *hw = &local->hw;
1501 struct sk_buff *skb = rx->skb; 1469 struct sk_buff *skb = rx->skb;
1502 struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data; 1470 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1503 struct tid_ampdu_rx *tid_agg_rx; 1471 struct tid_ampdu_rx *tid_agg_rx;
1504 u16 start_seq_num; 1472 u16 start_seq_num;
1505 u16 tid; 1473 u16 tid;
1506 1474
1507 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) 1475 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1508 return RX_CONTINUE; 1476 return RX_CONTINUE;
1509 1477
1510 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { 1478 if (ieee80211_is_back_req(bar->frame_control)) {
1511 if (!rx->sta) 1479 if (!rx->sta)
1512 return RX_CONTINUE; 1480 return RX_CONTINUE;
1513 tid = le16_to_cpu(bar->control) >> 12; 1481 tid = le16_to_cpu(bar->control) >> 12;
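The TID of a BlockAckReq lives in the top four bits of the 16-bit BAR control field, hence the shift by 12 after byte-order conversion. In isolation:

#include <stdint.h>

static uint16_t bar_tid(uint16_t bar_control)	/* host byte order */
{
	return bar_control >> 12;	/* bits 12-15 carry the TID */
}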
@@ -1537,7 +1505,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1537 return RX_CONTINUE; 1505 return RX_CONTINUE;
1538} 1506}
1539 1507
1540static ieee80211_rx_result 1508static ieee80211_rx_result debug_noinline
1541ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1509ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1542{ 1510{
1543 struct ieee80211_sub_if_data *sdata; 1511 struct ieee80211_sub_if_data *sdata;
@@ -1561,41 +1529,27 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1561 struct ieee80211_hdr *hdr, 1529 struct ieee80211_hdr *hdr,
1562 struct ieee80211_rx_data *rx) 1530 struct ieee80211_rx_data *rx)
1563{ 1531{
1564 int keyidx, hdrlen; 1532 int keyidx;
1533 unsigned int hdrlen;
1565 DECLARE_MAC_BUF(mac); 1534 DECLARE_MAC_BUF(mac);
1566 DECLARE_MAC_BUF(mac2); 1535 DECLARE_MAC_BUF(mac2);
1567 1536
1568 hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb); 1537 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1569 if (rx->skb->len >= hdrlen + 4) 1538 if (rx->skb->len >= hdrlen + 4)
1570 keyidx = rx->skb->data[hdrlen + 3] >> 6; 1539 keyidx = rx->skb->data[hdrlen + 3] >> 6;
1571 else 1540 else
1572 keyidx = -1; 1541 keyidx = -1;
1573 1542
1574 if (net_ratelimit())
1575 printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC "
1576 "failure from %s to %s keyidx=%d\n",
1577 dev->name, print_mac(mac, hdr->addr2),
1578 print_mac(mac2, hdr->addr1), keyidx);
1579
1580 if (!rx->sta) { 1543 if (!rx->sta) {
1581 /* 1544 /*
1582 * Some hardware seem to generate incorrect Michael MIC 1545 * Some hardware seem to generate incorrect Michael MIC
1583 * reports; ignore them to avoid triggering countermeasures. 1546 * reports; ignore them to avoid triggering countermeasures.
1584 */ 1547 */
1585 if (net_ratelimit())
1586 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1587 "error for unknown address %s\n",
1588 dev->name, print_mac(mac, hdr->addr2));
1589 goto ignore; 1548 goto ignore;
1590 } 1549 }
1591 1550
1592 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) { 1551 if (!ieee80211_has_protected(hdr->frame_control))
1593 if (net_ratelimit())
1594 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1595 "error for a frame with no PROTECTED flag (src "
1596 "%s)\n", dev->name, print_mac(mac, hdr->addr2));
1597 goto ignore; 1552 goto ignore;
1598 }
1599 1553
1600 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { 1554 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
1601 /* 1555 /*
@@ -1604,24 +1558,12 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1604 * group keys and only the AP is sending real multicast 1558 * group keys and only the AP is sending real multicast
1605 * frames in the BSS. 1559 * frames in the BSS.
1606 */ 1560 */
1607 if (net_ratelimit())
1608 printk(KERN_DEBUG "%s: ignored Michael MIC error for "
1609 "a frame with non-zero keyidx (%d)"
1610 " (src %s)\n", dev->name, keyidx,
1611 print_mac(mac, hdr->addr2));
1612 goto ignore; 1561 goto ignore;
1613 } 1562 }
1614 1563
1615 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 1564 if (!ieee80211_is_data(hdr->frame_control) &&
1616 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 1565 !ieee80211_is_auth(hdr->frame_control))
1617 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) {
1618 if (net_ratelimit())
1619 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1620 "error for a frame that cannot be encrypted "
1621 "(fc=0x%04x) (src %s)\n",
1622 dev->name, rx->fc, print_mac(mac, hdr->addr2));
1623 goto ignore; 1566 goto ignore;
1624 }
1625 1567
1626 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1568 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr);
1627 ignore: 1569 ignore:
@@ -1710,67 +1652,57 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1710 dev_kfree_skb(skb); 1652 dev_kfree_skb(skb);
1711} 1653}
1712 1654
1713typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *);
1714static ieee80211_rx_handler ieee80211_rx_handlers[] =
1715{
1716 ieee80211_rx_h_if_stats,
1717 ieee80211_rx_h_passive_scan,
1718 ieee80211_rx_h_check,
1719 ieee80211_rx_h_decrypt,
1720 ieee80211_rx_h_sta_process,
1721 ieee80211_rx_h_defragment,
1722 ieee80211_rx_h_ps_poll,
1723 ieee80211_rx_h_michael_mic_verify,
1724 /* this must be after decryption - so header is counted in MPDU mic
1725 * must be before pae and data, so QOS_DATA format frames
1726 * are not passed to user space by these functions
1727 */
1728 ieee80211_rx_h_remove_qos_control,
1729 ieee80211_rx_h_amsdu,
1730 ieee80211_rx_h_data,
1731 ieee80211_rx_h_ctrl,
1732 ieee80211_rx_h_mgmt,
1733 NULL
1734};
1735 1655
1736static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 1656static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1737 struct ieee80211_rx_data *rx, 1657 struct ieee80211_rx_data *rx,
1738 struct sk_buff *skb) 1658 struct sk_buff *skb)
1739{ 1659{
1740 ieee80211_rx_handler *handler;
1741 ieee80211_rx_result res = RX_DROP_MONITOR; 1660 ieee80211_rx_result res = RX_DROP_MONITOR;
1742 1661
1743 rx->skb = skb; 1662 rx->skb = skb;
1744 rx->sdata = sdata; 1663 rx->sdata = sdata;
1745 rx->dev = sdata->dev; 1664 rx->dev = sdata->dev;
1746 1665
1747 for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) { 1666#define CALL_RXH(rxh) \
1748 res = (*handler)(rx); 1667 res = rxh(rx); \
1749 1668 if (res != RX_CONTINUE) \
1750 switch (res) { 1669 goto rxh_done;
1751 case RX_CONTINUE: 1670
1752 continue; 1671 CALL_RXH(ieee80211_rx_h_passive_scan)
1753 case RX_DROP_UNUSABLE: 1672 CALL_RXH(ieee80211_rx_h_check)
1754 case RX_DROP_MONITOR: 1673 CALL_RXH(ieee80211_rx_h_decrypt)
1755 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 1674 CALL_RXH(ieee80211_rx_h_sta_process)
1756 if (rx->sta) 1675 CALL_RXH(ieee80211_rx_h_defragment)
1757 rx->sta->rx_dropped++; 1676 CALL_RXH(ieee80211_rx_h_ps_poll)
1758 break; 1677 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1759 case RX_QUEUED: 1678 /* must be after MMIC verify so header is counted in MPDU mic */
1760 I802_DEBUG_INC(sdata->local->rx_handlers_queued); 1679 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1761 break; 1680 CALL_RXH(ieee80211_rx_h_amsdu)
1762 } 1681 CALL_RXH(ieee80211_rx_h_data)
1763 break; 1682 CALL_RXH(ieee80211_rx_h_ctrl)
1764 } 1683 CALL_RXH(ieee80211_rx_h_mgmt)
1765 1684
1685#undef CALL_RXH
1686
1687 rxh_done:
1766 switch (res) { 1688 switch (res) {
1767 case RX_CONTINUE:
1768 case RX_DROP_MONITOR: 1689 case RX_DROP_MONITOR:
1690 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1691 if (rx->sta)
1692 rx->sta->rx_dropped++;
1693 /* fall through */
1694 case RX_CONTINUE:
1769 ieee80211_rx_cooked_monitor(rx); 1695 ieee80211_rx_cooked_monitor(rx);
1770 break; 1696 break;
1771 case RX_DROP_UNUSABLE: 1697 case RX_DROP_UNUSABLE:
1698 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1699 if (rx->sta)
1700 rx->sta->rx_dropped++;
1772 dev_kfree_skb(rx->skb); 1701 dev_kfree_skb(rx->skb);
1773 break; 1702 break;
1703 case RX_QUEUED:
1704 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1705 break;
1774 } 1706 }
1775} 1707}
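The refactor above trades the NULL-terminated function-pointer table for direct calls wrapped in CALL_RXH, jumping out on the first result other than RX_CONTINUE; direct calls let the compiler inline the handlers (the new debug_noinline annotations opt back out of that for debugging) and avoid the indirect-branch cost of the old loop. A self-contained sketch of the pattern with hypothetical handlers:

enum res { RES_CONTINUE, RES_DROP, RES_QUEUED };
struct rx_ctx { int dummy; };

static enum res handler_a(struct rx_ctx *rx) { (void)rx; return RES_CONTINUE; }
static enum res handler_b(struct rx_ctx *rx) { (void)rx; return RES_QUEUED; }

static enum res run_handlers(struct rx_ctx *rx)
{
	enum res res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(rx);		\
		if (res != RES_CONTINUE)\
			goto done;	\
	} while (0)

	CALL_RXH(handler_a);
	CALL_RXH(handler_b);
#undef CALL_RXH
done:
	return res;
}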
1776 1708
@@ -1801,9 +1733,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1801 case IEEE80211_IF_TYPE_IBSS: 1733 case IEEE80211_IF_TYPE_IBSS:
1802 if (!bssid) 1734 if (!bssid)
1803 return 0; 1735 return 0;
1804 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && 1736 if (ieee80211_is_beacon(hdr->frame_control)) {
1805 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 1737 if (!rx->sta)
1738 rx->sta = ieee80211_ibss_add_sta(sdata->dev,
1739 rx->skb, bssid, hdr->addr2,
1740 BIT(rx->status->rate_idx));
1806 return 1; 1741 return 1;
1742 }
1807 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1743 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1808 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 1744 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1809 return 0; 1745 return 0;
@@ -1816,7 +1752,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1816 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1752 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1817 } else if (!rx->sta) 1753 } else if (!rx->sta)
1818 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1754 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
1819 bssid, hdr->addr2); 1755 bssid, hdr->addr2,
1756 BIT(rx->status->rate_idx));
1820 break; 1757 break;
1821 case IEEE80211_IF_TYPE_MESH_POINT: 1758 case IEEE80211_IF_TYPE_MESH_POINT:
1822 if (!multicast && 1759 if (!multicast &&
@@ -1840,15 +1777,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1840 return 0; 1777 return 0;
1841 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1778 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1842 } 1779 }
1843 if (sdata->dev == sdata->local->mdev &&
1844 !(rx->flags & IEEE80211_RX_IN_SCAN))
1845 /* do not receive anything via
1846 * master device when not scanning */
1847 return 0;
1848 break; 1780 break;
1849 case IEEE80211_IF_TYPE_WDS: 1781 case IEEE80211_IF_TYPE_WDS:
1850 if (bssid || 1782 if (bssid || !ieee80211_is_data(hdr->frame_control))
1851 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
1852 return 0; 1783 return 0;
1853 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 1784 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1854 return 0; 1785 return 0;
@@ -1872,7 +1803,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1872static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1803static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1873 struct sk_buff *skb, 1804 struct sk_buff *skb,
1874 struct ieee80211_rx_status *status, 1805 struct ieee80211_rx_status *status,
1875 u32 load,
1876 struct ieee80211_rate *rate) 1806 struct ieee80211_rate *rate)
1877{ 1807{
1878 struct ieee80211_local *local = hw_to_local(hw); 1808 struct ieee80211_local *local = hw_to_local(hw);
@@ -1891,7 +1821,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1891 rx.local = local; 1821 rx.local = local;
1892 1822
1893 rx.status = status; 1823 rx.status = status;
1894 rx.load = load;
1895 rx.rate = rate; 1824 rx.rate = rate;
1896 rx.fc = le16_to_cpu(hdr->frame_control); 1825 rx.fc = le16_to_cpu(hdr->frame_control);
1897 type = rx.fc & IEEE80211_FCTL_FTYPE; 1826 type = rx.fc & IEEE80211_FCTL_FTYPE;
@@ -2000,7 +1929,6 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2000 struct ieee80211_rx_status status; 1929 struct ieee80211_rx_status status;
2001 u16 head_seq_num, buf_size; 1930 u16 head_seq_num, buf_size;
2002 int index; 1931 int index;
2003 u32 pkt_load;
2004 struct ieee80211_supported_band *sband; 1932 struct ieee80211_supported_band *sband;
2005 struct ieee80211_rate *rate; 1933 struct ieee80211_rate *rate;
2006 1934
@@ -2035,12 +1963,9 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2035 sizeof(status)); 1963 sizeof(status));
2036 sband = local->hw.wiphy->bands[status.band]; 1964 sband = local->hw.wiphy->bands[status.band];
2037 rate = &sband->bitrates[status.rate_idx]; 1965 rate = &sband->bitrates[status.rate_idx];
2038 pkt_load = ieee80211_rx_load_stats(local,
2039 tid_agg_rx->reorder_buf[index],
2040 &status, rate);
2041 __ieee80211_rx_handle_packet(hw, 1966 __ieee80211_rx_handle_packet(hw,
2042 tid_agg_rx->reorder_buf[index], 1967 tid_agg_rx->reorder_buf[index],
2043 &status, pkt_load, rate); 1968 &status, rate);
2044 tid_agg_rx->stored_mpdu_num--; 1969 tid_agg_rx->stored_mpdu_num--;
2045 tid_agg_rx->reorder_buf[index] = NULL; 1970 tid_agg_rx->reorder_buf[index] = NULL;
2046 } 1971 }
@@ -2082,11 +2007,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2082 sizeof(status)); 2007 sizeof(status));
2083 sband = local->hw.wiphy->bands[status.band]; 2008 sband = local->hw.wiphy->bands[status.band];
2084 rate = &sband->bitrates[status.rate_idx]; 2009 rate = &sband->bitrates[status.rate_idx];
2085 pkt_load = ieee80211_rx_load_stats(local,
2086 tid_agg_rx->reorder_buf[index],
2087 &status, rate);
2088 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2010 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2089 &status, pkt_load, rate); 2011 &status, rate);
2090 tid_agg_rx->stored_mpdu_num--; 2012 tid_agg_rx->stored_mpdu_num--;
2091 tid_agg_rx->reorder_buf[index] = NULL; 2013 tid_agg_rx->reorder_buf[index] = NULL;
2092 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2014 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -2103,32 +2025,29 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2103 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2025 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2104 struct sta_info *sta; 2026 struct sta_info *sta;
2105 struct tid_ampdu_rx *tid_agg_rx; 2027 struct tid_ampdu_rx *tid_agg_rx;
2106 u16 fc, sc; 2028 u16 sc;
2107 u16 mpdu_seq_num; 2029 u16 mpdu_seq_num;
2108 u8 ret = 0, *qc; 2030 u8 ret = 0;
2109 int tid; 2031 int tid;
2110 2032
2111 sta = sta_info_get(local, hdr->addr2); 2033 sta = sta_info_get(local, hdr->addr2);
2112 if (!sta) 2034 if (!sta)
2113 return ret; 2035 return ret;
2114 2036
2115 fc = le16_to_cpu(hdr->frame_control);
2116
2117 /* filter the QoS data rx stream according to 2037 /* filter the QoS data rx stream according to
2118 * STA/TID and check if this STA/TID is on aggregation */ 2038 * STA/TID and check if this STA/TID is on aggregation */
2119 if (!WLAN_FC_IS_QOS_DATA(fc)) 2039 if (!ieee80211_is_data_qos(hdr->frame_control))
2120 goto end_reorder; 2040 goto end_reorder;
2121 2041
2122 qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN; 2042 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
2123 tid = qc[0] & QOS_CONTROL_TID_MASK;
2124 2043
2125 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) 2044 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2126 goto end_reorder; 2045 goto end_reorder;
2127 2046
2128 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; 2047 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2129 2048
2130 /* null data frames are excluded */ 2049 /* qos null data frames are excluded */
2131 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC)) 2050 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
2132 goto end_reorder; 2051 goto end_reorder;
2133 2052
2134 /* new un-ordered ampdu frame - process it */ 2053 /* new un-ordered ampdu frame - process it */
@@ -2165,7 +2084,6 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2165 struct ieee80211_rx_status *status) 2084 struct ieee80211_rx_status *status)
2166{ 2085{
2167 struct ieee80211_local *local = hw_to_local(hw); 2086 struct ieee80211_local *local = hw_to_local(hw);
2168 u32 pkt_load;
2169 struct ieee80211_rate *rate = NULL; 2087 struct ieee80211_rate *rate = NULL;
2170 struct ieee80211_supported_band *sband; 2088 struct ieee80211_supported_band *sband;
2171 2089
@@ -2205,11 +2123,8 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2205 return; 2123 return;
2206 } 2124 }
2207 2125
2208 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2209 local->channel_use_raw += pkt_load;
2210
2211 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2126 if (!ieee80211_rx_reorder_ampdu(local, skb))
2212 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate); 2127 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2213 2128
2214 rcu_read_unlock(); 2129 rcu_read_unlock();
2215} 2130}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7d4fe4a52929..f2ba653b9d69 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -135,6 +135,7 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
135/** 135/**
136 * __sta_info_free - internal STA free helper 136 * __sta_info_free - internal STA free helper
137 * 137 *
138 * @local: pointer to the global information
138 * @sta: STA info to free 139 * @sta: STA info to free
139 * 140 *
140 * This function must undo everything done by sta_info_alloc() 141 * This function must undo everything done by sta_info_alloc()
@@ -202,14 +203,12 @@ void sta_info_destroy(struct sta_info *sta)
202 dev_kfree_skb_any(skb); 203 dev_kfree_skb_any(skb);
203 204
204 for (i = 0; i < STA_TID_NUM; i++) { 205 for (i = 0; i < STA_TID_NUM; i++) {
205 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 206 spin_lock_bh(&sta->lock);
206 if (sta->ampdu_mlme.tid_rx[i]) 207 if (sta->ampdu_mlme.tid_rx[i])
207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); 208 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
208 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
209 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
210 if (sta->ampdu_mlme.tid_tx[i]) 209 if (sta->ampdu_mlme.tid_tx[i])
211 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); 210 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
212 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 211 spin_unlock_bh(&sta->lock);
213 } 212 }
214 213
215 __sta_info_free(local, sta); 214 __sta_info_free(local, sta);
@@ -236,6 +235,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
236 if (!sta) 235 if (!sta)
237 return NULL; 236 return NULL;
238 237
238 spin_lock_init(&sta->lock);
239 spin_lock_init(&sta->flaglock);
240
239 memcpy(sta->addr, addr, ETH_ALEN); 241 memcpy(sta->addr, addr, ETH_ALEN);
240 sta->local = local; 242 sta->local = local;
241 sta->sdata = sdata; 243 sta->sdata = sdata;
@@ -249,15 +251,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
249 return NULL; 251 return NULL;
250 } 252 }
251 253
252 spin_lock_init(&sta->ampdu_mlme.ampdu_rx);
253 spin_lock_init(&sta->ampdu_mlme.ampdu_tx);
254 for (i = 0; i < STA_TID_NUM; i++) { 254 for (i = 0; i < STA_TID_NUM; i++) {
255 /* timer_to_tid must be initialized with identity mapping to 255 /* timer_to_tid must be initialized with identity mapping to
256 * enable session_timer's data differentiation. refer to 256 * enable session_timer's data differentiation. refer to
257 * sta_rx_agg_session_timer_expired for usage */ 257
258 sta->timer_to_tid[i] = i; 258 sta->timer_to_tid[i] = i;
259 /* tid to tx queue: initialize according to HW (0 is valid) */ 259 /* tid to tx queue: initialize according to HW (0 is valid) */
260 sta->tid_to_tx_q[i] = local->hw.queues; 260 sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw);
261 /* rx */ 261 /* rx */
262 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; 262 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
263 sta->ampdu_mlme.tid_rx[i] = NULL; 263 sta->ampdu_mlme.tid_rx[i] = NULL;
@@ -276,7 +276,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
276 276
277#ifdef CONFIG_MAC80211_MESH 277#ifdef CONFIG_MAC80211_MESH
278 sta->plink_state = PLINK_LISTEN; 278 sta->plink_state = PLINK_LISTEN;
279 spin_lock_init(&sta->plink_lock);
280 init_timer(&sta->plink_timer); 279 init_timer(&sta->plink_timer);
281#endif 280#endif
282 281
@@ -321,7 +320,9 @@ int sta_info_insert(struct sta_info *sta)
321 /* notify driver */ 320 /* notify driver */
322 if (local->ops->sta_notify) { 321 if (local->ops->sta_notify) {
323 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 322 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
324 sdata = sdata->u.vlan.ap; 323 sdata = container_of(sdata->bss,
324 struct ieee80211_sub_if_data,
325 u.ap);
325 326
326 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 327 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
327 STA_NOTIFY_ADD, sta->addr); 328 STA_NOTIFY_ADD, sta->addr);
@@ -376,8 +377,10 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
376static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, 377static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
377 struct sta_info *sta) 378 struct sta_info *sta)
378{ 379{
379 if (bss) 380 BUG_ON(!bss);
380 __bss_tim_set(bss, sta->aid); 381
382 __bss_tim_set(bss, sta->aid);
383
381 if (sta->local->ops->set_tim) { 384 if (sta->local->ops->set_tim) {
382 sta->local->tim_in_locked_section = true; 385 sta->local->tim_in_locked_section = true;
383 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); 386 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1);
@@ -389,6 +392,8 @@ void sta_info_set_tim_bit(struct sta_info *sta)
389{ 392{
390 unsigned long flags; 393 unsigned long flags;
391 394
395 BUG_ON(!sta->sdata->bss);
396
392 spin_lock_irqsave(&sta->local->sta_lock, flags); 397 spin_lock_irqsave(&sta->local->sta_lock, flags);
393 __sta_info_set_tim_bit(sta->sdata->bss, sta); 398 __sta_info_set_tim_bit(sta->sdata->bss, sta);
394 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 399 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
@@ -397,8 +402,10 @@ void sta_info_set_tim_bit(struct sta_info *sta)
397static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, 402static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
398 struct sta_info *sta) 403 struct sta_info *sta)
399{ 404{
400 if (bss) 405 BUG_ON(!bss);
401 __bss_tim_clear(bss, sta->aid); 406
407 __bss_tim_clear(bss, sta->aid);
408
402 if (sta->local->ops->set_tim) { 409 if (sta->local->ops->set_tim) {
403 sta->local->tim_in_locked_section = true; 410 sta->local->tim_in_locked_section = true;
404 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); 411 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0);
@@ -410,6 +417,8 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
410{ 417{
411 unsigned long flags; 418 unsigned long flags;
412 419
420 BUG_ON(!sta->sdata->bss);
421
413 spin_lock_irqsave(&sta->local->sta_lock, flags); 422 spin_lock_irqsave(&sta->local->sta_lock, flags);
414 __sta_info_clear_tim_bit(sta->sdata->bss, sta); 423 __sta_info_clear_tim_bit(sta->sdata->bss, sta);
415 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 424 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
@@ -437,10 +446,10 @@ void __sta_info_unlink(struct sta_info **sta)
437 446
438 list_del(&(*sta)->list); 447 list_del(&(*sta)->list);
439 448
440 if ((*sta)->flags & WLAN_STA_PS) { 449 if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) {
441 (*sta)->flags &= ~WLAN_STA_PS; 450 BUG_ON(!sdata->bss);
442 if (sdata->bss) 451
443 atomic_dec(&sdata->bss->num_sta_ps); 452 atomic_dec(&sdata->bss->num_sta_ps);
444 __sta_info_clear_tim_bit(sdata->bss, *sta); 453 __sta_info_clear_tim_bit(sdata->bss, *sta);
445 } 454 }
446 455
@@ -448,7 +457,9 @@ void __sta_info_unlink(struct sta_info **sta)
448 457
449 if (local->ops->sta_notify) { 458 if (local->ops->sta_notify) {
450 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 459 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
451 sdata = sdata->u.vlan.ap; 460 sdata = container_of(sdata->bss,
461 struct ieee80211_sub_if_data,
462 u.ap);
452 463
453 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 464 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
454 STA_NOTIFY_REMOVE, (*sta)->addr); 465 STA_NOTIFY_REMOVE, (*sta)->addr);
@@ -515,20 +526,20 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local,
515 struct sta_info *sta, 526 struct sta_info *sta,
516 struct sk_buff *skb) 527 struct sk_buff *skb)
517{ 528{
518 struct ieee80211_tx_packet_data *pkt_data; 529 struct ieee80211_tx_info *info;
519 int timeout; 530 int timeout;
520 531
521 if (!skb) 532 if (!skb)
522 return 0; 533 return 0;
523 534
524 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 535 info = IEEE80211_SKB_CB(skb);
525 536
526 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 537 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
527 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / 538 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 /
528 15625) * HZ; 539 15625) * HZ;
529 if (timeout < STA_TX_BUFFER_EXPIRE) 540 if (timeout < STA_TX_BUFFER_EXPIRE)
530 timeout = STA_TX_BUFFER_EXPIRE; 541 timeout = STA_TX_BUFFER_EXPIRE;
531 return time_after(jiffies, pkt_data->jiffies + timeout); 542 return time_after(jiffies, info->control.jiffies + timeout);
532} 543}
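The comment and the constant agree: beacon_int is in 1024-microsecond time units, so 2 * listen_interval * beacon_int * 1024 / 1000000 seconds reduces, dividing numerator and denominator by 64, to listen_interval * beacon_int * 32 / 15625 seconds, which the code scales by HZ into jiffies. As a standalone function:

static unsigned long ps_buffer_timeout_jiffies(int listen_interval,
					       int beacon_int,
					       unsigned long hz)
{
	/* == 2 * listen_interval * beacon_int * 1024us, in jiffies */
	return (listen_interval * beacon_int * 32 / 15625) * hz;
}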
533 544
534 545
@@ -557,8 +568,10 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
557 568
558 sdata = sta->sdata; 569 sdata = sta->sdata;
559 local->total_ps_buffered--; 570 local->total_ps_buffered--;
571#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
560 printk(KERN_DEBUG "Buffered frame expired (STA " 572 printk(KERN_DEBUG "Buffered frame expired (STA "
561 "%s)\n", print_mac(mac, sta->addr)); 573 "%s)\n", print_mac(mac, sta->addr));
574#endif
562 dev_kfree_skb(skb); 575 dev_kfree_skb(skb);
563 576
564 if (skb_queue_empty(&sta->ps_tx_buf)) 577 if (skb_queue_empty(&sta->ps_tx_buf))
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index f8c95bc9659c..109db787ccb7 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -32,7 +32,7 @@
32 * @WLAN_STA_WDS: Station is one of our WDS peers. 32 * @WLAN_STA_WDS: Station is one of our WDS peers.
33 * @WLAN_STA_PSPOLL: Station has just PS-polled us. 33 * @WLAN_STA_PSPOLL: Station has just PS-polled us.
34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the 34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
35 * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 */ 37 */
38enum ieee80211_sta_info_flags { 38enum ieee80211_sta_info_flags {
@@ -129,23 +129,19 @@ enum plink_state {
129 * 129 *
130 * @tid_state_rx: TID's state in Rx session state machine. 130 * @tid_state_rx: TID's state in Rx session state machine.
131 * @tid_rx: aggregation info for Rx per TID 131 * @tid_rx: aggregation info for Rx per TID
132 * @ampdu_rx: for locking sections in aggregation Rx flow
133 * @tid_state_tx: TID's state in Tx session state machine. 132 * @tid_state_tx: TID's state in Tx session state machine.
134 * @tid_tx: aggregation info for Tx per TID 133 * @tid_tx: aggregation info for Tx per TID
135 * @addba_req_num: number of times addBA request has been sent. 134 * @addba_req_num: number of times addBA request has been sent.
136 * @ampdu_tx: for locking sections in aggregation Tx flow
137 * @dialog_token_allocator: dialog token enumerator for each new session; 135 * @dialog_token_allocator: dialog token enumerator for each new session;
138 */ 136 */
139struct sta_ampdu_mlme { 137struct sta_ampdu_mlme {
140 /* rx */ 138 /* rx */
141 u8 tid_state_rx[STA_TID_NUM]; 139 u8 tid_state_rx[STA_TID_NUM];
142 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 140 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
143 spinlock_t ampdu_rx;
144 /* tx */ 141 /* tx */
145 u8 tid_state_tx[STA_TID_NUM]; 142 u8 tid_state_tx[STA_TID_NUM];
146 struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; 143 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
147 u8 addba_req_num[STA_TID_NUM]; 144 u8 addba_req_num[STA_TID_NUM];
148 spinlock_t ampdu_tx;
149 u8 dialog_token_allocator; 145 u8 dialog_token_allocator;
150}; 146};
151 147
@@ -164,9 +160,20 @@ struct sta_ampdu_mlme {
164 * @list: global linked list entry 160 * @list: global linked list entry
165 * @hnext: hash table linked list pointer 161 * @hnext: hash table linked list pointer
166 * @local: pointer to the global information 162 * @local: pointer to the global information
163 * @sdata: TBD
164 * @key: TBD
165 * @rate_ctrl: TBD
166 * @rate_ctrl_priv: TBD
167 * @lock: used for locking all fields that require locking, see comments
168 * in the header file.
169 * @flaglock: spinlock for flags accesses
170 * @ht_info: HT capabilities of this STA
171 * @supp_rates: Bitmap of supported rates (per band)
167 * @addr: MAC address of this STA 172 * @addr: MAC address of this STA
168 * @aid: STA's unique AID (1..2007, 0 = not assigned yet), 173 * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
169 * only used in AP (and IBSS?) mode 174 * only used in AP (and IBSS?) mode
175 * @listen_interval: TBD
176 * @pin_status: TBD
170 * @flags: STA flags, see &enum ieee80211_sta_info_flags 177 * @flags: STA flags, see &enum ieee80211_sta_info_flags
171 * @ps_tx_buf: buffer of frames to transmit to this station 178 * @ps_tx_buf: buffer of frames to transmit to this station
172 * when it leaves power saving state 179 * when it leaves power saving state
@@ -175,8 +182,41 @@ struct sta_ampdu_mlme {
175 * power saving state 182 * power saving state
176 * @rx_packets: Number of MSDUs received from this STA 183 * @rx_packets: Number of MSDUs received from this STA
177 * @rx_bytes: Number of bytes received from this STA 184 * @rx_bytes: Number of bytes received from this STA
178 * @supp_rates: Bitmap of supported rates (per band) 185 * @wep_weak_iv_count: TBD
179 * @ht_info: HT capabilities of this STA 186 * @last_rx: TBD
187 * @num_duplicates: number of duplicate frames received from this STA
188 * @rx_fragments: number of received MPDUs
189 * @rx_dropped: number of dropped MPDUs from this STA
190 * @last_signal: signal of last received frame from this STA
191 * @last_qual: qual of last received frame from this STA
192 * @last_noise: noise of last received frame from this STA
193 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
194 * @wme_rx_queue: TBD
195 * @tx_filtered_count: TBD
196 * @tx_retry_failed: TBD
197 * @tx_retry_count: TBD
198 * @tx_num_consecutive_failures: TBD
199 * @tx_num_mpdu_ok: TBD
200 * @tx_num_mpdu_fail: TBD
201 * @fail_avg: moving percentage of failed MSDUs
202 * @tx_packets: number of RX/TX MSDUs
203 * @tx_bytes: TBD
204 * @tx_fragments: number of transmitted MPDUs
205 * @txrate_idx: TBD
206 * @last_txrate_idx: TBD
207 * @wme_tx_queue: TBD
208 * @ampdu_mlme: TBD
209 * @timer_to_tid: identity mapping to ID timers
210 * @tid_to_tx_q: map tid to tx queue
211 * @llid: Local link ID
212 * @plid: Peer link ID
213 * @reason: Cancel reason on PLINK_HOLDING state
214 * @plink_retries: Retries in establishment
215 * @ignore_plink_timer: TBD
216 * @plink_state: TBD
217 * @plink_timeout: TBD
218 * @plink_timer: TBD
219 * @debugfs: debug filesystem info
180 */ 220 */
181struct sta_info { 221struct sta_info {
182 /* General information, mostly static */ 222 /* General information, mostly static */
@@ -187,6 +227,8 @@ struct sta_info {
187 struct ieee80211_key *key; 227 struct ieee80211_key *key;
188 struct rate_control_ref *rate_ctrl; 228 struct rate_control_ref *rate_ctrl;
189 void *rate_ctrl_priv; 229 void *rate_ctrl_priv;
230 spinlock_t lock;
231 spinlock_t flaglock;
190 struct ieee80211_ht_info ht_info; 232 struct ieee80211_ht_info ht_info;
191 u64 supp_rates[IEEE80211_NUM_BANDS]; 233 u64 supp_rates[IEEE80211_NUM_BANDS];
192 u8 addr[ETH_ALEN]; 234 u8 addr[ETH_ALEN];
@@ -199,7 +241,10 @@ struct sta_info {
199 */ 241 */
200 u8 pin_status; 242 u8 pin_status;
201 243
202 /* frequently updated information, needs locking? */ 244 /*
245 * frequently updated, locked with own spinlock (flaglock),
246 * use the accessors defined below
247 */
203 u32 flags; 248 u32 flags;
204 249
205 /* 250 /*
@@ -213,14 +258,12 @@ struct sta_info {
213 unsigned long rx_packets, rx_bytes; 258 unsigned long rx_packets, rx_bytes;
214 unsigned long wep_weak_iv_count; 259 unsigned long wep_weak_iv_count;
215 unsigned long last_rx; 260 unsigned long last_rx;
216 unsigned long num_duplicates; /* number of duplicate frames received 261 unsigned long num_duplicates;
217 * from this STA */ 262 unsigned long rx_fragments;
218 unsigned long rx_fragments; /* number of received MPDUs */ 263 unsigned long rx_dropped;
219 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ 264 int last_signal;
220 int last_rssi; /* RSSI of last received frame from this STA */ 265 int last_qual;
221 int last_signal; /* signal of last received frame from this STA */ 266 int last_noise;
222 int last_noise; /* noise of last received frame from this STA */
223 /* last received seq/frag number from this STA (per RX queue) */
224 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 267 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
225#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 268#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
226 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; 269 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
@@ -237,42 +280,36 @@ struct sta_info {
237 unsigned int fail_avg; 280 unsigned int fail_avg;
238 281
239 /* Updated from TX path only, no locking requirements */ 282 /* Updated from TX path only, no locking requirements */
240 unsigned long tx_packets; /* number of RX/TX MSDUs */ 283 unsigned long tx_packets;
241 unsigned long tx_bytes; 284 unsigned long tx_bytes;
242 unsigned long tx_fragments; /* number of transmitted MPDUs */ 285 unsigned long tx_fragments;
243 int txrate_idx; 286 int txrate_idx;
244 int last_txrate_idx; 287 int last_txrate_idx;
288 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
245#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 289#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
246 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; 290 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
247#endif 291#endif
248 292
249 /* Debug counters, no locking doesn't matter */
250 int channel_use;
251 int channel_use_raw;
252
253 /* 293 /*
254 * Aggregation information, comes with own locking. 294 * Aggregation information, locked with lock.
255 */ 295 */
256 struct sta_ampdu_mlme ampdu_mlme; 296 struct sta_ampdu_mlme ampdu_mlme;
257 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */ 297 u8 timer_to_tid[STA_TID_NUM];
258 u8 tid_to_tx_q[STA_TID_NUM]; /* map tid to tx queue */ 298 u8 tid_to_tx_q[STA_TID_NUM];
259 299
260#ifdef CONFIG_MAC80211_MESH 300#ifdef CONFIG_MAC80211_MESH
261 /* 301 /*
262 * Mesh peer link attributes 302 * Mesh peer link attributes
263 * TODO: move to a sub-structure that is referenced with pointer? 303 * TODO: move to a sub-structure that is referenced with pointer?
264 */ 304 */
265 __le16 llid; /* Local link ID */ 305 __le16 llid;
266 __le16 plid; /* Peer link ID */ 306 __le16 plid;
267 __le16 reason; /* Cancel reason on PLINK_HOLDING state */ 307 __le16 reason;
268 u8 plink_retries; /* Retries in establishment */ 308 u8 plink_retries;
269 bool ignore_plink_timer; 309 bool ignore_plink_timer;
270 enum plink_state plink_state; 310 enum plink_state plink_state;
271 u32 plink_timeout; 311 u32 plink_timeout;
272 struct timer_list plink_timer; 312 struct timer_list plink_timer;
273 spinlock_t plink_lock; /* For peer_state reads / updates and other
274 updates in the structure. Ensures robust
275 transitions for the peerlink FSM */
276#endif 313#endif
277 314
278#ifdef CONFIG_MAC80211_DEBUGFS 315#ifdef CONFIG_MAC80211_DEBUGFS
@@ -299,6 +336,73 @@ static inline enum plink_state sta_plink_state(struct sta_info *sta)
299 return PLINK_LISTEN; 336 return PLINK_LISTEN;
300} 337}
301 338
339static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
340{
341 unsigned long irqfl;
342
343 spin_lock_irqsave(&sta->flaglock, irqfl);
344 sta->flags |= flags;
345 spin_unlock_irqrestore(&sta->flaglock, irqfl);
346}
347
348static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
349{
350 unsigned long irqfl;
351
352 spin_lock_irqsave(&sta->flaglock, irqfl);
353 sta->flags &= ~flags;
354 spin_unlock_irqrestore(&sta->flaglock, irqfl);
355}
356
357static inline void set_and_clear_sta_flags(struct sta_info *sta,
358 const u32 set, const u32 clear)
359{
360 unsigned long irqfl;
361
362 spin_lock_irqsave(&sta->flaglock, irqfl);
363 sta->flags |= set;
364 sta->flags &= ~clear;
365 spin_unlock_irqrestore(&sta->flaglock, irqfl);
366}
367
368static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
369{
370 u32 ret;
371 unsigned long irqfl;
372
373 spin_lock_irqsave(&sta->flaglock, irqfl);
374 ret = sta->flags & flags;
375 spin_unlock_irqrestore(&sta->flaglock, irqfl);
376
377 return ret;
378}
379
380static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
381 const u32 flags)
382{
383 u32 ret;
384 unsigned long irqfl;
385
386 spin_lock_irqsave(&sta->flaglock, irqfl);
387 ret = sta->flags & flags;
388 sta->flags &= ~flags;
389 spin_unlock_irqrestore(&sta->flaglock, irqfl);
390
391 return ret;
392}
393
394static inline u32 get_sta_flags(struct sta_info *sta)
395{
396 u32 ret;
397 unsigned long irqfl;
398
399 spin_lock_irqsave(&sta->flaglock, irqfl);
400 ret = sta->flags;
401 spin_unlock_irqrestore(&sta->flaglock, irqfl);
402
403 return ret;
404}
405
302 406
303/* Maximum number of concurrently registered stations */ 407/* Maximum number of concurrently registered stations */
304#define MAX_STA_COUNT 2007 408#define MAX_STA_COUNT 2007
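[Editor's note: the accessors above make every flags update a short critical
section under flaglock; the old pattern in __sta_info_unlink() read, tested
and cleared WLAN_STA_PS in separate unlocked steps. A minimal userspace
analogue of the test-and-clear pattern, with a pthread spinlock standing in
for the kernel spinlock and illustrative names, not part of this patch:]

#include <pthread.h>
#include <stdint.h>

struct sta {
	pthread_spinlock_t flaglock;
	uint32_t flags;
};

static uint32_t test_and_clear_flags(struct sta *sta, uint32_t mask)
{
	uint32_t ret;

	pthread_spin_lock(&sta->flaglock);
	ret = sta->flags & mask;	/* report which of the bits were set */
	sta->flags &= ~mask;		/* and clear them in the same section */
	pthread_spin_unlock(&sta->flaglock);
	return ret;
}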
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 09093da24af6..995f7af3d25e 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -6,25 +6,23 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9
10#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/bitops.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <asm/unaligned.h>
13 14
14#include <net/mac80211.h> 15#include <net/mac80211.h>
15#include "key.h" 16#include "key.h"
16#include "tkip.h" 17#include "tkip.h"
17#include "wep.h" 18#include "wep.h"
18 19
19
20/* TKIP key mixing functions */
21
22
23#define PHASE1_LOOP_COUNT 8 20#define PHASE1_LOOP_COUNT 8
24 21
25 22/*
26/* 2-byte by 2-byte subset of the full AES S-box table; second part of this 23 * 2-byte by 2-byte subset of the full AES S-box table; second part of this
27 * table is identical to first part but byte-swapped */ 24 * table is identical to first part but byte-swapped
25 */
28static const u16 tkip_sbox[256] = 26static const u16 tkip_sbox[256] =
29{ 27{
30 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 28 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
@@ -61,84 +59,54 @@ static const u16 tkip_sbox[256] =
61 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, 59 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
62}; 60};
63 61
64 62static u16 tkipS(u16 val)
65static inline u16 Mk16(u8 x, u8 y)
66{ 63{
67 return ((u16) x << 8) | (u16) y; 64 return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]);
68} 65}
69 66
70 67static u8 *write_tkip_iv(u8 *pos, u16 iv16)
71static inline u8 Hi8(u16 v)
72{
73 return v >> 8;
74}
75
76
77static inline u8 Lo8(u16 v)
78{
79 return v & 0xff;
80}
81
82
83static inline u16 Hi16(u32 v)
84{
85 return v >> 16;
86}
87
88
89static inline u16 Lo16(u32 v)
90{
91 return v & 0xffff;
92}
93
94
95static inline u16 RotR1(u16 v)
96{
97 return (v >> 1) | ((v & 0x0001) << 15);
98}
99
100
101static inline u16 tkip_S(u16 val)
102{ 68{
103 u16 a = tkip_sbox[Hi8(val)]; 69 *pos++ = iv16 >> 8;
104 70 *pos++ = ((iv16 >> 8) | 0x20) & 0x7f;
105 return tkip_sbox[Lo8(val)] ^ Hi8(a) ^ (Lo8(a) << 8); 71 *pos++ = iv16 & 0xFF;
72 return pos;
106} 73}
107 74
108 75/*
109 76 * P1K := Phase1(TA, TK, TSC)
110/* P1K := Phase1(TA, TK, TSC)
111 * TA = transmitter address (48 bits) 77 * TA = transmitter address (48 bits)
112 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) 78 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits)
113 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) 79 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used)
114 * P1K: 80 bits 80 * P1K: 80 bits
115 */ 81 */
116static void tkip_mixing_phase1(const u8 *ta, const u8 *tk, u32 tsc_IV32, 82static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
117 u16 *p1k) 83 const u8 *ta, u32 tsc_IV32)
118{ 84{
119 int i, j; 85 int i, j;
86 u16 *p1k = ctx->p1k;
120 87
121 p1k[0] = Lo16(tsc_IV32); 88 p1k[0] = tsc_IV32 & 0xFFFF;
122 p1k[1] = Hi16(tsc_IV32); 89 p1k[1] = tsc_IV32 >> 16;
123 p1k[2] = Mk16(ta[1], ta[0]); 90 p1k[2] = get_unaligned_le16(ta + 0);
124 p1k[3] = Mk16(ta[3], ta[2]); 91 p1k[3] = get_unaligned_le16(ta + 2);
125 p1k[4] = Mk16(ta[5], ta[4]); 92 p1k[4] = get_unaligned_le16(ta + 4);
126 93
127 for (i = 0; i < PHASE1_LOOP_COUNT; i++) { 94 for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
128 j = 2 * (i & 1); 95 j = 2 * (i & 1);
129 p1k[0] += tkip_S(p1k[4] ^ Mk16(tk[ 1 + j], tk[ 0 + j])); 96 p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
130 p1k[1] += tkip_S(p1k[0] ^ Mk16(tk[ 5 + j], tk[ 4 + j])); 97 p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
131 p1k[2] += tkip_S(p1k[1] ^ Mk16(tk[ 9 + j], tk[ 8 + j])); 98 p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
132 p1k[3] += tkip_S(p1k[2] ^ Mk16(tk[13 + j], tk[12 + j])); 99 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
133 p1k[4] += tkip_S(p1k[3] ^ Mk16(tk[ 1 + j], tk[ 0 + j])) + i; 100 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
134 } 101 }
102 ctx->initialized = 1;
135} 103}
136 104
137 105static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
138static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16, 106 u16 tsc_IV16, u8 *rc4key)
139 u8 *rc4key)
140{ 107{
141 u16 ppk[6]; 108 u16 ppk[6];
109 const u16 *p1k = ctx->p1k;
142 int i; 110 int i;
143 111
144 ppk[0] = p1k[0]; 112 ppk[0] = p1k[0];
@@ -148,70 +116,35 @@ static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16,
148 ppk[4] = p1k[4]; 116 ppk[4] = p1k[4];
149 ppk[5] = p1k[4] + tsc_IV16; 117 ppk[5] = p1k[4] + tsc_IV16;
150 118
151 ppk[0] += tkip_S(ppk[5] ^ Mk16(tk[ 1], tk[ 0])); 119 ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
152 ppk[1] += tkip_S(ppk[0] ^ Mk16(tk[ 3], tk[ 2])); 120 ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
153 ppk[2] += tkip_S(ppk[1] ^ Mk16(tk[ 5], tk[ 4])); 121 ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
154 ppk[3] += tkip_S(ppk[2] ^ Mk16(tk[ 7], tk[ 6])); 122 ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
155 ppk[4] += tkip_S(ppk[3] ^ Mk16(tk[ 9], tk[ 8])); 123 ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
156 ppk[5] += tkip_S(ppk[4] ^ Mk16(tk[11], tk[10])); 124 ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
157 ppk[0] += RotR1(ppk[5] ^ Mk16(tk[13], tk[12])); 125 ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
158 ppk[1] += RotR1(ppk[0] ^ Mk16(tk[15], tk[14])); 126 ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
159 ppk[2] += RotR1(ppk[1]); 127 ppk[2] += ror16(ppk[1], 1);
160 ppk[3] += RotR1(ppk[2]); 128 ppk[3] += ror16(ppk[2], 1);
161 ppk[4] += RotR1(ppk[3]); 129 ppk[4] += ror16(ppk[3], 1);
162 ppk[5] += RotR1(ppk[4]); 130 ppk[5] += ror16(ppk[4], 1);
163 131
164 rc4key[0] = Hi8(tsc_IV16); 132 rc4key = write_tkip_iv(rc4key, tsc_IV16);
165 rc4key[1] = (Hi8(tsc_IV16) | 0x20) & 0x7f; 133 *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
166 rc4key[2] = Lo8(tsc_IV16); 134
167 rc4key[3] = Lo8((ppk[5] ^ Mk16(tk[1], tk[0])) >> 1); 135 for (i = 0; i < 6; i++)
168 136 put_unaligned_le16(ppk[i], rc4key + 2 * i);
169 for (i = 0; i < 6; i++) {
170 rc4key[4 + 2 * i] = Lo8(ppk[i]);
171 rc4key[5 + 2 * i] = Hi8(ppk[i]);
172 }
173} 137}
174 138
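[Editor's sketch of the intended calling cadence; tkip_next_packet_key is a
hypothetical wrapper (the real counter stepping lives in the callers), not
part of this patch. Phase 1 depends only on TA, TK and iv32 and is cached in
the tkip_ctx, so it reruns once per 65536 frames; phase 2 mixes in iv16 for
every frame:]

void tkip_next_packet_key(const u8 *tk, struct tkip_ctx *ctx,
			  const u8 *ta, u8 rc4key[16])
{
	if (ctx->iv16 == 0 || !ctx->initialized)
		tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);	/* rare */

	tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);		/* every frame */

	if (++ctx->iv16 == 0)		/* 16-bit wrap: advance the high counter */
		ctx->iv32++;
}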
175
176/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets 139/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
177 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of 140 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of
178 * the packet payload). */ 141 * the packet payload). */
179u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 142u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16)
180 u8 iv0, u8 iv1, u8 iv2)
181{ 143{
182 *pos++ = iv0; 144 pos = write_tkip_iv(pos, iv16);
183 *pos++ = iv1;
184 *pos++ = iv2;
185 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; 145 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
186 *pos++ = key->u.tkip.iv32 & 0xff; 146 put_unaligned_le32(key->u.tkip.tx.iv32, pos);
187 *pos++ = (key->u.tkip.iv32 >> 8) & 0xff; 147 return pos + 4;
188 *pos++ = (key->u.tkip.iv32 >> 16) & 0xff;
189 *pos++ = (key->u.tkip.iv32 >> 24) & 0xff;
190 return pos;
191}
192
193
194void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
195 u16 *phase1key)
196{
197 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
198 key->u.tkip.iv32, phase1key);
199}
200
201void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
202 u8 *rc4key)
203{
204 /* Calculate per-packet key */
205 if (key->u.tkip.iv16 == 0 || !key->u.tkip.tx_initialized) {
206 /* IV16 wrapped around - perform TKIP phase 1 */
207 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
208 key->u.tkip.iv32, key->u.tkip.p1k);
209 key->u.tkip.tx_initialized = 1;
210 }
211
212 tkip_mixing_phase2(key->u.tkip.p1k,
213 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
214 key->u.tkip.iv16, rc4key);
215} 148}
216 149
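[Editor's note: combining write_tkip_iv() with the Ext IV tail, the eight
octets the new ieee80211_tkip_add_iv() emits in front of the payload are, in
a standalone form that is not part of this patch:]

#include <stdint.h>

static void tkip_iv_layout(uint8_t pos[8], uint8_t keyidx,
			   uint16_t iv16, uint32_t iv32)
{
	pos[0] = iv16 >> 8;			/* TSC1 */
	pos[1] = ((iv16 >> 8) | 0x20) & 0x7f;	/* WEP seed, avoids weak RC4 keys */
	pos[2] = iv16 & 0xff;			/* TSC0 */
	pos[3] = (keyidx << 6) | (1 << 5);	/* key index plus Ext IV flag */
	pos[4] = iv32 & 0xff;			/* TSC2..TSC5, little endian */
	pos[5] = (iv32 >> 8) & 0xff;
	pos[6] = (iv32 >> 16) & 0xff;
	pos[7] = (iv32 >> 24) & 0xff;
}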
217void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, 150void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
@@ -220,48 +153,44 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
220{ 153{
221 struct ieee80211_key *key = (struct ieee80211_key *) 154 struct ieee80211_key *key = (struct ieee80211_key *)
222 container_of(keyconf, struct ieee80211_key, conf); 155 container_of(keyconf, struct ieee80211_key, conf);
223 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
224 u8 *data = (u8 *) hdr; 157 u8 *data;
225 u16 fc = le16_to_cpu(hdr->frame_control); 158 const u8 *tk;
226 int hdr_len = ieee80211_get_hdrlen(fc); 159 struct tkip_ctx *ctx;
227 u8 *ta = hdr->addr2;
228 u16 iv16; 160 u16 iv16;
229 u32 iv32; 161 u32 iv32;
230 162
231 iv16 = data[hdr_len] << 8; 163 data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
232 iv16 += data[hdr_len + 2]; 164 iv16 = data[2] | (data[0] << 8);
233 iv32 = data[hdr_len + 4] | (data[hdr_len + 5] << 8) | 165 iv32 = get_unaligned_le32(&data[4]);
234 (data[hdr_len + 6] << 16) | (data[hdr_len + 7] << 24); 166
167 tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
168 ctx = &key->u.tkip.tx;
235 169
236#ifdef CONFIG_TKIP_DEBUG 170#ifdef CONFIG_MAC80211_TKIP_DEBUG
237 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", 171 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n",
238 iv16, iv32); 172 iv16, iv32);
239 173
240 if (iv32 != key->u.tkip.iv32) { 174 if (iv32 != ctx->iv32) {
241 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", 175 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n",
242 iv32, key->u.tkip.iv32); 176 iv32, ctx->iv32);
243 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " 177 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a "
244 "fragmented packet\n"); 178 "fragmented packet\n");
245 } 179 }
246#endif /* CONFIG_TKIP_DEBUG */ 180#endif
247 181
248 /* Update the p1k only when the iv16 in the packet wraps around, this 182 /* Update the p1k only when the iv16 in the packet wraps around, this
249 * might occur after the wrap around of iv16 in the key in case of 183 * might occur after the wrap around of iv16 in the key in case of
250 * fragmented packets. */ 184 * fragmented packets. */
251 if (iv16 == 0 || !key->u.tkip.tx_initialized) { 185 if (iv16 == 0 || !ctx->initialized)
252 /* IV16 wrapped around - perform TKIP phase 1 */ 186 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
253 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
254 iv32, key->u.tkip.p1k);
255 key->u.tkip.tx_initialized = 1;
256 }
257 187
258 if (type == IEEE80211_TKIP_P1_KEY) { 188 if (type == IEEE80211_TKIP_P1_KEY) {
259 memcpy(outkey, key->u.tkip.p1k, sizeof(u16) * 5); 189 memcpy(outkey, ctx->p1k, sizeof(u16) * 5);
260 return; 190 return;
261 } 191 }
262 192
263 tkip_mixing_phase2(key->u.tkip.p1k, 193 tkip_mixing_phase2(tk, ctx, iv16, outkey);
264 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, outkey);
265} 194}
266EXPORT_SYMBOL(ieee80211_get_tkip_key); 195EXPORT_SYMBOL(ieee80211_get_tkip_key);
267 196
@@ -275,13 +204,19 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
275 u8 *pos, size_t payload_len, u8 *ta) 204 u8 *pos, size_t payload_len, u8 *ta)
276{ 205{
277 u8 rc4key[16]; 206 u8 rc4key[16];
207 struct tkip_ctx *ctx = &key->u.tkip.tx;
208 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
209
210 /* Calculate per-packet key */
211 if (ctx->iv16 == 0 || !ctx->initialized)
212 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
213
214 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
278 215
279 ieee80211_tkip_gen_rc4key(key, ta, rc4key); 216 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
280 pos = ieee80211_tkip_add_iv(pos, key, rc4key[0], rc4key[1], rc4key[2]);
281 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 217 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
282} 218}
283 219
284
285/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the 220/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
286 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 221 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
287 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 222 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
@@ -296,15 +231,16 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
296 u32 iv16; 231 u32 iv16;
297 u8 rc4key[16], keyid, *pos = payload; 232 u8 rc4key[16], keyid, *pos = payload;
298 int res; 233 int res;
234 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
299 235
300 if (payload_len < 12) 236 if (payload_len < 12)
301 return -1; 237 return -1;
302 238
303 iv16 = (pos[0] << 8) | pos[2]; 239 iv16 = (pos[0] << 8) | pos[2];
304 keyid = pos[3]; 240 keyid = pos[3];
305 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 241 iv32 = get_unaligned_le32(pos + 4);
306 pos += 8; 242 pos += 8;
307#ifdef CONFIG_TKIP_DEBUG 243#ifdef CONFIG_MAC80211_TKIP_DEBUG
308 { 244 {
309 int i; 245 int i;
310 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len); 246 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
@@ -314,7 +250,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
314 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n", 250 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
315 iv16, iv32); 251 iv16, iv32);
316 } 252 }
317#endif /* CONFIG_TKIP_DEBUG */ 253#endif
318 254
319 if (!(keyid & (1 << 5))) 255 if (!(keyid & (1 << 5)))
320 return TKIP_DECRYPT_NO_EXT_IV; 256 return TKIP_DECRYPT_NO_EXT_IV;
@@ -322,50 +258,48 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
322 if ((keyid >> 6) != key->conf.keyidx) 258 if ((keyid >> 6) != key->conf.keyidx)
323 return TKIP_DECRYPT_INVALID_KEYIDX; 259 return TKIP_DECRYPT_INVALID_KEYIDX;
324 260
325 if (key->u.tkip.rx_initialized[queue] && 261 if (key->u.tkip.rx[queue].initialized &&
326 (iv32 < key->u.tkip.iv32_rx[queue] || 262 (iv32 < key->u.tkip.rx[queue].iv32 ||
327 (iv32 == key->u.tkip.iv32_rx[queue] && 263 (iv32 == key->u.tkip.rx[queue].iv32 &&
328 iv16 <= key->u.tkip.iv16_rx[queue]))) { 264 iv16 <= key->u.tkip.rx[queue].iv16))) {
329#ifdef CONFIG_TKIP_DEBUG 265#ifdef CONFIG_MAC80211_TKIP_DEBUG
330 DECLARE_MAC_BUF(mac); 266 DECLARE_MAC_BUF(mac);
331 printk(KERN_DEBUG "TKIP replay detected for RX frame from " 267 printk(KERN_DEBUG "TKIP replay detected for RX frame from "
332 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", 268 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
333 print_mac(mac, ta), 269 print_mac(mac, ta),
334 iv32, iv16, key->u.tkip.iv32_rx[queue], 270 iv32, iv16, key->u.tkip.rx[queue].iv32,
335 key->u.tkip.iv16_rx[queue]); 271 key->u.tkip.rx[queue].iv16);
336#endif /* CONFIG_TKIP_DEBUG */ 272#endif
337 return TKIP_DECRYPT_REPLAY; 273 return TKIP_DECRYPT_REPLAY;
338 } 274 }
339 275
340 if (only_iv) { 276 if (only_iv) {
341 res = TKIP_DECRYPT_OK; 277 res = TKIP_DECRYPT_OK;
342 key->u.tkip.rx_initialized[queue] = 1; 278 key->u.tkip.rx[queue].initialized = 1;
343 goto done; 279 goto done;
344 } 280 }
345 281
346 if (!key->u.tkip.rx_initialized[queue] || 282 if (!key->u.tkip.rx[queue].initialized ||
347 key->u.tkip.iv32_rx[queue] != iv32) { 283 key->u.tkip.rx[queue].iv32 != iv32) {
348 key->u.tkip.rx_initialized[queue] = 1;
349 /* IV16 wrapped around - perform TKIP phase 1 */ 284 /* IV16 wrapped around - perform TKIP phase 1 */
350 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], 285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
351 iv32, key->u.tkip.p1k_rx[queue]); 286#ifdef CONFIG_MAC80211_TKIP_DEBUG
352#ifdef CONFIG_TKIP_DEBUG
353 { 287 {
354 int i; 288 int i;
289 u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
355 DECLARE_MAC_BUF(mac); 290 DECLARE_MAC_BUF(mac);
356 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s" 291 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s"
357 " TK=", print_mac(mac, ta)); 292 " TK=", print_mac(mac, ta));
358 for (i = 0; i < 16; i++) 293 for (i = 0; i < 16; i++)
359 printk("%02x ", 294 printk("%02x ",
360 key->conf.key[ 295 key->conf.key[key_offset + i]);
361 ALG_TKIP_TEMP_ENCR_KEY + i]);
362 printk("\n"); 296 printk("\n");
363 printk(KERN_DEBUG "TKIP decrypt: P1K="); 297 printk(KERN_DEBUG "TKIP decrypt: P1K=");
364 for (i = 0; i < 5; i++) 298 for (i = 0; i < 5; i++)
365 printk("%04x ", key->u.tkip.p1k_rx[queue][i]); 299 printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
366 printk("\n"); 300 printk("\n");
367 } 301 }
368#endif /* CONFIG_TKIP_DEBUG */ 302#endif
369 if (key->local->ops->update_tkip_key && 303 if (key->local->ops->update_tkip_key &&
370 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
371 u8 bcast[ETH_ALEN] = 305 u8 bcast[ETH_ALEN] =
@@ -377,14 +311,12 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
377 311
378 key->local->ops->update_tkip_key( 312 key->local->ops->update_tkip_key(
379 local_to_hw(key->local), &key->conf, 313 local_to_hw(key->local), &key->conf,
380 sta_addr, iv32, key->u.tkip.p1k_rx[queue]); 314 sta_addr, iv32, key->u.tkip.rx[queue].p1k);
381 } 315 }
382 } 316 }
383 317
384 tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], 318 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
385 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], 319#ifdef CONFIG_MAC80211_TKIP_DEBUG
386 iv16, rc4key);
387#ifdef CONFIG_TKIP_DEBUG
388 { 320 {
389 int i; 321 int i;
390 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key="); 322 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
@@ -392,7 +324,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
392 printk("%02x ", rc4key[i]); 324 printk("%02x ", rc4key[i]);
393 printk("\n"); 325 printk("\n");
394 } 326 }
395#endif /* CONFIG_TKIP_DEBUG */ 327#endif
396 328
397 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); 329 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
398 done: 330 done:
@@ -409,5 +341,3 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
409 341
410 return res; 342 return res;
411} 343}
412
413
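[Editor's sketch of the per-queue replay rule applied in
ieee80211_tkip_decrypt_data() above: a frame is a replay unless its 48-bit
TSC, compared as the (iv32, iv16) pair, strictly advances past the last
accepted value. Standalone form, not part of this patch:]

#include <stdbool.h>
#include <stdint.h>

static bool tkip_is_replay(bool initialized,
			   uint32_t prev32, uint16_t prev16,
			   uint32_t iv32, uint16_t iv16)
{
	if (!initialized)	/* nothing accepted on this queue yet */
		return false;
	return iv32 < prev32 || (iv32 == prev32 && iv16 <= prev16);
}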
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index b7c2ee763d9d..d4714383f5fc 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,12 +13,8 @@
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include "key.h" 14#include "key.h"
15 15
16u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 u8 iv0, u8 iv1, u8 iv2); 17
18void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
19 u16 *phase1key);
20void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
21 u8 *rc4key);
22void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
23 struct ieee80211_key *key, 19 struct ieee80211_key *key,
24 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c80d5899f279..0fbadd8b983c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -38,23 +38,12 @@
38 38
39/* misc utils */ 39/* misc utils */
40 40
41static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata,
42 struct ieee80211_hdr *hdr)
43{
44 /* Set the sequence number for this frame. */
45 hdr->seq_ctrl = cpu_to_le16(sdata->sequence);
46
47 /* Increase the sequence number. */
48 sdata->sequence = (sdata->sequence + 0x10) & IEEE80211_SCTL_SEQ;
49}
50
51#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP 41#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
52static void ieee80211_dump_frame(const char *ifname, const char *title, 42static void ieee80211_dump_frame(const char *ifname, const char *title,
53 const struct sk_buff *skb) 43 const struct sk_buff *skb)
54{ 44{
55 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 45 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
56 u16 fc; 46 unsigned int hdrlen;
57 int hdrlen;
58 DECLARE_MAC_BUF(mac); 47 DECLARE_MAC_BUF(mac);
59 48
60 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); 49 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
@@ -63,13 +52,12 @@ static void ieee80211_dump_frame(const char *ifname, const char *title,
63 return; 52 return;
64 } 53 }
65 54
66 fc = le16_to_cpu(hdr->frame_control); 55 hdrlen = ieee80211_hdrlen(hdr->frame_control);
67 hdrlen = ieee80211_get_hdrlen(fc);
68 if (hdrlen > skb->len) 56 if (hdrlen > skb->len)
69 hdrlen = skb->len; 57 hdrlen = skb->len;
70 if (hdrlen >= 4) 58 if (hdrlen >= 4)
71 printk(" FC=0x%04x DUR=0x%04x", 59 printk(" FC=0x%04x DUR=0x%04x",
72 fc, le16_to_cpu(hdr->duration_id)); 60 le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id));
73 if (hdrlen >= 10) 61 if (hdrlen >= 10)
74 printk(" A1=%s", print_mac(mac, hdr->addr1)); 62 printk(" A1=%s", print_mac(mac, hdr->addr1));
75 if (hdrlen >= 16) 63 if (hdrlen >= 16)
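[Editor's note: ieee80211_hdrlen() replaces the fc-based
ieee80211_get_hdrlen() and works straight from the little-endian
frame_control word. A simplified, data-frames-only sketch of the rule it
applies; the constants mirror the kernel's IEEE80211_FCTL_*/IEEE80211_STYPE_*
values, but this code is not part of the patch:]

#include <stdint.h>

#define FCTL_FTYPE	0x000c
#define FCTL_TODS	0x0100
#define FCTL_FROMDS	0x0200
#define FTYPE_DATA	0x0008
#define STYPE_QOS	0x0080

static unsigned int data_hdrlen(uint16_t fc)	/* fc already in CPU order */
{
	unsigned int len = 24;			/* addr1..addr3 + seq_ctrl */

	if ((fc & (FCTL_TODS | FCTL_FROMDS)) == (FCTL_TODS | FCTL_FROMDS))
		len += 6;			/* addr4 on 4-address frames */
	if ((fc & FCTL_FTYPE) == FTYPE_DATA && (fc & STYPE_QOS))
		len += 2;			/* QoS control field */
	return len;
}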
@@ -87,15 +75,16 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title,
87} 75}
88#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ 76#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
89 77
90static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 78static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
91 int next_frag_len) 79 int next_frag_len)
92{ 80{
93 int rate, mrate, erp, dur, i; 81 int rate, mrate, erp, dur, i;
94 struct ieee80211_rate *txrate = tx->rate; 82 struct ieee80211_rate *txrate;
95 struct ieee80211_local *local = tx->local; 83 struct ieee80211_local *local = tx->local;
96 struct ieee80211_supported_band *sband; 84 struct ieee80211_supported_band *sband;
97 85
98 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 86 sband = local->hw.wiphy->bands[tx->channel->band];
87 txrate = &sband->bitrates[tx->rate_idx];
99 88
100 erp = 0; 89 erp = 0;
101 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 90 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -139,7 +128,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
139 128
140 /* data/mgmt */ 129 /* data/mgmt */
141 if (0 /* FIX: data/mgmt during CFP */) 130 if (0 /* FIX: data/mgmt during CFP */)
142 return 32768; 131 return cpu_to_le16(32768);
143 132
144 if (group_addr) /* Group address as the destination - no ACK */ 133 if (group_addr) /* Group address as the destination - no ACK */
145 return 0; 134 return 0;
@@ -209,19 +198,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
209 tx->sdata->bss_conf.use_short_preamble); 198 tx->sdata->bss_conf.use_short_preamble);
210 } 199 }
211 200
212 return dur; 201 return cpu_to_le16(dur);
213}
214
215static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local,
216 int queue)
217{
218 return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
219}
220
221static inline int __ieee80211_queue_pending(const struct ieee80211_local *local,
222 int queue)
223{
224 return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]);
225} 202}
226 203
227static int inline is_ieee80211_device(struct net_device *dev, 204static int inline is_ieee80211_device(struct net_device *dev,
@@ -233,16 +210,16 @@ static int inline is_ieee80211_device(struct net_device *dev,
233 210
234/* tx handlers */ 211/* tx handlers */
235 212
236static ieee80211_tx_result 213static ieee80211_tx_result debug_noinline
237ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 214ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
238{ 215{
239#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
240 struct sk_buff *skb = tx->skb; 217 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
241 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
242#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 218#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
243 u32 sta_flags; 220 u32 sta_flags;
244 221
245 if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) 222 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
246 return TX_CONTINUE; 223 return TX_CONTINUE;
247 224
248 if (unlikely(tx->local->sta_sw_scanning) && 225 if (unlikely(tx->local->sta_sw_scanning) &&
@@ -256,7 +233,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
256 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 233 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
257 return TX_CONTINUE; 234 return TX_CONTINUE;
258 235
259 sta_flags = tx->sta ? tx->sta->flags : 0; 236 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
260 237
261 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 238 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
262 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 239 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
@@ -287,17 +264,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
287 return TX_CONTINUE; 264 return TX_CONTINUE;
288} 265}
289 266
290static ieee80211_tx_result
291ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
292{
293 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
294
295 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24)
296 ieee80211_include_sequence(tx->sdata, hdr);
297
298 return TX_CONTINUE;
299}
300
301/* This function is called whenever the AP is about to exceed the maximum limit 267/* This function is called whenever the AP is about to exceed the maximum limit
302 * of buffered frames for power saving STAs. This situation should not really 268 * of buffered frames for power saving STAs. This situation should not really
303 * happen often during normal operation, so dropping the oldest buffered packet 269 * happen often during normal operation, so dropping the oldest buffered packet
@@ -316,8 +282,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
316 282
317 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 283 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
318 struct ieee80211_if_ap *ap; 284 struct ieee80211_if_ap *ap;
319 if (sdata->dev == local->mdev || 285 if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
320 sdata->vif.type != IEEE80211_IF_TYPE_AP)
321 continue; 286 continue;
322 ap = &sdata->u.ap; 287 ap = &sdata->u.ap;
323 skb = skb_dequeue(&ap->ps_bc_buf); 288 skb = skb_dequeue(&ap->ps_bc_buf);
@@ -340,13 +305,17 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
340 rcu_read_unlock(); 305 rcu_read_unlock();
341 306
342 local->total_ps_buffered = total; 307 local->total_ps_buffered = total;
308#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
343 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", 309 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
344 wiphy_name(local->hw.wiphy), purged); 310 wiphy_name(local->hw.wiphy), purged);
311#endif
345} 312}
346 313
347static ieee80211_tx_result 314static ieee80211_tx_result
348ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 315ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
349{ 316{
317 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
318
350 /* 319 /*
351 * broadcast/multicast frame 320 * broadcast/multicast frame
352 * 321 *
@@ -355,8 +324,12 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
355 * This is done either by the hardware or us. 324 * This is done either by the hardware or us.
356 */ 325 */
357 326
358 /* not AP/IBSS or ordered frame */ 327 /* powersaving STAs only in AP/VLAN mode */
359 if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) 328 if (!tx->sdata->bss)
329 return TX_CONTINUE;
330
331 /* no buffering for ordered frames */
332 if (tx->fc & IEEE80211_FCTL_ORDER)
360 return TX_CONTINUE; 333 return TX_CONTINUE;
361 334
362 /* no stations in PS mode */ 335 /* no stations in PS mode */
@@ -369,11 +342,13 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
369 purge_old_ps_buffers(tx->local); 342 purge_old_ps_buffers(tx->local);
370 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= 343 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >=
371 AP_MAX_BC_BUFFER) { 344 AP_MAX_BC_BUFFER) {
345#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
372 if (net_ratelimit()) { 346 if (net_ratelimit()) {
373 printk(KERN_DEBUG "%s: BC TX buffer full - " 347 printk(KERN_DEBUG "%s: BC TX buffer full - "
374 "dropping the oldest frame\n", 348 "dropping the oldest frame\n",
375 tx->dev->name); 349 tx->dev->name);
376 } 350 }
351#endif
377 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 352 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
378 } else 353 } else
379 tx->local->total_ps_buffered++; 354 tx->local->total_ps_buffered++;
@@ -382,7 +357,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
382 } 357 }
383 358
384 /* buffered in hardware */ 359 /* buffered in hardware */
385 tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; 360 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
386 361
387 return TX_CONTINUE; 362 return TX_CONTINUE;
388} 363}
@@ -391,6 +366,8 @@ static ieee80211_tx_result
391ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) 366ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
392{ 367{
393 struct sta_info *sta = tx->sta; 368 struct sta_info *sta = tx->sta;
369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
370 u32 staflags;
394 DECLARE_MAC_BUF(mac); 371 DECLARE_MAC_BUF(mac);
395 372
396 if (unlikely(!sta || 373 if (unlikely(!sta ||
@@ -398,9 +375,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
398 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) 375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
399 return TX_CONTINUE; 376 return TX_CONTINUE;
400 377
401 if (unlikely((sta->flags & WLAN_STA_PS) && 378 staflags = get_sta_flags(sta);
402 !(sta->flags & WLAN_STA_PSPOLL))) { 379
403 struct ieee80211_tx_packet_data *pkt_data; 380 if (unlikely((staflags & WLAN_STA_PS) &&
381 !(staflags & WLAN_STA_PSPOLL))) {
404#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 382#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
405 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 383 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
406 "before %d)\n", 384 "before %d)\n",
@@ -411,11 +389,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
411 purge_old_ps_buffers(tx->local); 389 purge_old_ps_buffers(tx->local);
412 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 390 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
413 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); 391 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
392#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
414 if (net_ratelimit()) { 393 if (net_ratelimit()) {
415 printk(KERN_DEBUG "%s: STA %s TX " 394 printk(KERN_DEBUG "%s: STA %s TX "
416 "buffer full - dropping oldest frame\n", 395 "buffer full - dropping oldest frame\n",
417 tx->dev->name, print_mac(mac, sta->addr)); 396 tx->dev->name, print_mac(mac, sta->addr));
418 } 397 }
398#endif
419 dev_kfree_skb(old); 399 dev_kfree_skb(old);
420 } else 400 } else
421 tx->local->total_ps_buffered++; 401 tx->local->total_ps_buffered++;
@@ -424,24 +404,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
424 if (skb_queue_empty(&sta->ps_tx_buf)) 404 if (skb_queue_empty(&sta->ps_tx_buf))
425 sta_info_set_tim_bit(sta); 405 sta_info_set_tim_bit(sta);
426 406
427 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; 407 info->control.jiffies = jiffies;
428 pkt_data->jiffies = jiffies;
429 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 408 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
430 return TX_QUEUED; 409 return TX_QUEUED;
431 } 410 }
432#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 411#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
433 else if (unlikely(sta->flags & WLAN_STA_PS)) { 412 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
434 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " 413 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
435 "set -> send frame\n", tx->dev->name, 414 "set -> send frame\n", tx->dev->name,
436 print_mac(mac, sta->addr)); 415 print_mac(mac, sta->addr));
437 } 416 }
438#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 417#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
439 sta->flags &= ~WLAN_STA_PSPOLL; 418 clear_sta_flags(sta, WLAN_STA_PSPOLL);
440 419
441 return TX_CONTINUE; 420 return TX_CONTINUE;
442} 421}
443 422
444static ieee80211_tx_result 423static ieee80211_tx_result debug_noinline
445ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) 424ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
446{ 425{
447 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) 426 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
@@ -453,21 +432,22 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
453 return ieee80211_tx_h_multicast_ps_buf(tx); 432 return ieee80211_tx_h_multicast_ps_buf(tx);
454} 433}
455 434
456static ieee80211_tx_result 435static ieee80211_tx_result debug_noinline
457ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 436ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
458{ 437{
459 struct ieee80211_key *key; 438 struct ieee80211_key *key;
439 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
460 u16 fc = tx->fc; 440 u16 fc = tx->fc;
461 441
462 if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 442 if (unlikely(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
463 tx->key = NULL; 443 tx->key = NULL;
464 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 444 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
465 tx->key = key; 445 tx->key = key;
466 else if ((key = rcu_dereference(tx->sdata->default_key))) 446 else if ((key = rcu_dereference(tx->sdata->default_key)))
467 tx->key = key; 447 tx->key = key;
468 else if (tx->sdata->drop_unencrypted && 448 else if (tx->sdata->drop_unencrypted &&
469 !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && 449 !(info->flags & IEEE80211_TX_CTL_EAPOL_FRAME) &&
470 !(tx->flags & IEEE80211_TX_INJECTED)) { 450 !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
471 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 451 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
472 return TX_DROP; 452 return TX_DROP;
473 } else 453 } else
@@ -496,15 +476,197 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
496 } 476 }
497 477
498 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 478 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
499 tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 479 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
500 480
501 return TX_CONTINUE; 481 return TX_CONTINUE;
502} 482}
503 483
504static ieee80211_tx_result 484static ieee80211_tx_result debug_noinline
485ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
486{
487 struct rate_selection rsel;
488 struct ieee80211_supported_band *sband;
489 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
490
491 sband = tx->local->hw.wiphy->bands[tx->channel->band];
492
493 if (likely(tx->rate_idx < 0)) {
494 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
495 tx->rate_idx = rsel.rate_idx;
496 if (unlikely(rsel.probe_idx >= 0)) {
497 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
498 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
499 info->control.alt_retry_rate_idx = tx->rate_idx;
500 tx->rate_idx = rsel.probe_idx;
501 } else
502 info->control.alt_retry_rate_idx = -1;
503
504 if (unlikely(tx->rate_idx < 0))
505 return TX_DROP;
506 } else
507 info->control.alt_retry_rate_idx = -1;
508
509 if (tx->sdata->bss_conf.use_cts_prot &&
510 (tx->flags & IEEE80211_TX_FRAGMENTED) && (rsel.nonerp_idx >= 0)) {
511 tx->last_frag_rate_idx = tx->rate_idx;
512 if (rsel.probe_idx >= 0)
513 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
514 else
515 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
516 tx->rate_idx = rsel.nonerp_idx;
517 info->tx_rate_idx = rsel.nonerp_idx;
518 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
519 } else {
520 tx->last_frag_rate_idx = tx->rate_idx;
521 info->tx_rate_idx = tx->rate_idx;
522 }
523 info->tx_rate_idx = tx->rate_idx;
524
525 return TX_CONTINUE;
526}
527
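[Editor's sketch of the probing hand-off in ieee80211_tx_h_rate_ctrl() above;
struct rate_pick and apply_probe are illustrative, not part of the patch.
When rate control asks to probe, the probe rate takes the first transmission
and the previously chosen rate is parked as the alternate retry rate:]

struct rate_pick {
	int rate_idx;			/* rate for the first attempt */
	int alt_retry_rate_idx;		/* rate for retries, -1 = none */
};

static struct rate_pick apply_probe(int chosen_idx, int probe_idx)
{
	struct rate_pick p = { chosen_idx, -1 };

	if (probe_idx >= 0) {
		p.alt_retry_rate_idx = chosen_idx;  /* fall back if probe fails */
		p.rate_idx = probe_idx;
	}
	return p;
}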
528static ieee80211_tx_result debug_noinline
529ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
530{
531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
532 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
533 struct ieee80211_supported_band *sband;
534
535 sband = tx->local->hw.wiphy->bands[tx->channel->band];
536
537 if (tx->sta)
538 info->control.aid = tx->sta->aid;
539
540 if (!info->control.retry_limit) {
541 if (!is_multicast_ether_addr(hdr->addr1)) {
542 int len = min_t(int, tx->skb->len + FCS_LEN,
543 tx->local->fragmentation_threshold);
544 if (len > tx->local->rts_threshold
545 && tx->local->rts_threshold <
546 IEEE80211_MAX_RTS_THRESHOLD) {
547 info->flags |= IEEE80211_TX_CTL_USE_RTS_CTS;
548 info->flags |=
549 IEEE80211_TX_CTL_LONG_RETRY_LIMIT;
550 info->control.retry_limit =
551 tx->local->long_retry_limit;
552 } else {
553 info->control.retry_limit =
554 tx->local->short_retry_limit;
555 }
556 } else {
557 info->control.retry_limit = 1;
558 }
559 }
560
561 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
562 /* Do not use multiple retry rates when sending fragmented
563 * frames.
564 * TODO: The last fragment could still use multiple retry
565 * rates. */
566 info->control.alt_retry_rate_idx = -1;
567 }
568
569 /* Use CTS protection for unicast frames sent using extended rates if
570 * there are associated non-ERP stations and RTS/CTS is not configured
571 * for the frame. */
572 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
573 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_ERP_G) &&
574 (tx->flags & IEEE80211_TX_UNICAST) &&
575 tx->sdata->bss_conf.use_cts_prot &&
576 !(info->flags & IEEE80211_TX_CTL_USE_RTS_CTS))
577 info->flags |= IEEE80211_TX_CTL_USE_CTS_PROTECT;
578
579 /* Transmit data frames using short preambles if the driver supports
580 * short preambles at the selected rate and short preambles are
581 * available on the network at the current point in time. */
582 if (ieee80211_is_data(hdr->frame_control) &&
583 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
584 tx->sdata->bss_conf.use_short_preamble &&
585 (!tx->sta || test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))) {
586 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
587 }
588
589 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
590 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
591 struct ieee80211_rate *rate;
592 s8 baserate = -1;
593 int idx;
594
595 /* Do not use multiple retry rates when using RTS/CTS */
596 info->control.alt_retry_rate_idx = -1;
597
598 /* Use min(data rate, max base rate) as CTS/RTS rate */
599 rate = &sband->bitrates[tx->rate_idx];
600
601 for (idx = 0; idx < sband->n_bitrates; idx++) {
602 if (sband->bitrates[idx].bitrate > rate->bitrate)
603 continue;
604 if (tx->sdata->basic_rates & BIT(idx) &&
605 (baserate < 0 ||
606 (sband->bitrates[baserate].bitrate
607 < sband->bitrates[idx].bitrate)))
608 baserate = idx;
609 }
610
611 if (baserate >= 0)
612 info->control.rts_cts_rate_idx = baserate;
613 else
614 info->control.rts_cts_rate_idx = 0;
615 }
616
617 if (tx->sta)
618 info->control.aid = tx->sta->aid;
619
620 return TX_CONTINUE;
621}
622
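[Editor's sketch of the base-rate scan at the end of ieee80211_tx_h_misc():
among basic rates not faster than the data rate, take the fastest, else fall
back to index 0. bitrate[] and the basic_rates bitmap stand in for the sband
fields; this code is not part of the patch:]

#include <stdint.h>

static int rts_cts_rate_idx(const int *bitrate, int n_bitrates,
			    uint32_t basic_rates, int data_idx)
{
	int idx, best = -1;

	for (idx = 0; idx < n_bitrates; idx++) {
		if (bitrate[idx] > bitrate[data_idx])
			continue;		/* never above the data rate */
		if ((basic_rates & (1u << idx)) &&
		    (best < 0 || bitrate[best] < bitrate[idx]))
			best = idx;
	}
	return best >= 0 ? best : 0;
}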
623static ieee80211_tx_result debug_noinline
624ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
625{
626 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
627 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
628 u16 *seq;
629 u8 *qc;
630 int tid;
631
632 /* only for injected frames */
633 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
634 return TX_CONTINUE;
635
636 if (ieee80211_hdrlen(hdr->frame_control) < 24)
637 return TX_CONTINUE;
638
639 if (!ieee80211_is_data_qos(hdr->frame_control)) {
640 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
641 return TX_CONTINUE;
642 }
643
644 /*
645 * This should be true for injected/management frames only, for
646 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
647 * above since they are not QoS-data frames.
648 */
649 if (!tx->sta)
650 return TX_CONTINUE;
651
652 /* include per-STA, per-TID sequence counter */
653
654 qc = ieee80211_get_qos_ctl(hdr);
655 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
656 seq = &tx->sta->tid_seq[tid];
657
658 hdr->seq_ctrl = cpu_to_le16(*seq);
659
660 /* Increase the sequence number. */
661 *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
662
663 return TX_CONTINUE;
664}
665
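[Editor's note on the "+ 0x10" above: seq_ctrl keeps the fragment number in
bits 0-3 and the sequence number in bits 4-15, so IEEE80211_SCTL_SEQ is
0xfff0 and one whole frame advances the field by 0x10, wrapping after 4096
frames. A standalone check, not part of this patch:]

#include <assert.h>
#include <stdint.h>

#define SCTL_FRAG 0x000f
#define SCTL_SEQ  0xfff0

int main(void)
{
	uint16_t seq = 4095 << 4;		/* highest sequence number */

	seq = (seq + 0x10) & SCTL_SEQ;
	assert(seq == 0);			/* wraps cleanly to zero */
	assert(((0x1234 & SCTL_SEQ) >> 4) == 0x123);	/* sequence field */
	assert((0x1234 & SCTL_FRAG) == 0x4);		/* fragment field */
	return 0;
}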
666static ieee80211_tx_result debug_noinline
505ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) 667ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
506{ 668{
507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 669 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
508 size_t hdrlen, per_fragm, num_fragm, payload_len, left; 670 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
509 struct sk_buff **frags, *first, *frag; 671 struct sk_buff **frags, *first, *frag;
510 int i; 672 int i;
@@ -515,9 +677,19 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
515 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 677 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
516 return TX_CONTINUE; 678 return TX_CONTINUE;
517 679
680 /*
681 * Warn when submitting a fragmented A-MPDU frame and drop it.
682 * This scenario is handled in __ieee80211_tx_prepare but extra
683 * caution taken here as fragmented ampdu may cause Tx stop.
684 */
685 if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU ||
686 skb_get_queue_mapping(tx->skb) >=
687 ieee80211_num_regular_queues(&tx->local->hw)))
688 return TX_DROP;
689
518 first = tx->skb; 690 first = tx->skb;
519 691
520 hdrlen = ieee80211_get_hdrlen(tx->fc); 692 hdrlen = ieee80211_hdrlen(hdr->frame_control);
521 payload_len = first->len - hdrlen; 693 payload_len = first->len - hdrlen;
522 per_fragm = frag_threshold - hdrlen - FCS_LEN; 694 per_fragm = frag_threshold - hdrlen - FCS_LEN;
523 num_fragm = DIV_ROUND_UP(payload_len, per_fragm); 695 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
@@ -558,6 +730,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
558 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); 730 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
559 copylen = left > per_fragm ? per_fragm : left; 731 copylen = left > per_fragm ? per_fragm : left;
560 memcpy(skb_put(frag, copylen), pos, copylen); 732 memcpy(skb_put(frag, copylen), pos, copylen);
733 memcpy(frag->cb, first->cb, sizeof(frag->cb));
734 skb_copy_queue_mapping(frag, first);
561 735
562 pos += copylen; 736 pos += copylen;
563 left -= copylen; 737 left -= copylen;
@@ -570,7 +744,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
570 return TX_CONTINUE; 744 return TX_CONTINUE;
571 745
572 fail: 746 fail:
573 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
574 if (frags) { 747 if (frags) {
575 for (i = 0; i < num_fragm - 1; i++) 748 for (i = 0; i < num_fragm - 1; i++)
576 if (frags[i]) 749 if (frags[i])
@@ -581,7 +754,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
581 return TX_DROP; 754 return TX_DROP;
582} 755}
583 756
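[Editor's worked example for the sizing in ieee80211_tx_h_fragment() above:
with frag_threshold = 2346, a 24-byte header and FCS_LEN = 4, each fragment
carries 2318 payload bytes, so a 5000-byte payload splits into
DIV_ROUND_UP(5000, 2318) = 3 fragments. Standalone check, not part of this
patch:]

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int frag_threshold = 2346, hdrlen = 24, fcs_len = 4;
	int per_fragm = frag_threshold - hdrlen - fcs_len;

	assert(per_fragm == 2318);
	assert(DIV_ROUND_UP(5000, per_fragm) == 3);
	return 0;
}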
584static ieee80211_tx_result 757static ieee80211_tx_result debug_noinline
585ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) 758ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
586{ 759{
587 if (!tx->key) 760 if (!tx->key)
@@ -601,236 +774,57 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
601 return TX_DROP; 774 return TX_DROP;
602} 775}
603 776
604static ieee80211_tx_result 777static ieee80211_tx_result debug_noinline
605ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) 778ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
606{ 779{
607 struct rate_selection rsel; 780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
608 struct ieee80211_supported_band *sband; 781 int next_len, i;
609 782 int group_addr = is_multicast_ether_addr(hdr->addr1);
610 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
611
612 if (likely(!tx->rate)) {
613 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
614 tx->rate = rsel.rate;
615 if (unlikely(rsel.probe)) {
616 tx->control->flags |=
617 IEEE80211_TXCTL_RATE_CTRL_PROBE;
618 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
619 tx->control->alt_retry_rate = tx->rate;
620 tx->rate = rsel.probe;
621 } else
622 tx->control->alt_retry_rate = NULL;
623
624 if (!tx->rate)
625 return TX_DROP;
626 } else
627 tx->control->alt_retry_rate = NULL;
628 783
629 if (tx->sdata->bss_conf.use_cts_prot && 784 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
630 (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) { 785 hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
631 tx->last_frag_rate = tx->rate; 786 return TX_CONTINUE;
632 if (rsel.probe)
633 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
634 else
635 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
636 tx->rate = rsel.nonerp;
637 tx->control->tx_rate = rsel.nonerp;
638 tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
639 } else {
640 tx->last_frag_rate = tx->rate;
641 tx->control->tx_rate = tx->rate;
642 } 787 }
643 tx->control->tx_rate = tx->rate;
644
645 return TX_CONTINUE;
646}
647 788
648static ieee80211_tx_result 789 hdr->duration_id = ieee80211_duration(tx, group_addr,
649ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) 790 tx->extra_frag[0]->len);
650{
651 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
652 u16 fc = le16_to_cpu(hdr->frame_control);
653 u16 dur;
654 struct ieee80211_tx_control *control = tx->control;
655 791
656 if (!control->retry_limit) { 792 for (i = 0; i < tx->num_extra_frag; i++) {
657 if (!is_multicast_ether_addr(hdr->addr1)) { 793 if (i + 1 < tx->num_extra_frag) {
658 if (tx->skb->len + FCS_LEN > tx->local->rts_threshold 794 next_len = tx->extra_frag[i + 1]->len;
659 && tx->local->rts_threshold <
660 IEEE80211_MAX_RTS_THRESHOLD) {
661 control->flags |=
662 IEEE80211_TXCTL_USE_RTS_CTS;
663 control->flags |=
664 IEEE80211_TXCTL_LONG_RETRY_LIMIT;
665 control->retry_limit =
666 tx->local->long_retry_limit;
667 } else {
668 control->retry_limit =
669 tx->local->short_retry_limit;
670 }
671 } else { 795 } else {
672 control->retry_limit = 1; 796 next_len = 0;
673 } 797 tx->rate_idx = tx->last_frag_rate_idx;
674 }
675
676 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
677 /* Do not use multiple retry rates when sending fragmented
678 * frames.
679 * TODO: The last fragment could still use multiple retry
680 * rates. */
681 control->alt_retry_rate = NULL;
682 }
683
684 /* Use CTS protection for unicast frames sent using extended rates if
685 * there are associated non-ERP stations and RTS/CTS is not configured
686 * for the frame. */
687 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
688 (tx->rate->flags & IEEE80211_RATE_ERP_G) &&
689 (tx->flags & IEEE80211_TX_UNICAST) &&
690 tx->sdata->bss_conf.use_cts_prot &&
691 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
692 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
693
694 /* Transmit data frames using short preambles if the driver supports
695 * short preambles at the selected rate and short preambles are
696 * available on the network at the current point in time. */
697 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
698 (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
699 tx->sdata->bss_conf.use_short_preamble &&
700 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
701 tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
702 }
703
704 /* Setup duration field for the first fragment of the frame. Duration
705 * for remaining fragments will be updated when they are being sent
706 * to low-level driver in ieee80211_tx(). */
707 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
708 (tx->flags & IEEE80211_TX_FRAGMENTED) ?
709 tx->extra_frag[0]->len : 0);
710 hdr->duration_id = cpu_to_le16(dur);
711
712 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
713 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
714 struct ieee80211_supported_band *sband;
715 struct ieee80211_rate *rate, *baserate;
716 int idx;
717
718 sband = tx->local->hw.wiphy->bands[
719 tx->local->hw.conf.channel->band];
720
721 /* Do not use multiple retry rates when using RTS/CTS */
722 control->alt_retry_rate = NULL;
723
724 /* Use min(data rate, max base rate) as CTS/RTS rate */
725 rate = tx->rate;
726 baserate = NULL;
727
728 for (idx = 0; idx < sband->n_bitrates; idx++) {
729 if (sband->bitrates[idx].bitrate > rate->bitrate)
730 continue;
731 if (tx->sdata->basic_rates & BIT(idx) &&
732 (!baserate ||
733 (baserate->bitrate < sband->bitrates[idx].bitrate)))
734 baserate = &sband->bitrates[idx];
735 } 798 }
736 799
737 if (baserate) 800 hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
738 control->rts_cts_rate = baserate; 801 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
739 else
740 control->rts_cts_rate = &sband->bitrates[0];
741 }
742
743 if (tx->sta) {
744 control->aid = tx->sta->aid;
745 tx->sta->tx_packets++;
746 tx->sta->tx_fragments++;
747 tx->sta->tx_bytes += tx->skb->len;
748 if (tx->extra_frag) {
749 int i;
750 tx->sta->tx_fragments += tx->num_extra_frag;
751 for (i = 0; i < tx->num_extra_frag; i++) {
752 tx->sta->tx_bytes +=
753 tx->extra_frag[i]->len;
754 }
755 }
756 } 802 }
757 803
758 return TX_CONTINUE; 804 return TX_CONTINUE;
759} 805}
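The duration walk above gives each fragment a duration/ID derived from the length of the fragment that follows it (the usual NAV reservation for the remainder of the burst), with the last fragment passing a next length of zero and switching to the last-fragment rate. A compressed sketch of that walk, where duration_for() is an invented stand-in for ieee80211_duration():

    #include <stdio.h>

    /* duration_for() is a hypothetical stand-in for ieee80211_duration();
     * only its shape matters here: the last fragment sees next_len == 0 */
    static int duration_for(int next_len)
    {
            return next_len ? 50 + next_len / 2 : 0;
    }

    int main(void)
    {
            int frag_len[] = { 228, 228, 60 };
            int n = 3, i;

            for (i = 0; i < n; i++) {
                    int next_len = (i + 1 < n) ? frag_len[i + 1] : 0;
                    printf("frag %d: next_len=%d duration=%d\n",
                           i, next_len, duration_for(next_len));
            }
            return 0;
    }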
760 806
761static ieee80211_tx_result 807static ieee80211_tx_result debug_noinline
762ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) 808ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
763{ 809{
764 struct ieee80211_local *local = tx->local; 810 int i;
765 struct sk_buff *skb = tx->skb;
766 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
767 u32 load = 0, hdrtime;
768 struct ieee80211_rate *rate = tx->rate;
769
770 /* TODO: this could be part of tx_status handling, so that the number
771 * of retries would be known; TX rate should in that case be stored
772 * somewhere with the packet */
773
774 /* Estimate total channel use caused by this frame */
775
776 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
777 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
778
779 if (tx->channel->band == IEEE80211_BAND_5GHZ ||
780 (tx->channel->band == IEEE80211_BAND_2GHZ &&
781 rate->flags & IEEE80211_RATE_ERP_G))
782 hdrtime = CHAN_UTIL_HDR_SHORT;
783 else
784 hdrtime = CHAN_UTIL_HDR_LONG;
785
786 load = hdrtime;
787 if (!is_multicast_ether_addr(hdr->addr1))
788 load += hdrtime;
789
790 if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
791 load += 2 * hdrtime;
792 else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
793 load += hdrtime;
794 811
795 /* TODO: optimise again */ 812 if (!tx->sta)
796 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; 813 return TX_CONTINUE;
797 814
815 tx->sta->tx_packets++;
816 tx->sta->tx_fragments++;
817 tx->sta->tx_bytes += tx->skb->len;
798 if (tx->extra_frag) { 818 if (tx->extra_frag) {
799 int i; 819 tx->sta->tx_fragments += tx->num_extra_frag;
800 for (i = 0; i < tx->num_extra_frag; i++) { 820 for (i = 0; i < tx->num_extra_frag; i++)
801 load += 2 * hdrtime; 821 tx->sta->tx_bytes += tx->extra_frag[i]->len;
802 load += tx->extra_frag[i]->len *
803 tx->rate->bitrate;
804 }
805 } 822 }
806 823
807 /* Divide channel_use by 8 to avoid wrapping around the counter */
808 load >>= CHAN_UTIL_SHIFT;
809 local->channel_use_raw += load;
810 if (tx->sta)
811 tx->sta->channel_use_raw += load;
812 tx->sdata->channel_use_raw += load;
813
814 return TX_CONTINUE; 824 return TX_CONTINUE;
815} 825}
816 826
817 827
818typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *);
819static ieee80211_tx_handler ieee80211_tx_handlers[] =
820{
821 ieee80211_tx_h_check_assoc,
822 ieee80211_tx_h_sequence,
823 ieee80211_tx_h_ps_buf,
824 ieee80211_tx_h_select_key,
825 ieee80211_tx_h_michael_mic_add,
826 ieee80211_tx_h_fragment,
827 ieee80211_tx_h_encrypt,
828 ieee80211_tx_h_rate_ctrl,
829 ieee80211_tx_h_misc,
830 ieee80211_tx_h_load_stats,
831 NULL
832};
833
834/* actual transmit path */ 828/* actual transmit path */
835 829
836/* 830/*
@@ -854,12 +848,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 (struct ieee80211_radiotap_header *) skb->data; 848 (struct ieee80211_radiotap_header *) skb->data;
855 struct ieee80211_supported_band *sband; 849 struct ieee80211_supported_band *sband;
856 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 850 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
857 struct ieee80211_tx_control *control = tx->control; 851 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
858 852
859 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; 853 sband = tx->local->hw.wiphy->bands[tx->channel->band];
860 854
861 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 855 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
862 tx->flags |= IEEE80211_TX_INJECTED; 856 info->flags |= IEEE80211_TX_CTL_INJECTED;
863 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 857 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
864 858
865 /* 859 /*
@@ -896,7 +890,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
896 r = &sband->bitrates[i]; 890 r = &sband->bitrates[i];
897 891
898 if (r->bitrate == target_rate) { 892 if (r->bitrate == target_rate) {
899 tx->rate = r; 893 tx->rate_idx = i;
900 break; 894 break;
901 } 895 }
902 } 896 }
@@ -907,7 +901,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
907 * radiotap uses 0 for 1st ant, mac80211 is 1 for 901 * radiotap uses 0 for 1st ant, mac80211 is 1 for
908 * 1st ant 902 * 1st ant
909 */ 903 */
910 control->antenna_sel_tx = (*iterator.this_arg) + 1; 904 info->antenna_sel_tx = (*iterator.this_arg) + 1;
911 break; 905 break;
912 906
913#if 0 907#if 0
@@ -931,8 +925,8 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
931 skb_trim(skb, skb->len - FCS_LEN); 925 skb_trim(skb, skb->len - FCS_LEN);
932 } 926 }
933 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) 927 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
934 control->flags &= 928 info->flags &=
935 ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; 929 ~IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
936 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) 930 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
937 tx->flags |= IEEE80211_TX_FRAGMENTED; 931 tx->flags |= IEEE80211_TX_FRAGMENTED;
938 break; 932 break;
@@ -967,12 +961,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
967static ieee80211_tx_result 961static ieee80211_tx_result
968__ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 962__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
969 struct sk_buff *skb, 963 struct sk_buff *skb,
970 struct net_device *dev, 964 struct net_device *dev)
971 struct ieee80211_tx_control *control)
972{ 965{
973 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 966 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
974 struct ieee80211_hdr *hdr; 967 struct ieee80211_hdr *hdr;
975 struct ieee80211_sub_if_data *sdata; 968 struct ieee80211_sub_if_data *sdata;
969 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
976 970
977 int hdrlen; 971 int hdrlen;
978 972
@@ -981,7 +975,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
981 tx->dev = dev; /* use original interface */ 975 tx->dev = dev; /* use original interface */
982 tx->local = local; 976 tx->local = local;
983 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); 977 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
984 tx->control = control; 978 tx->channel = local->hw.conf.channel;
979 tx->rate_idx = -1;
980 tx->last_frag_rate_idx = -1;
985 /* 981 /*
986 * Set this flag (used below to indicate "automatic fragmentation"), 982 * Set this flag (used below to indicate "automatic fragmentation"),
987 * it will be cleared/left by radiotap as desired. 983 * it will be cleared/left by radiotap as desired.
@@ -1008,34 +1004,33 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1008 1004
1009 if (is_multicast_ether_addr(hdr->addr1)) { 1005 if (is_multicast_ether_addr(hdr->addr1)) {
1010 tx->flags &= ~IEEE80211_TX_UNICAST; 1006 tx->flags &= ~IEEE80211_TX_UNICAST;
1011 control->flags |= IEEE80211_TXCTL_NO_ACK; 1007 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1012 } else { 1008 } else {
1013 tx->flags |= IEEE80211_TX_UNICAST; 1009 tx->flags |= IEEE80211_TX_UNICAST;
1014 control->flags &= ~IEEE80211_TXCTL_NO_ACK; 1010 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1015 } 1011 }
1016 1012
1017 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 1013 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1018 if ((tx->flags & IEEE80211_TX_UNICAST) && 1014 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1019 skb->len + FCS_LEN > local->fragmentation_threshold && 1015 skb->len + FCS_LEN > local->fragmentation_threshold &&
1020 !local->ops->set_frag_threshold) 1016 !local->ops->set_frag_threshold &&
1017 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1021 tx->flags |= IEEE80211_TX_FRAGMENTED; 1018 tx->flags |= IEEE80211_TX_FRAGMENTED;
1022 else 1019 else
1023 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 1020 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1024 } 1021 }
1025 1022
1026 if (!tx->sta) 1023 if (!tx->sta)
1027 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1024 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1028 else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { 1025 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1029 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1026 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1030 tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT;
1031 }
1032 1027
1033 hdrlen = ieee80211_get_hdrlen(tx->fc); 1028 hdrlen = ieee80211_get_hdrlen(tx->fc);
1034 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 1029 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1035 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 1030 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1036 tx->ethertype = (pos[0] << 8) | pos[1]; 1031 tx->ethertype = (pos[0] << 8) | pos[1];
1037 } 1032 }
1038 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1033 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1039 1034
1040 return TX_CONTINUE; 1035 return TX_CONTINUE;
1041} 1036}
@@ -1045,14 +1040,12 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1045 */ 1040 */
1046static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 1041static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1047 struct sk_buff *skb, 1042 struct sk_buff *skb,
1048 struct net_device *mdev, 1043 struct net_device *mdev)
1049 struct ieee80211_tx_control *control)
1050{ 1044{
1051 struct ieee80211_tx_packet_data *pkt_data; 1045 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1052 struct net_device *dev; 1046 struct net_device *dev;
1053 1047
1054 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1048 dev = dev_get_by_index(&init_net, info->control.ifindex);
1055 dev = dev_get_by_index(&init_net, pkt_data->ifindex);
1056 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { 1049 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) {
1057 dev_put(dev); 1050 dev_put(dev);
1058 dev = NULL; 1051 dev = NULL;
@@ -1060,7 +1053,7 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1060 if (unlikely(!dev)) 1053 if (unlikely(!dev))
1061 return -ENODEV; 1054 return -ENODEV;
1062 /* initialises tx with control */ 1055 /* initialises tx */
1063 __ieee80211_tx_prepare(tx, skb, dev, control); 1056 __ieee80211_tx_prepare(tx, skb, dev);
1064 dev_put(dev); 1057 dev_put(dev);
1065 return 0; 1058 return 0;
1066} 1059}
@@ -1068,50 +1061,49 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1068static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1061static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1069 struct ieee80211_tx_data *tx) 1062 struct ieee80211_tx_data *tx)
1070{ 1063{
1071 struct ieee80211_tx_control *control = tx->control; 1064 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1072 int ret, i; 1065 int ret, i;
1073 1066
1074 if (!ieee80211_qdisc_installed(local->mdev) && 1067 if (netif_subqueue_stopped(local->mdev, skb))
1075 __ieee80211_queue_stopped(local, 0)) {
1076 netif_stop_queue(local->mdev);
1077 return IEEE80211_TX_AGAIN; 1068 return IEEE80211_TX_AGAIN;
1078 } 1069
1079 if (skb) { 1070 if (skb) {
1080 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1071 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1081 "TX to low-level driver", skb); 1072 "TX to low-level driver", skb);
1082 ret = local->ops->tx(local_to_hw(local), skb, control); 1073 ret = local->ops->tx(local_to_hw(local), skb);
1083 if (ret) 1074 if (ret)
1084 return IEEE80211_TX_AGAIN; 1075 return IEEE80211_TX_AGAIN;
1085 local->mdev->trans_start = jiffies; 1076 local->mdev->trans_start = jiffies;
1086 ieee80211_led_tx(local, 1); 1077 ieee80211_led_tx(local, 1);
1087 } 1078 }
1088 if (tx->extra_frag) { 1079 if (tx->extra_frag) {
1089 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1090 IEEE80211_TXCTL_USE_CTS_PROTECT |
1091 IEEE80211_TXCTL_CLEAR_PS_FILT |
1092 IEEE80211_TXCTL_FIRST_FRAGMENT);
1093 for (i = 0; i < tx->num_extra_frag; i++) { 1080 for (i = 0; i < tx->num_extra_frag; i++) {
1094 if (!tx->extra_frag[i]) 1081 if (!tx->extra_frag[i])
1095 continue; 1082 continue;
1096 if (__ieee80211_queue_stopped(local, control->queue)) 1083 info = IEEE80211_SKB_CB(tx->extra_frag[i]);
1084 info->flags &= ~(IEEE80211_TX_CTL_USE_RTS_CTS |
1085 IEEE80211_TX_CTL_USE_CTS_PROTECT |
1086 IEEE80211_TX_CTL_CLEAR_PS_FILT |
1087 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1088 if (netif_subqueue_stopped(local->mdev,
1089 tx->extra_frag[i]))
1097 return IEEE80211_TX_FRAG_AGAIN; 1090 return IEEE80211_TX_FRAG_AGAIN;
1098 if (i == tx->num_extra_frag) { 1091 if (i == tx->num_extra_frag) {
1099 control->tx_rate = tx->last_frag_rate; 1092 info->tx_rate_idx = tx->last_frag_rate_idx;
1100 1093
1101 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) 1094 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG)
1102 control->flags |= 1095 info->flags |=
1103 IEEE80211_TXCTL_RATE_CTRL_PROBE; 1096 IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1104 else 1097 else
1105 control->flags &= 1098 info->flags &=
1106 ~IEEE80211_TXCTL_RATE_CTRL_PROBE; 1099 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1107 } 1100 }
1108 1101
1109 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1102 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1110 "TX to low-level driver", 1103 "TX to low-level driver",
1111 tx->extra_frag[i]); 1104 tx->extra_frag[i]);
1112 ret = local->ops->tx(local_to_hw(local), 1105 ret = local->ops->tx(local_to_hw(local),
1113 tx->extra_frag[i], 1106 tx->extra_frag[i]);
1114 control);
1115 if (ret) 1107 if (ret)
1116 return IEEE80211_TX_FRAG_AGAIN; 1108 return IEEE80211_TX_FRAG_AGAIN;
1117 local->mdev->trans_start = jiffies; 1109 local->mdev->trans_start = jiffies;
@@ -1124,17 +1116,65 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1124 return IEEE80211_TX_OK; 1116 return IEEE80211_TX_OK;
1125} 1117}
1126 1118
1127static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, 1119/*
1128 struct ieee80211_tx_control *control) 1120 * Invoke TX handlers, return 0 on success and non-zero if the
1121 * frame was dropped or queued.
1122 */
1123static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1124{
1125 struct sk_buff *skb = tx->skb;
1126 ieee80211_tx_result res = TX_DROP;
1127 int i;
1128
1129#define CALL_TXH(txh) \
1130 res = txh(tx); \
1131 if (res != TX_CONTINUE) \
1132 goto txh_done;
1133
1134 CALL_TXH(ieee80211_tx_h_check_assoc)
1135 CALL_TXH(ieee80211_tx_h_ps_buf)
1136 CALL_TXH(ieee80211_tx_h_select_key)
1137 CALL_TXH(ieee80211_tx_h_michael_mic_add)
1138 CALL_TXH(ieee80211_tx_h_rate_ctrl)
1139 CALL_TXH(ieee80211_tx_h_misc)
1140 CALL_TXH(ieee80211_tx_h_sequence)
1141 CALL_TXH(ieee80211_tx_h_fragment)
1142 /* handlers after fragment must be aware of tx info fragmentation! */
1143 CALL_TXH(ieee80211_tx_h_encrypt)
1144 CALL_TXH(ieee80211_tx_h_calculate_duration)
1145 CALL_TXH(ieee80211_tx_h_stats)
1146#undef CALL_TXH
1147
1148 txh_done:
1149 if (unlikely(res == TX_DROP)) {
1150 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1151 dev_kfree_skb(skb);
1152 for (i = 0; i < tx->num_extra_frag; i++)
1153 if (tx->extra_frag[i])
1154 dev_kfree_skb(tx->extra_frag[i]);
1155 kfree(tx->extra_frag);
1156 return -1;
1157 } else if (unlikely(res == TX_QUEUED)) {
1158 I802_DEBUG_INC(tx->local->tx_handlers_queued);
1159 return -1;
1160 }
1161
1162 return 0;
1163}
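The CALL_TXH chain replaces the earlier handler-pointer array with direct calls, which lets the compiler inline the handlers whenever debug_noinline expands to nothing. A toy version of the same early-exit pattern, with invented handlers:

    #include <stdio.h>

    enum res { TX_CONTINUE, TX_DROP };

    static enum res h_check(int *v)  { return *v > 0 ? TX_CONTINUE : TX_DROP; }
    static enum res h_double(int *v) { *v *= 2; return TX_CONTINUE; }

    int main(void)
    {
            int val = 3;
            enum res res = TX_DROP;

    #define CALL_TXH(txh)           \
            res = txh(&val);        \
            if (res != TX_CONTINUE) \
                    goto txh_done;

            CALL_TXH(h_check)
            CALL_TXH(h_double)
    #undef CALL_TXH

    txh_done:
            printf("res=%d val=%d\n", res, val); /* res=0 val=6 */
            return 0;
    }

Note that, like the original, the macro expands to two statements, so it is only safe when invoked as a full statement rather than as the body of an unbraced if/else.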
1164
1165static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1129{ 1166{
1130 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1167 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1131 struct sta_info *sta; 1168 struct sta_info *sta;
1132 ieee80211_tx_handler *handler;
1133 struct ieee80211_tx_data tx; 1169 struct ieee80211_tx_data tx;
1134 ieee80211_tx_result res = TX_DROP, res_prepare; 1170 ieee80211_tx_result res_prepare;
1135 int ret, i, retries = 0; 1171 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1172 int ret, i;
1173 u16 queue;
1174
1175 queue = skb_get_queue_mapping(skb);
1136 1176
1137 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1177 WARN_ON(test_bit(queue, local->queues_pending));
1138 1178
1139 if (unlikely(skb->len < 10)) { 1179 if (unlikely(skb->len < 10)) {
1140 dev_kfree_skb(skb); 1180 dev_kfree_skb(skb);
@@ -1144,7 +1184,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1144 rcu_read_lock(); 1184 rcu_read_lock();
1145 1185
1146 /* initialises tx */ 1186 /* initialises tx */
1147 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); 1187 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
1148 1188
1149 if (res_prepare == TX_DROP) { 1189 if (res_prepare == TX_DROP) {
1150 dev_kfree_skb(skb); 1190 dev_kfree_skb(skb);
@@ -1154,86 +1194,53 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1154 1194
1155 sta = tx.sta; 1195 sta = tx.sta;
1156 tx.channel = local->hw.conf.channel; 1196 tx.channel = local->hw.conf.channel;
1197 info->band = tx.channel->band;
1157 1198
1158 for (handler = ieee80211_tx_handlers; *handler != NULL; 1199 if (invoke_tx_handlers(&tx))
1159 handler++) { 1200 goto out;
1160 res = (*handler)(&tx);
1161 if (res != TX_CONTINUE)
1162 break;
1163 }
1164
1165 skb = tx.skb; /* handlers are allowed to change skb */
1166
1167 if (unlikely(res == TX_DROP)) {
1168 I802_DEBUG_INC(local->tx_handlers_drop);
1169 goto drop;
1170 }
1171
1172 if (unlikely(res == TX_QUEUED)) {
1173 I802_DEBUG_INC(local->tx_handlers_queued);
1174 rcu_read_unlock();
1175 return 0;
1176 }
1177
1178 if (tx.extra_frag) {
1179 for (i = 0; i < tx.num_extra_frag; i++) {
1180 int next_len, dur;
1181 struct ieee80211_hdr *hdr =
1182 (struct ieee80211_hdr *)
1183 tx.extra_frag[i]->data;
1184
1185 if (i + 1 < tx.num_extra_frag) {
1186 next_len = tx.extra_frag[i + 1]->len;
1187 } else {
1188 next_len = 0;
1189 tx.rate = tx.last_frag_rate;
1190 }
1191 dur = ieee80211_duration(&tx, 0, next_len);
1192 hdr->duration_id = cpu_to_le16(dur);
1193 }
1194 }
1195 1201
1196retry: 1202retry:
1197 ret = __ieee80211_tx(local, skb, &tx); 1203 ret = __ieee80211_tx(local, skb, &tx);
1198 if (ret) { 1204 if (ret) {
1199 struct ieee80211_tx_stored_packet *store = 1205 struct ieee80211_tx_stored_packet *store;
1200 &local->pending_packet[control->queue]; 1206
1207 /*
1208 * Since there are no fragmented frames on A-MPDU
1209 * queues, there's no reason for a driver to reject
1210 * a frame there; warn and drop it.
1211 */
1212 if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw)))
1213 goto drop;
1214
1215 store = &local->pending_packet[queue];
1201 1216
1202 if (ret == IEEE80211_TX_FRAG_AGAIN) 1217 if (ret == IEEE80211_TX_FRAG_AGAIN)
1203 skb = NULL; 1218 skb = NULL;
1204 set_bit(IEEE80211_LINK_STATE_PENDING, 1219 set_bit(queue, local->queues_pending);
1205 &local->state[control->queue]);
1206 smp_mb(); 1220 smp_mb();
1207 /* When the driver gets out of buffers during sending of 1221 /*
1208 * fragments and calls ieee80211_stop_queue, there is 1222 * When the driver gets out of buffers during sending of
1209 * a small window between IEEE80211_LINK_STATE_XOFF and 1223 * fragments and calls ieee80211_stop_queue, the netif
1210 * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer 1224 * subqueue is stopped. There is, however, a small window
1225 * in which the PENDING bit is not yet set. If a buffer
1211 * gets available in that window (i.e. driver calls 1226 * becomes available in that window (i.e. the driver calls
1212 * ieee80211_wake_queue), we would end up with ieee80211_tx 1227 * ieee80211_wake_queue), we would end up with ieee80211_tx
1213 * called with IEEE80211_LINK_STATE_PENDING. Prevent this by 1228 * called with the PENDING bit still set. Prevent this by
1214 * continuing transmitting here when that situation is 1229 * continuing to transmit here when that situation
1215 * possible to have happened. */ 1230 * may have happened.
1216 if (!__ieee80211_queue_stopped(local, control->queue)) { 1231 */
1217 clear_bit(IEEE80211_LINK_STATE_PENDING, 1232 if (!__netif_subqueue_stopped(local->mdev, queue)) {
1218 &local->state[control->queue]); 1233 clear_bit(queue, local->queues_pending);
1219 retries++;
1220 /*
1221 * Driver bug, it's rejecting packets but
1222 * not stopping queues.
1223 */
1224 if (WARN_ON_ONCE(retries > 5))
1225 goto drop;
1226 goto retry; 1234 goto retry;
1227 } 1235 }
1228 memcpy(&store->control, control,
1229 sizeof(struct ieee80211_tx_control));
1230 store->skb = skb; 1236 store->skb = skb;
1231 store->extra_frag = tx.extra_frag; 1237 store->extra_frag = tx.extra_frag;
1232 store->num_extra_frag = tx.num_extra_frag; 1238 store->num_extra_frag = tx.num_extra_frag;
1233 store->last_frag_rate = tx.last_frag_rate; 1239 store->last_frag_rate_idx = tx.last_frag_rate_idx;
1234 store->last_frag_rate_ctrl_probe = 1240 store->last_frag_rate_ctrl_probe =
1235 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); 1241 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG);
1236 } 1242 }
1243 out:
1237 rcu_read_unlock(); 1244 rcu_read_unlock();
1238 return 0; 1245 return 0;
1239 1246
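The set_bit/smp_mb/test-stopped sequence above closes the wake-up window the comment describes. A userspace sketch of the same handshake using C11 atomics; all names are invented, and the fence merely stands in for smp_mb():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool queue_stopped; /* driver stopped the subqueue */
    static atomic_bool queue_pending; /* a frame is parked for later */

    /* returns true when the caller should retry the transmit itself */
    static bool park_frame(void)
    {
            atomic_store(&queue_pending, true);
            atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
            if (!atomic_load(&queue_stopped)) {
                    /* the queue woke up between the driver's reject and
                     * our set_bit: undo the parking and transmit again */
                    atomic_store(&queue_pending, false);
                    return true;
            }
            return false; /* the pending tasklet will flush the frame later */
    }

    int main(void)
    {
            atomic_store(&queue_stopped, false);
            printf("retry=%d\n", park_frame()); /* retry=1: queue is live */
            return 0;
    }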
@@ -1250,24 +1257,57 @@ retry:
1250 1257
1251/* device xmit handlers */ 1258/* device xmit handlers */
1252 1259
1260static int ieee80211_skb_resize(struct ieee80211_local *local,
1261 struct sk_buff *skb,
1262 int head_need, bool may_encrypt)
1263{
1264 int tail_need = 0;
1265
1266 /*
1267 * This could be optimised: devices that do full hardware
1268 * crypto (including TKIP MMIC) need no tailroom... But we
1269 * have no drivers for such devices currently.
1270 */
1271 if (may_encrypt) {
1272 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1273 tail_need -= skb_tailroom(skb);
1274 tail_need = max_t(int, tail_need, 0);
1275 }
1276
1277 if (head_need || tail_need) {
1278 /* Sorry. Can't account for this any more */
1279 skb_orphan(skb);
1280 }
1281
1282 if (skb_header_cloned(skb))
1283 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1284 else
1285 I802_DEBUG_INC(local->tx_expand_skb_head);
1286
1287 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1288 printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
1289 wiphy_name(local->hw.wiphy));
1290 return -ENOMEM;
1291 }
1292
1293 /* update truesize too */
1294 skb->truesize += head_need + tail_need;
1295
1296 return 0;
1297}
1298
1253int ieee80211_master_start_xmit(struct sk_buff *skb, 1299int ieee80211_master_start_xmit(struct sk_buff *skb,
1254 struct net_device *dev) 1300 struct net_device *dev)
1255{ 1301{
1256 struct ieee80211_tx_control control; 1302 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1257 struct ieee80211_tx_packet_data *pkt_data;
1258 struct net_device *odev = NULL; 1303 struct net_device *odev = NULL;
1259 struct ieee80211_sub_if_data *osdata; 1304 struct ieee80211_sub_if_data *osdata;
1260 int headroom; 1305 int headroom;
1306 bool may_encrypt;
1261 int ret; 1307 int ret;
1262 1308
1263 /* 1309 if (info->control.ifindex)
1264 * copy control out of the skb so other people can use skb->cb 1310 odev = dev_get_by_index(&init_net, info->control.ifindex);
1265 */
1266 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1267 memset(&control, 0, sizeof(struct ieee80211_tx_control));
1268
1269 if (pkt_data->ifindex)
1270 odev = dev_get_by_index(&init_net, pkt_data->ifindex);
1271 if (unlikely(odev && !is_ieee80211_device(odev, dev))) { 1311 if (unlikely(odev && !is_ieee80211_device(odev, dev))) {
1272 dev_put(odev); 1312 dev_put(odev);
1273 odev = NULL; 1313 odev = NULL;
@@ -1280,32 +1320,25 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1280 dev_kfree_skb(skb); 1320 dev_kfree_skb(skb);
1281 return 0; 1321 return 0;
1282 } 1322 }
1323
1283 osdata = IEEE80211_DEV_TO_SUB_IF(odev); 1324 osdata = IEEE80211_DEV_TO_SUB_IF(odev);
1284 1325
1285 headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; 1326 may_encrypt = !(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT);
1286 if (skb_headroom(skb) < headroom) { 1327
1287 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { 1328 headroom = osdata->local->tx_headroom;
1288 dev_kfree_skb(skb); 1329 if (may_encrypt)
1289 dev_put(odev); 1330 headroom += IEEE80211_ENCRYPT_HEADROOM;
1290 return 0; 1331 headroom -= skb_headroom(skb);
1291 } 1332 headroom = max_t(int, 0, headroom);
1333
1334 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
1335 dev_kfree_skb(skb);
1336 dev_put(odev);
1337 return 0;
1292 } 1338 }
1293 1339
1294 control.vif = &osdata->vif; 1340 info->control.vif = &osdata->vif;
1295 control.type = osdata->vif.type; 1341 ret = ieee80211_tx(odev, skb);
1296 if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS)
1297 control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS;
1298 if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT)
1299 control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
1300 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE)
1301 control.flags |= IEEE80211_TXCTL_REQUEUE;
1302 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME)
1303 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME;
1304 if (pkt_data->flags & IEEE80211_TXPD_AMPDU)
1305 control.flags |= IEEE80211_TXCTL_AMPDU;
1306 control.queue = pkt_data->queue;
1307
1308 ret = ieee80211_tx(odev, skb, &control);
1309 dev_put(odev); 1342 dev_put(odev);
1310 1343
1311 return ret; 1344 return ret;
@@ -1315,7 +1348,7 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1315 struct net_device *dev) 1348 struct net_device *dev)
1316{ 1349{
1317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1350 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1318 struct ieee80211_tx_packet_data *pkt_data; 1351 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1319 struct ieee80211_radiotap_header *prthdr = 1352 struct ieee80211_radiotap_header *prthdr =
1320 (struct ieee80211_radiotap_header *)skb->data; 1353 (struct ieee80211_radiotap_header *)skb->data;
1321 u16 len_rthdr; 1354 u16 len_rthdr;
@@ -1337,12 +1370,12 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1337 1370
1338 skb->dev = local->mdev; 1371 skb->dev = local->mdev;
1339 1372
1340 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1341 memset(pkt_data, 0, sizeof(*pkt_data));
1342 /* needed because we set skb device to master */ 1373 /* needed because we set skb device to master */
1343 pkt_data->ifindex = dev->ifindex; 1374 info->control.ifindex = dev->ifindex;
1344 1375
1345 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; 1376 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1377 /* Interfaces should always request a status report */
1378 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1346 1379
1347 /* 1380 /*
1348 * fix up the pointers accounting for the radiotap 1381 * fix up the pointers accounting for the radiotap
@@ -1386,10 +1419,11 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1386 struct net_device *dev) 1419 struct net_device *dev)
1387{ 1420{
1388 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1421 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1389 struct ieee80211_tx_packet_data *pkt_data; 1422 struct ieee80211_tx_info *info;
1390 struct ieee80211_sub_if_data *sdata; 1423 struct ieee80211_sub_if_data *sdata;
1391 int ret = 1, head_need; 1424 int ret = 1, head_need;
1392 u16 ethertype, hdrlen, meshhdrlen = 0, fc; 1425 u16 ethertype, hdrlen, meshhdrlen = 0;
1426 __le16 fc;
1393 struct ieee80211_hdr hdr; 1427 struct ieee80211_hdr hdr;
1394 struct ieee80211s_hdr mesh_hdr; 1428 struct ieee80211s_hdr mesh_hdr;
1395 const u8 *encaps_data; 1429 const u8 *encaps_data;
@@ -1400,8 +1434,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1400 1434
1401 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1435 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1402 if (unlikely(skb->len < ETH_HLEN)) { 1436 if (unlikely(skb->len < ETH_HLEN)) {
1403 printk(KERN_DEBUG "%s: short skb (len=%d)\n",
1404 dev->name, skb->len);
1405 ret = 0; 1437 ret = 0;
1406 goto fail; 1438 goto fail;
1407 } 1439 }
@@ -1412,12 +1444,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1412 /* convert Ethernet header to proper 802.11 header (based on 1444 /* convert Ethernet header to proper 802.11 header (based on
1413 * operation mode) */ 1445 * operation mode) */
1414 ethertype = (skb->data[12] << 8) | skb->data[13]; 1446 ethertype = (skb->data[12] << 8) | skb->data[13];
1415 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; 1447 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1416 1448
1417 switch (sdata->vif.type) { 1449 switch (sdata->vif.type) {
1418 case IEEE80211_IF_TYPE_AP: 1450 case IEEE80211_IF_TYPE_AP:
1419 case IEEE80211_IF_TYPE_VLAN: 1451 case IEEE80211_IF_TYPE_VLAN:
1420 fc |= IEEE80211_FCTL_FROMDS; 1452 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1421 /* DA BSSID SA */ 1453 /* DA BSSID SA */
1422 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1454 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1423 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1455 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
@@ -1425,7 +1457,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1425 hdrlen = 24; 1457 hdrlen = 24;
1426 break; 1458 break;
1427 case IEEE80211_IF_TYPE_WDS: 1459 case IEEE80211_IF_TYPE_WDS:
1428 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; 1460 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1429 /* RA TA DA SA */ 1461 /* RA TA DA SA */
1430 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1462 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1431 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1463 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
@@ -1435,7 +1467,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1435 break; 1467 break;
1436#ifdef CONFIG_MAC80211_MESH 1468#ifdef CONFIG_MAC80211_MESH
1437 case IEEE80211_IF_TYPE_MESH_POINT: 1469 case IEEE80211_IF_TYPE_MESH_POINT:
1438 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; 1470 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1439 /* RA TA DA SA */ 1471 /* RA TA DA SA */
1440 if (is_multicast_ether_addr(skb->data)) 1472 if (is_multicast_ether_addr(skb->data))
1441 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1473 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1465,7 +1497,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1465 break; 1497 break;
1466#endif 1498#endif
1467 case IEEE80211_IF_TYPE_STA: 1499 case IEEE80211_IF_TYPE_STA:
1468 fc |= IEEE80211_FCTL_TODS; 1500 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1469 /* BSSID SA DA */ 1501 /* BSSID SA DA */
1470 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); 1502 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
1471 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1503 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1493,13 +1525,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1493 rcu_read_lock(); 1525 rcu_read_lock();
1494 sta = sta_info_get(local, hdr.addr1); 1526 sta = sta_info_get(local, hdr.addr1);
1495 if (sta) 1527 if (sta)
1496 sta_flags = sta->flags; 1528 sta_flags = get_sta_flags(sta);
1497 rcu_read_unlock(); 1529 rcu_read_unlock();
1498 } 1530 }
1499 1531
1500 /* receiver is QoS enabled, use a QoS type frame */ 1532 /* both we and the receiver are QoS-enabled, use a QoS data frame */
1501 if (sta_flags & WLAN_STA_WME) { 1533 if (sta_flags & WLAN_STA_WME &&
1502 fc |= IEEE80211_STYPE_QOS_DATA; 1534 ieee80211_num_regular_queues(&local->hw) >= 4) {
1535 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1503 hdrlen += 2; 1536 hdrlen += 2;
1504 } 1537 }
1505 1538
@@ -1527,7 +1560,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1527 goto fail; 1560 goto fail;
1528 } 1561 }
1529 1562
1530 hdr.frame_control = cpu_to_le16(fc); 1563 hdr.frame_control = fc;
1531 hdr.duration_id = 0; 1564 hdr.duration_id = 0;
1532 hdr.seq_ctrl = 0; 1565 hdr.seq_ctrl = 0;
1533 1566
@@ -1562,32 +1595,26 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1562 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and 1595 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1563 * alloc_skb() (net/core/skbuff.c) 1596 * alloc_skb() (net/core/skbuff.c)
1564 */ 1597 */
1565 head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; 1598 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1566 head_need -= skb_headroom(skb);
1567 1599
1568 /* We are going to modify skb data, so make a copy of it if happens to 1600 /*
1569 * be cloned. This could happen, e.g., with Linux bridge code passing 1601 * So we need to modify the skb header and hence need a copy of
1570 * us broadcast frames. */ 1602 * that. The head_need variable above doesn't, so far, include
1603 * the needed header space that we don't need right away. If we
1604 * can, then we don't reallocate right now but only after the
1605 * frame arrives at the master device (if it does...)
1606 *
1607 * If we cannot, however, then we will reallocate to include all
1608 * the ever needed space. Also, if we need to reallocate it anyway,
1609 * make it big enough for everything we may ever need.
1610 */
1571 1611
1572 if (head_need > 0 || skb_cloned(skb)) { 1612 if (head_need > 0 || skb_cloned(skb)) {
1573#if 0 1613 head_need += IEEE80211_ENCRYPT_HEADROOM;
1574 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1614 head_need += local->tx_headroom;
1575 "of headroom\n", dev->name, head_need); 1615 head_need = max_t(int, 0, head_need);
1576#endif 1616 if (ieee80211_skb_resize(local, skb, head_need, true))
1577
1578 if (skb_cloned(skb))
1579 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1580 else
1581 I802_DEBUG_INC(local->tx_expand_skb_head);
1582 /* Since we have to reallocate the buffer, make sure that there
1583 * is enough room for possible WEP IV/ICV and TKIP (8 bytes
1584 * before payload and 12 after). */
1585 if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8),
1586 12, GFP_ATOMIC)) {
1587 printk(KERN_DEBUG "%s: failed to reallocate TX buffer"
1588 "\n", dev->name);
1589 goto fail; 1617 goto fail;
1590 }
1591 } 1618 }
1592 1619
1593 if (encaps_data) { 1620 if (encaps_data) {
@@ -1602,7 +1629,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1602 h_pos += meshhdrlen; 1629 h_pos += meshhdrlen;
1603 } 1630 }
1604 1631
1605 if (fc & IEEE80211_STYPE_QOS_DATA) { 1632 if (ieee80211_is_data_qos(fc)) {
1606 __le16 *qos_control; 1633 __le16 *qos_control;
1607 1634
1608 qos_control = (__le16*) skb_push(skb, 2); 1635 qos_control = (__le16*) skb_push(skb, 2);
@@ -1618,11 +1645,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1618 nh_pos += hdrlen; 1645 nh_pos += hdrlen;
1619 h_pos += hdrlen; 1646 h_pos += hdrlen;
1620 1647
1621 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1648 info = IEEE80211_SKB_CB(skb);
1622 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 1649 memset(info, 0, sizeof(*info));
1623 pkt_data->ifindex = dev->ifindex; 1650 info->control.ifindex = dev->ifindex;
1624 if (ethertype == ETH_P_PAE) 1651 if (ethertype == ETH_P_PAE)
1625 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; 1652 info->flags |= IEEE80211_TX_CTL_EAPOL_FRAME;
1653
1654 /* Interfaces should always request a status report */
1655 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1626 1656
1627 skb->dev = local->mdev; 1657 skb->dev = local->mdev;
1628 dev->stats.tx_packets++; 1658 dev->stats.tx_packets++;
@@ -1647,46 +1677,55 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1647 return ret; 1677 return ret;
1648} 1678}
1649 1679
1650/* helper functions for pending packets for when queues are stopped */
1651 1680
1681/*
1682 * ieee80211_clear_tx_pending may not be called in a context where
1683 * it is possible that packets could come in again.
1684 */
1652void ieee80211_clear_tx_pending(struct ieee80211_local *local) 1685void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1653{ 1686{
1654 int i, j; 1687 int i, j;
1655 struct ieee80211_tx_stored_packet *store; 1688 struct ieee80211_tx_stored_packet *store;
1656 1689
1657 for (i = 0; i < local->hw.queues; i++) { 1690 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1658 if (!__ieee80211_queue_pending(local, i)) 1691 if (!test_bit(i, local->queues_pending))
1659 continue; 1692 continue;
1660 store = &local->pending_packet[i]; 1693 store = &local->pending_packet[i];
1661 kfree_skb(store->skb); 1694 kfree_skb(store->skb);
1662 for (j = 0; j < store->num_extra_frag; j++) 1695 for (j = 0; j < store->num_extra_frag; j++)
1663 kfree_skb(store->extra_frag[j]); 1696 kfree_skb(store->extra_frag[j]);
1664 kfree(store->extra_frag); 1697 kfree(store->extra_frag);
1665 clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); 1698 clear_bit(i, local->queues_pending);
1666 } 1699 }
1667} 1700}
1668 1701
1702/*
1703 * Transmit all pending packets. Called from tasklet; takes the master
1704 * device's TX lock so that no new packets can come in.
1705 */
1669void ieee80211_tx_pending(unsigned long data) 1706void ieee80211_tx_pending(unsigned long data)
1670{ 1707{
1671 struct ieee80211_local *local = (struct ieee80211_local *)data; 1708 struct ieee80211_local *local = (struct ieee80211_local *)data;
1672 struct net_device *dev = local->mdev; 1709 struct net_device *dev = local->mdev;
1673 struct ieee80211_tx_stored_packet *store; 1710 struct ieee80211_tx_stored_packet *store;
1674 struct ieee80211_tx_data tx; 1711 struct ieee80211_tx_data tx;
1675 int i, ret, reschedule = 0; 1712 int i, ret;
1676 1713
1677 netif_tx_lock_bh(dev); 1714 netif_tx_lock_bh(dev);
1678 for (i = 0; i < local->hw.queues; i++) { 1715 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1679 if (__ieee80211_queue_stopped(local, i)) 1716 /* Check that this queue is ok */
1717 if (__netif_subqueue_stopped(local->mdev, i))
1680 continue; 1718 continue;
1681 if (!__ieee80211_queue_pending(local, i)) { 1719
1682 reschedule = 1; 1720 if (!test_bit(i, local->queues_pending)) {
1721 ieee80211_wake_queue(&local->hw, i);
1683 continue; 1722 continue;
1684 } 1723 }
1724
1685 store = &local->pending_packet[i]; 1725 store = &local->pending_packet[i];
1686 tx.control = &store->control;
1687 tx.extra_frag = store->extra_frag; 1726 tx.extra_frag = store->extra_frag;
1688 tx.num_extra_frag = store->num_extra_frag; 1727 tx.num_extra_frag = store->num_extra_frag;
1689 tx.last_frag_rate = store->last_frag_rate; 1728 tx.last_frag_rate_idx = store->last_frag_rate_idx;
1690 tx.flags = 0; 1729 tx.flags = 0;
1691 if (store->last_frag_rate_ctrl_probe) 1730 if (store->last_frag_rate_ctrl_probe)
1692 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; 1731 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG;
@@ -1695,19 +1734,11 @@ void ieee80211_tx_pending(unsigned long data)
1695 if (ret == IEEE80211_TX_FRAG_AGAIN) 1734 if (ret == IEEE80211_TX_FRAG_AGAIN)
1696 store->skb = NULL; 1735 store->skb = NULL;
1697 } else { 1736 } else {
1698 clear_bit(IEEE80211_LINK_STATE_PENDING, 1737 clear_bit(i, local->queues_pending);
1699 &local->state[i]); 1738 ieee80211_wake_queue(&local->hw, i);
1700 reschedule = 1;
1701 } 1739 }
1702 } 1740 }
1703 netif_tx_unlock_bh(dev); 1741 netif_tx_unlock_bh(dev);
1704 if (reschedule) {
1705 if (!ieee80211_qdisc_installed(dev)) {
1706 if (!__ieee80211_queue_stopped(local, 0))
1707 netif_wake_queue(dev);
1708 } else
1709 netif_schedule(dev);
1710 }
1711} 1742}
1712 1743
1713/* functions for drivers to get certain frames */ 1744/* functions for drivers to get certain frames */
@@ -1776,23 +1807,24 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1776} 1807}
1777 1808
1778struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 1809struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1779 struct ieee80211_vif *vif, 1810 struct ieee80211_vif *vif)
1780 struct ieee80211_tx_control *control)
1781{ 1811{
1782 struct ieee80211_local *local = hw_to_local(hw); 1812 struct ieee80211_local *local = hw_to_local(hw);
1783 struct sk_buff *skb; 1813 struct sk_buff *skb = NULL;
1814 struct ieee80211_tx_info *info;
1784 struct net_device *bdev; 1815 struct net_device *bdev;
1785 struct ieee80211_sub_if_data *sdata = NULL; 1816 struct ieee80211_sub_if_data *sdata = NULL;
1786 struct ieee80211_if_ap *ap = NULL; 1817 struct ieee80211_if_ap *ap = NULL;
1818 struct ieee80211_if_sta *ifsta = NULL;
1787 struct rate_selection rsel; 1819 struct rate_selection rsel;
1788 struct beacon_data *beacon; 1820 struct beacon_data *beacon;
1789 struct ieee80211_supported_band *sband; 1821 struct ieee80211_supported_band *sband;
1790 struct ieee80211_mgmt *mgmt; 1822 struct ieee80211_mgmt *mgmt;
1791 int *num_beacons; 1823 int *num_beacons;
1792 bool err = true; 1824 enum ieee80211_band band = local->hw.conf.channel->band;
1793 u8 *pos; 1825 u8 *pos;
1794 1826
1795 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1827 sband = local->hw.wiphy->bands[band];
1796 1828
1797 rcu_read_lock(); 1829 rcu_read_lock();
1798 1830
@@ -1817,9 +1849,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1817 memcpy(skb_put(skb, beacon->head_len), beacon->head, 1849 memcpy(skb_put(skb, beacon->head_len), beacon->head,
1818 beacon->head_len); 1850 beacon->head_len);
1819 1851
1820 ieee80211_include_sequence(sdata,
1821 (struct ieee80211_hdr *)skb->data);
1822
1823 /* 1852 /*
1824 * Not very nice, but we want to allow the driver to call 1853 * Not very nice, but we want to allow the driver to call
1825 * ieee80211_beacon_get() as a response to the set_tim() 1854 * ieee80211_beacon_get() as a response to the set_tim()
@@ -1842,9 +1871,24 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1842 beacon->tail, beacon->tail_len); 1871 beacon->tail, beacon->tail_len);
1843 1872
1844 num_beacons = &ap->num_beacons; 1873 num_beacons = &ap->num_beacons;
1874 } else
1875 goto out;
1876 } else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
1877 struct ieee80211_hdr *hdr;
1878 ifsta = &sdata->u.sta;
1845 1879
1846 err = false; 1880 if (!ifsta->probe_resp)
1847 } 1881 goto out;
1882
1883 skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC);
1884 if (!skb)
1885 goto out;
1886
1887 hdr = (struct ieee80211_hdr *) skb->data;
1888 hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1889 IEEE80211_STYPE_BEACON);
1890
1891 num_beacons = &ifsta->num_beacons;
1848 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 1892 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1849 /* headroom, head length, tail length and maximum TIM length */ 1893 /* headroom, head length, tail length and maximum TIM length */
1850 skb = dev_alloc_skb(local->tx_headroom + 400); 1894 skb = dev_alloc_skb(local->tx_headroom + 400);
@@ -1855,8 +1899,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1855 mgmt = (struct ieee80211_mgmt *) 1899 mgmt = (struct ieee80211_mgmt *)
1856 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 1900 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
1857 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 1901 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
1858 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1902 mgmt->frame_control =
1859 IEEE80211_STYPE_BEACON); 1903 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
1860 memset(mgmt->da, 0xff, ETH_ALEN); 1904 memset(mgmt->da, 0xff, ETH_ALEN);
1861 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1905 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1862 /* BSSID is left zeroed, wildcard value */ 1906 /* BSSID is left zeroed, wildcard value */
@@ -1871,44 +1915,41 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1871 mesh_mgmt_ies_add(skb, sdata->dev); 1915 mesh_mgmt_ies_add(skb, sdata->dev);
1872 1916
1873 num_beacons = &sdata->u.sta.num_beacons; 1917 num_beacons = &sdata->u.sta.num_beacons;
1874 1918 } else {
1875 err = false; 1919 WARN_ON(1);
1920 goto out;
1876 } 1921 }
1877 1922
1878 if (err) { 1923 info = IEEE80211_SKB_CB(skb);
1879#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1924
1880 if (net_ratelimit()) 1925 info->band = band;
1881 printk(KERN_DEBUG "no beacon data avail for %s\n", 1926 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1882 bdev->name); 1927
1883#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 1928 if (unlikely(rsel.rate_idx < 0)) {
1929 if (net_ratelimit()) {
1930 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
1931 "no rate found\n",
1932 wiphy_name(local->hw.wiphy));
1933 }
1934 dev_kfree_skb(skb);
1884 skb = NULL; 1935 skb = NULL;
1885 goto out; 1936 goto out;
1886 } 1937 }
1887 1938
1888 if (control) { 1939 info->control.vif = vif;
1889 rate_control_get_rate(local->mdev, sband, skb, &rsel); 1940 info->tx_rate_idx = rsel.rate_idx;
1890 if (!rsel.rate) { 1941
1891 if (net_ratelimit()) { 1942 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1892 printk(KERN_DEBUG "%s: ieee80211_beacon_get: " 1943 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1893 "no rate found\n", 1944 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1894 wiphy_name(local->hw.wiphy)); 1945 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
1895 } 1946 if (sdata->bss_conf.use_short_preamble &&
1896 dev_kfree_skb(skb); 1947 sband->bitrates[rsel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE)
1897 skb = NULL; 1948 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
1898 goto out; 1949
1899 } 1950 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1951 info->control.retry_limit = 1;
1900 1952
1901 control->vif = vif;
1902 control->tx_rate = rsel.rate;
1903 if (sdata->bss_conf.use_short_preamble &&
1904 rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
1905 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
1906 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1907 control->flags |= IEEE80211_TXCTL_NO_ACK;
1908 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
1909 control->retry_limit = 1;
1910 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
1911 }
1912 (*num_beacons)++; 1953 (*num_beacons)++;
1913out: 1954out:
1914 rcu_read_unlock(); 1955 rcu_read_unlock();
@@ -1918,14 +1959,13 @@ EXPORT_SYMBOL(ieee80211_beacon_get);
1918 1959
1919void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1960void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1920 const void *frame, size_t frame_len, 1961 const void *frame, size_t frame_len,
1921 const struct ieee80211_tx_control *frame_txctl, 1962 const struct ieee80211_tx_info *frame_txctl,
1922 struct ieee80211_rts *rts) 1963 struct ieee80211_rts *rts)
1923{ 1964{
1924 const struct ieee80211_hdr *hdr = frame; 1965 const struct ieee80211_hdr *hdr = frame;
1925 u16 fctl;
1926 1966
1927 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; 1967 rts->frame_control =
1928 rts->frame_control = cpu_to_le16(fctl); 1968 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
1929 rts->duration = ieee80211_rts_duration(hw, vif, frame_len, 1969 rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
1930 frame_txctl); 1970 frame_txctl);
1931 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); 1971 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
@@ -1935,14 +1975,13 @@ EXPORT_SYMBOL(ieee80211_rts_get);
1935 1975
1936void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1976void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1937 const void *frame, size_t frame_len, 1977 const void *frame, size_t frame_len,
1938 const struct ieee80211_tx_control *frame_txctl, 1978 const struct ieee80211_tx_info *frame_txctl,
1939 struct ieee80211_cts *cts) 1979 struct ieee80211_cts *cts)
1940{ 1980{
1941 const struct ieee80211_hdr *hdr = frame; 1981 const struct ieee80211_hdr *hdr = frame;
1942 u16 fctl;
1943 1982
1944 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; 1983 cts->frame_control =
1945 cts->frame_control = cpu_to_le16(fctl); 1984 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
1946 cts->duration = ieee80211_ctstoself_duration(hw, vif, 1985 cts->duration = ieee80211_ctstoself_duration(hw, vif,
1947 frame_len, frame_txctl); 1986 frame_len, frame_txctl);
1948 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); 1987 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
@@ -1951,23 +1990,21 @@ EXPORT_SYMBOL(ieee80211_ctstoself_get);
1951 1990
1952struct sk_buff * 1991struct sk_buff *
1953ieee80211_get_buffered_bc(struct ieee80211_hw *hw, 1992ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1954 struct ieee80211_vif *vif, 1993 struct ieee80211_vif *vif)
1955 struct ieee80211_tx_control *control)
1956{ 1994{
1957 struct ieee80211_local *local = hw_to_local(hw); 1995 struct ieee80211_local *local = hw_to_local(hw);
1958 struct sk_buff *skb; 1996 struct sk_buff *skb = NULL;
1959 struct sta_info *sta; 1997 struct sta_info *sta;
1960 ieee80211_tx_handler *handler;
1961 struct ieee80211_tx_data tx; 1998 struct ieee80211_tx_data tx;
1962 ieee80211_tx_result res = TX_DROP;
1963 struct net_device *bdev; 1999 struct net_device *bdev;
1964 struct ieee80211_sub_if_data *sdata; 2000 struct ieee80211_sub_if_data *sdata;
1965 struct ieee80211_if_ap *bss = NULL; 2001 struct ieee80211_if_ap *bss = NULL;
1966 struct beacon_data *beacon; 2002 struct beacon_data *beacon;
2003 struct ieee80211_tx_info *info;
1967 2004
1968 sdata = vif_to_sdata(vif); 2005 sdata = vif_to_sdata(vif);
1969 bdev = sdata->dev; 2006 bdev = sdata->dev;
1970 2007 bss = &sdata->u.ap;
1971 2008
1972 if (!bss) 2009 if (!bss)
1973 return NULL; 2010 return NULL;
@@ -1975,19 +2012,16 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1975 rcu_read_lock(); 2012 rcu_read_lock();
1976 beacon = rcu_dereference(bss->beacon); 2013 beacon = rcu_dereference(bss->beacon);
1977 2014
1978 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || 2015 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head)
1979 !beacon->head) { 2016 goto out;
1980 rcu_read_unlock();
1981 return NULL;
1982 }
1983 2017
1984 if (bss->dtim_count != 0) 2018 if (bss->dtim_count != 0)
1985 return NULL; /* send buffered bc/mc only after DTIM beacon */ 2019 goto out; /* send buffered bc/mc only after DTIM beacon */
1986 memset(control, 0, sizeof(*control)); 2020
1987 while (1) { 2021 while (1) {
1988 skb = skb_dequeue(&bss->ps_bc_buf); 2022 skb = skb_dequeue(&bss->ps_bc_buf);
1989 if (!skb) 2023 if (!skb)
1990 return NULL; 2024 goto out;
1991 local->total_ps_buffered--; 2025 local->total_ps_buffered--;
1992 2026
1993 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { 2027 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
@@ -2000,30 +2034,21 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2000 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2034 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2001 } 2035 }
2002 2036
2003 if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) 2037 if (!ieee80211_tx_prepare(&tx, skb, local->mdev))
2004 break; 2038 break;
2005 dev_kfree_skb_any(skb); 2039 dev_kfree_skb_any(skb);
2006 } 2040 }
2041
2042 info = IEEE80211_SKB_CB(skb);
2043
2007 sta = tx.sta; 2044 sta = tx.sta;
2008 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2045 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2009 tx.channel = local->hw.conf.channel; 2046 tx.channel = local->hw.conf.channel;
2047 info->band = tx.channel->band;
2010 2048
2011 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { 2049 if (invoke_tx_handlers(&tx))
2012 res = (*handler)(&tx);
2013 if (res == TX_DROP || res == TX_QUEUED)
2014 break;
2015 }
2016 skb = tx.skb; /* handlers are allowed to change skb */
2017
2018 if (res == TX_DROP) {
2019 I802_DEBUG_INC(local->tx_handlers_drop);
2020 dev_kfree_skb(skb);
2021 skb = NULL;
2022 } else if (res == TX_QUEUED) {
2023 I802_DEBUG_INC(local->tx_handlers_queued);
2024 skb = NULL; 2050 skb = NULL;
2025 } 2051 out:
2026
2027 rcu_read_unlock(); 2052 rcu_read_unlock();
2028 2053
2029 return skb; 2054 return skb;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 4e97b266f907..19f85e1b3695 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -45,38 +45,37 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
46 enum ieee80211_if_types type) 46 enum ieee80211_if_types type)
47{ 47{
48 u16 fc; 48 __le16 fc = hdr->frame_control;
49 49
50 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ 50 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
51 if (len < 16) 51 if (len < 16)
52 return NULL; 52 return NULL;
53 53
54 fc = le16_to_cpu(hdr->frame_control); 54 if (ieee80211_is_data(fc)) {
55
56 switch (fc & IEEE80211_FCTL_FTYPE) {
57 case IEEE80211_FTYPE_DATA:
58 if (len < 24) /* drop incorrect hdr len (data) */ 55 if (len < 24) /* drop incorrect hdr len (data) */
59 return NULL; 56 return NULL;
60 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 57
61 case IEEE80211_FCTL_TODS: 58 if (ieee80211_has_a4(fc))
62 return hdr->addr1;
63 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
64 return NULL; 59 return NULL;
65 case IEEE80211_FCTL_FROMDS: 60 if (ieee80211_has_tods(fc))
61 return hdr->addr1;
62 if (ieee80211_has_fromds(fc))
66 return hdr->addr2; 63 return hdr->addr2;
67 case 0: 64
68 return hdr->addr3; 65 return hdr->addr3;
69 } 66 }
70 break; 67
71 case IEEE80211_FTYPE_MGMT: 68 if (ieee80211_is_mgmt(fc)) {
72 if (len < 24) /* drop incorrect hdr len (mgmt) */ 69 if (len < 24) /* drop incorrect hdr len (mgmt) */
73 return NULL; 70 return NULL;
74 return hdr->addr3; 71 return hdr->addr3;
75 case IEEE80211_FTYPE_CTL: 72 }
76 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL) 73
74 if (ieee80211_is_ctl(fc)) {
 75 if (ieee80211_is_pspoll(fc))
77 return hdr->addr1; 76 return hdr->addr1;
78 else if ((fc & IEEE80211_FCTL_STYPE) == 77
79 IEEE80211_STYPE_BACK_REQ) { 78 if (ieee80211_is_back_req(fc)) {
80 switch (type) { 79 switch (type) {
81 case IEEE80211_IF_TYPE_STA: 80 case IEEE80211_IF_TYPE_STA:
82 return hdr->addr2; 81 return hdr->addr2;
@@ -84,11 +83,9 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
84 case IEEE80211_IF_TYPE_VLAN: 83 case IEEE80211_IF_TYPE_VLAN:
85 return hdr->addr1; 84 return hdr->addr1;
86 default: 85 default:
87 return NULL; 86 break; /* fall through to the return */
88 } 87 }
89 } 88 }
90 else
91 return NULL;
92 } 89 }
93 90
94 return NULL; 91 return NULL;
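
For a data frame, which address field carries the BSSID is a function of the ToDS/FromDS bits, and the new ieee80211_has_a4()/has_tods()/has_fromds() chain spells that table out: both bits set means a 4-address frame with no BSSID, ToDS alone puts it in addr1, FromDS alone in addr2, and neither in addr3. The same table as a standalone program (bit values from the kernel's IEEE80211_FCTL_* definitions; the helper name is made up):

#include <stdio.h>

#define FCTL_TODS   0x0100	/* IEEE80211_FCTL_TODS */
#define FCTL_FROMDS 0x0200	/* IEEE80211_FCTL_FROMDS */

/* Which address field (1..3) holds the BSSID of a data frame; 0 = none. */
static int bssid_addr_index(unsigned int fc)
{
	int tods = !!(fc & FCTL_TODS);
	int fromds = !!(fc & FCTL_FROMDS);

	if (tods && fromds)
		return 0;	/* 4-address (WDS) frame: no BSSID field */
	if (tods)
		return 1;	/* toward the AP: RA (addr1) is the BSSID */
	if (fromds)
		return 2;	/* from the AP: TA (addr2) is the BSSID */
	return 3;		/* IBSS/direct: addr3 carries the BSSID */
}

int main(void)
{
	printf("ToDS only   -> addr%d\n", bssid_addr_index(FCTL_TODS));
	printf("FromDS only -> addr%d\n", bssid_addr_index(FCTL_FROMDS));
	printf("neither     -> addr%d\n", bssid_addr_index(0));
	printf("both        -> addr%d\n", bssid_addr_index(FCTL_TODS |
							   FCTL_FROMDS));
	return 0;
}
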
@@ -133,14 +130,46 @@ int ieee80211_get_hdrlen(u16 fc)
133} 130}
134EXPORT_SYMBOL(ieee80211_get_hdrlen); 131EXPORT_SYMBOL(ieee80211_get_hdrlen);
135 132
136int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) 133unsigned int ieee80211_hdrlen(__le16 fc)
137{ 134{
138 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *) skb->data; 135 unsigned int hdrlen = 24;
139 int hdrlen; 136
137 if (ieee80211_is_data(fc)) {
138 if (ieee80211_has_a4(fc))
139 hdrlen = 30;
140 if (ieee80211_is_data_qos(fc))
141 hdrlen += IEEE80211_QOS_CTL_LEN;
142 goto out;
143 }
144
145 if (ieee80211_is_ctl(fc)) {
146 /*
147 * ACK and CTS are 10 bytes, all others 16. To see how
148 * to get this condition consider
149 * subtype mask: 0b0000000011110000 (0x00F0)
150 * ACK subtype: 0b0000000011010000 (0x00D0)
151 * CTS subtype: 0b0000000011000000 (0x00C0)
152 * bits that matter: ^^^ (0x00E0)
153 * value of those: 0b0000000011000000 (0x00C0)
154 */
155 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
156 hdrlen = 10;
157 else
158 hdrlen = 16;
159 }
160out:
161 return hdrlen;
162}
163EXPORT_SYMBOL(ieee80211_hdrlen);
164
165unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
166{
167 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data;
168 unsigned int hdrlen;
140 169
141 if (unlikely(skb->len < 10)) 170 if (unlikely(skb->len < 10))
142 return 0; 171 return 0;
143 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); 172 hdrlen = ieee80211_hdrlen(hdr->frame_control);
144 if (unlikely(hdrlen > skb->len)) 173 if (unlikely(hdrlen > skb->len))
145 return 0; 174 return 0;
146 return hdrlen; 175 return hdrlen;
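
The mask trick documented in the new ieee80211_hdrlen() comment is easy to verify by brute force: of the sixteen possible control subtypes, only CTS (0xC0) and ACK (0xD0) satisfy (fc & 0x00E0) == 0x00C0. A throwaway check, with the frame-control layout (type in bits 2-3, subtype in bits 4-7) per IEEE 802.11:

#include <stdio.h>

int main(void)
{
	unsigned int subtype;

	for (subtype = 0; subtype < 16; subtype++) {
		/* control frame: type bits (2-3) = 01, subtype in bits 4-7 */
		unsigned int fc = 0x0004 | (subtype << 4);

		if ((fc & 0x00E0) == 0x00C0)
			printf("subtype 0x%X0: 10-byte header (CTS/ACK)\n",
			       subtype);
	}
	return 0;
}
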
@@ -258,7 +287,7 @@ EXPORT_SYMBOL(ieee80211_generic_frame_duration);
258 287
259__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, 288__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
260 struct ieee80211_vif *vif, size_t frame_len, 289 struct ieee80211_vif *vif, size_t frame_len,
261 const struct ieee80211_tx_control *frame_txctl) 290 const struct ieee80211_tx_info *frame_txctl)
262{ 291{
263 struct ieee80211_local *local = hw_to_local(hw); 292 struct ieee80211_local *local = hw_to_local(hw);
264 struct ieee80211_rate *rate; 293 struct ieee80211_rate *rate;
@@ -266,10 +295,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
266 bool short_preamble; 295 bool short_preamble;
267 int erp; 296 int erp;
268 u16 dur; 297 u16 dur;
298 struct ieee80211_supported_band *sband;
299
300 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
269 301
270 short_preamble = sdata->bss_conf.use_short_preamble; 302 short_preamble = sdata->bss_conf.use_short_preamble;
271 303
272 rate = frame_txctl->rts_cts_rate; 304 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
273 305
274 erp = 0; 306 erp = 0;
275 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 307 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -292,7 +324,7 @@ EXPORT_SYMBOL(ieee80211_rts_duration);
292__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, 324__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
293 struct ieee80211_vif *vif, 325 struct ieee80211_vif *vif,
294 size_t frame_len, 326 size_t frame_len,
295 const struct ieee80211_tx_control *frame_txctl) 327 const struct ieee80211_tx_info *frame_txctl)
296{ 328{
297 struct ieee80211_local *local = hw_to_local(hw); 329 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_rate *rate; 330 struct ieee80211_rate *rate;
@@ -300,10 +332,13 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
300 bool short_preamble; 332 bool short_preamble;
301 int erp; 333 int erp;
302 u16 dur; 334 u16 dur;
335 struct ieee80211_supported_band *sband;
336
337 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
303 338
304 short_preamble = sdata->bss_conf.use_short_preamble; 339 short_preamble = sdata->bss_conf.use_short_preamble;
305 340
306 rate = frame_txctl->rts_cts_rate; 341 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
307 erp = 0; 342 erp = 0;
308 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 343 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
309 erp = rate->flags & IEEE80211_RATE_ERP_G; 344 erp = rate->flags & IEEE80211_RATE_ERP_G;
@@ -311,7 +346,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
311 /* Data frame duration */ 346 /* Data frame duration */
312 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 347 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
313 erp, short_preamble); 348 erp, short_preamble);
314 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { 349 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
315 /* ACK duration */ 350 /* ACK duration */
316 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 351 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
317 erp, short_preamble); 352 erp, short_preamble);
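
Both duration helpers now resolve the protection rate through the current band's bitrate table rather than dereferencing a rate pointer kept in the control block; rts_cts_rate_idx is simply an index into sband->bitrates[]. A userspace model of that lookup, with the structures cut down to the fields used here and an invented rate list:

#include <stdio.h>

struct rate {
	int bitrate;		/* units of 100 kbit/s, as in mac80211 */
};

struct band {
	struct rate *bitrates;
	int n_bitrates;
};

int main(void)
{
	struct rate rates[] = { {10}, {20}, {55}, {110}, {60}, {120} };
	struct band band = { rates, 6 };
	int rts_cts_rate_idx = 4;	/* chosen by rate control elsewhere */
	struct rate *rate = &band.bitrates[rts_cts_rate_idx];

	printf("RTS/CTS sent at %d.%d Mbit/s\n",
	       rate->bitrate / 10, rate->bitrate % 10);
	return 0;
}
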
@@ -325,17 +360,10 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
325{ 360{
326 struct ieee80211_local *local = hw_to_local(hw); 361 struct ieee80211_local *local = hw_to_local(hw);
327 362
328 if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, 363 if (test_bit(queue, local->queues_pending)) {
329 &local->state[queue])) { 364 tasklet_schedule(&local->tx_pending_tasklet);
330 if (test_bit(IEEE80211_LINK_STATE_PENDING, 365 } else {
331 &local->state[queue])) 366 netif_wake_subqueue(local->mdev, queue);
332 tasklet_schedule(&local->tx_pending_tasklet);
333 else
334 if (!ieee80211_qdisc_installed(local->mdev)) {
335 if (queue == 0)
336 netif_wake_queue(local->mdev);
337 } else
338 __netif_schedule(local->mdev);
339 } 367 }
340} 368}
341EXPORT_SYMBOL(ieee80211_wake_queue); 369EXPORT_SYMBOL(ieee80211_wake_queue);
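
Waking a queue under the new multiqueue scheme is a two-way branch: if frames were buffered in software while the hardware queue was stopped (the queues_pending bit), the pending tasklet is kicked to flush them first; otherwise the subqueue is woken directly. A rough userspace analogue of that decision, with a plain bitmask in place of the kernel's bit helpers:

#include <stdio.h>

static unsigned long queues_pending;	/* one bit per hardware queue */

static void schedule_tx_pending(void) { printf("tasklet scheduled\n"); }
static void wake_subqueue(int q)      { printf("queue %d woken\n", q); }

static void wake_queue(int queue)
{
	if (queues_pending & (1UL << queue))
		schedule_tx_pending();	/* flush software-queued frames */
	else
		wake_subqueue(queue);	/* nothing pending: wake directly */
}

int main(void)
{
	queues_pending = 1UL << 1;
	wake_queue(0);			/* prints "queue 0 woken" */
	wake_queue(1);			/* prints "tasklet scheduled" */
	return 0;
}
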
@@ -344,29 +372,15 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
344{ 372{
345 struct ieee80211_local *local = hw_to_local(hw); 373 struct ieee80211_local *local = hw_to_local(hw);
346 374
347 if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) 375 netif_stop_subqueue(local->mdev, queue);
348 netif_stop_queue(local->mdev);
349 set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
350} 376}
351EXPORT_SYMBOL(ieee80211_stop_queue); 377EXPORT_SYMBOL(ieee80211_stop_queue);
352 378
353void ieee80211_start_queues(struct ieee80211_hw *hw)
354{
355 struct ieee80211_local *local = hw_to_local(hw);
356 int i;
357
358 for (i = 0; i < local->hw.queues; i++)
359 clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]);
360 if (!ieee80211_qdisc_installed(local->mdev))
361 netif_start_queue(local->mdev);
362}
363EXPORT_SYMBOL(ieee80211_start_queues);
364
365void ieee80211_stop_queues(struct ieee80211_hw *hw) 379void ieee80211_stop_queues(struct ieee80211_hw *hw)
366{ 380{
367 int i; 381 int i;
368 382
369 for (i = 0; i < hw->queues; i++) 383 for (i = 0; i < ieee80211_num_queues(hw); i++)
370 ieee80211_stop_queue(hw, i); 384 ieee80211_stop_queue(hw, i);
371} 385}
372EXPORT_SYMBOL(ieee80211_stop_queues); 386EXPORT_SYMBOL(ieee80211_stop_queues);
@@ -375,7 +389,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
375{ 389{
376 int i; 390 int i;
377 391
378 for (i = 0; i < hw->queues; i++) 392 for (i = 0; i < hw->queues + hw->ampdu_queues; i++)
379 ieee80211_wake_queue(hw, i); 393 ieee80211_wake_queue(hw, i);
380} 394}
381EXPORT_SYMBOL(ieee80211_wake_queues); 395EXPORT_SYMBOL(ieee80211_wake_queues);
@@ -404,8 +418,6 @@ void ieee80211_iterate_active_interfaces(
404 case IEEE80211_IF_TYPE_MESH_POINT: 418 case IEEE80211_IF_TYPE_MESH_POINT:
405 break; 419 break;
406 } 420 }
407 if (sdata->dev == local->mdev)
408 continue;
409 if (netif_running(sdata->dev)) 421 if (netif_running(sdata->dev))
410 iterator(data, sdata->dev->dev_addr, 422 iterator(data, sdata->dev->dev_addr,
411 &sdata->vif); 423 &sdata->vif);
@@ -439,8 +451,6 @@ void ieee80211_iterate_active_interfaces_atomic(
439 case IEEE80211_IF_TYPE_MESH_POINT: 451 case IEEE80211_IF_TYPE_MESH_POINT:
440 break; 452 break;
441 } 453 }
442 if (sdata->dev == local->mdev)
443 continue;
444 if (netif_running(sdata->dev)) 454 if (netif_running(sdata->dev))
445 iterator(data, sdata->dev->dev_addr, 455 iterator(data, sdata->dev->dev_addr,
446 &sdata->vif); 456 &sdata->vif);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index affcecd78c10..872d2fcd1a5b 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -84,24 +84,17 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
84 struct sk_buff *skb, 84 struct sk_buff *skb,
85 struct ieee80211_key *key) 85 struct ieee80211_key *key)
86{ 86{
87 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 87 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
88 u16 fc; 88 unsigned int hdrlen;
89 int hdrlen;
90 u8 *newhdr; 89 u8 *newhdr;
91 90
92 fc = le16_to_cpu(hdr->frame_control); 91 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
93 fc |= IEEE80211_FCTL_PROTECTED;
94 hdr->frame_control = cpu_to_le16(fc);
95 92
96 if ((skb_headroom(skb) < WEP_IV_LEN || 93 if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
97 skb_tailroom(skb) < WEP_ICV_LEN)) { 94 skb_headroom(skb) < WEP_IV_LEN))
98 I802_DEBUG_INC(local->tx_expand_skb_head); 95 return NULL;
99 if (unlikely(pskb_expand_head(skb, WEP_IV_LEN, WEP_ICV_LEN,
100 GFP_ATOMIC)))
101 return NULL;
102 }
103 96
104 hdrlen = ieee80211_get_hdrlen(fc); 97 hdrlen = ieee80211_hdrlen(hdr->frame_control);
105 newhdr = skb_push(skb, WEP_IV_LEN); 98 newhdr = skb_push(skb, WEP_IV_LEN);
106 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); 99 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
107 ieee80211_wep_get_iv(local, key, newhdr + hdrlen); 100 ieee80211_wep_get_iv(local, key, newhdr + hdrlen);
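
ieee80211_wep_add_iv() opens a 4-byte gap between the 802.11 header and the payload: skb_push() grows the buffer at the front by WEP_IV_LEN, memmove() slides the header back down to the new start, and the IV is written into the gap at the old header boundary. The same pointer arithmetic on a plain byte buffer (sizes and contents illustrative):

#include <stdio.h>
#include <string.h>

#define IV_LEN 4

int main(void)
{
	/* [ 4 bytes headroom ][ "HDR!" header ][ "payload" ] */
	char buf[64] = "????HDR!payload";
	char *data = buf + IV_LEN;	/* current start of the frame */
	size_t hdrlen = 4;

	char *newhdr = data - IV_LEN;		  /* skb_push(skb, IV_LEN) */
	memmove(newhdr, newhdr + IV_LEN, hdrlen); /* slide header down */
	memcpy(newhdr + hdrlen, "\x01\x02\x03\x00", IV_LEN); /* write IV */

	printf("header now at %.4s, IV follows at offset %zu\n",
	       newhdr, hdrlen);
	return 0;
}
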
@@ -113,12 +106,10 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
113 struct sk_buff *skb, 106 struct sk_buff *skb,
114 struct ieee80211_key *key) 107 struct ieee80211_key *key)
115{ 108{
116 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 109 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
117 u16 fc; 110 unsigned int hdrlen;
118 int hdrlen;
119 111
120 fc = le16_to_cpu(hdr->frame_control); 112 hdrlen = ieee80211_hdrlen(hdr->frame_control);
121 hdrlen = ieee80211_get_hdrlen(fc);
122 memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); 113 memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
123 skb_pull(skb, WEP_IV_LEN); 114 skb_pull(skb, WEP_IV_LEN);
124} 115}
@@ -228,17 +219,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
228 u32 klen; 219 u32 klen;
229 u8 *rc4key; 220 u8 *rc4key;
230 u8 keyidx; 221 u8 keyidx;
231 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 222 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
232 u16 fc; 223 unsigned int hdrlen;
233 int hdrlen;
234 size_t len; 224 size_t len;
235 int ret = 0; 225 int ret = 0;
236 226
237 fc = le16_to_cpu(hdr->frame_control); 227 if (!ieee80211_has_protected(hdr->frame_control))
238 if (!(fc & IEEE80211_FCTL_PROTECTED))
239 return -1; 228 return -1;
240 229
241 hdrlen = ieee80211_get_hdrlen(fc); 230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
242 231
243 if (skb->len < 8 + hdrlen) 232 if (skb->len < 8 + hdrlen)
244 return -1; 233 return -1;
@@ -264,11 +253,8 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
264 253
265 if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, 254 if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
266 skb->data + hdrlen + WEP_IV_LEN, 255 skb->data + hdrlen + WEP_IV_LEN,
267 len)) { 256 len))
268 if (net_ratelimit())
269 printk(KERN_DEBUG "WEP decrypt failed (ICV)\n");
270 ret = -1; 257 ret = -1;
271 }
272 258
273 kfree(rc4key); 259 kfree(rc4key);
274 260
@@ -285,17 +271,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
285 271
286u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) 272u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
287{ 273{
288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 274 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
289 u16 fc; 275 unsigned int hdrlen;
290 int hdrlen;
291 u8 *ivpos; 276 u8 *ivpos;
292 u32 iv; 277 u32 iv;
293 278
294 fc = le16_to_cpu(hdr->frame_control); 279 if (!ieee80211_has_protected(hdr->frame_control))
295 if (!(fc & IEEE80211_FCTL_PROTECTED))
296 return NULL; 280 return NULL;
297 281
298 hdrlen = ieee80211_get_hdrlen(fc); 282 hdrlen = ieee80211_hdrlen(hdr->frame_control);
299 ivpos = skb->data + hdrlen; 283 ivpos = skb->data + hdrlen;
300 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; 284 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
301 285
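
The three IV bytes following the header are folded big-endian into one 24-bit value so the weak-IV test can compare them numerically. The check itself falls outside this hunk; the sketch below assumes the classic FMS-weak pattern, a first byte in [3, 3 + keylen) with a second byte of 0xff:

#include <stdio.h>

/* Assumed FMS test: IV bytes (B+3, 0xff, x) for some key byte index B. */
static int iv_is_weak(unsigned int iv, unsigned int keylen)
{
	unsigned int first = iv >> 16;	/* first IV byte */

	if ((iv & 0x00ff00) != 0x00ff00)
		return 0;
	return first >= 3 && first < 3 + keylen;
}

int main(void)
{
	unsigned char ivpos[3] = { 0x04, 0xff, 0x17 };
	unsigned int iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];

	printf("iv = 0x%06x, weak = %d\n", iv, iv_is_weak(iv, 5));
	return 0;
}
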
@@ -314,14 +298,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
314 return RX_CONTINUE; 298 return RX_CONTINUE;
315 299
316 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { 301 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
318#ifdef CONFIG_MAC80211_DEBUG
319 if (net_ratelimit())
320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
321 "failed\n", rx->dev->name);
322#endif /* CONFIG_MAC80211_DEBUG */
323 return RX_DROP_UNUSABLE; 302 return RX_DROP_UNUSABLE;
324 }
325 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
327 /* remove ICV */ 305 /* remove ICV */
@@ -333,11 +311,16 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
333 311
334static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) 312static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
335{ 313{
314 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
315
316 info->control.iv_len = WEP_IV_LEN;
317 info->control.icv_len = WEP_ICV_LEN;
318
336 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 319 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
337 if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) 320 if (ieee80211_wep_encrypt(tx->local, skb, tx->key))
338 return -1; 321 return -1;
339 } else { 322 } else {
340 tx->control->key_idx = tx->key->conf.hw_key_idx; 323 info->control.hw_key = &tx->key->conf;
341 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 324 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
342 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) 325 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key))
343 return -1; 326 return -1;
@@ -349,8 +332,6 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
349ieee80211_tx_result 332ieee80211_tx_result
350ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) 333ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
351{ 334{
352 tx->control->iv_len = WEP_IV_LEN;
353 tx->control->icv_len = WEP_ICV_LEN;
354 ieee80211_tx_set_protected(tx); 335 ieee80211_tx_set_protected(tx);
355 336
356 if (wep_encrypt_skb(tx, tx->skb) < 0) { 337 if (wep_encrypt_skb(tx, tx->skb) < 0) {
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 363779c50658..e587172115b8 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -26,7 +26,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
26 struct ieee80211_key *key); 26 struct ieee80211_key *key);
27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, 27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
28 struct ieee80211_key *key); 28 struct ieee80211_key *key);
29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 29u8 *ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
30 30
31ieee80211_rx_result 31ieee80211_rx_result
32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); 32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index e8404212ad57..34fa8ed1e784 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -142,7 +142,39 @@ static int ieee80211_ioctl_giwname(struct net_device *dev,
142 struct iw_request_info *info, 142 struct iw_request_info *info,
143 char *name, char *extra) 143 char *name, char *extra)
144{ 144{
145 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
146 struct ieee80211_supported_band *sband;
147 u8 is_ht = 0, is_a = 0, is_b = 0, is_g = 0;
148
149
150 sband = local->hw.wiphy->bands[IEEE80211_BAND_5GHZ];
151 if (sband) {
152 is_a = 1;
153 is_ht |= sband->ht_info.ht_supported;
154 }
155
156 sband = local->hw.wiphy->bands[IEEE80211_BAND_2GHZ];
157 if (sband) {
158 int i;
159 /* Check for mandatory rates */
160 for (i = 0; i < sband->n_bitrates; i++) {
161 if (sband->bitrates[i].bitrate == 10)
162 is_b = 1;
163 if (sband->bitrates[i].bitrate == 60)
164 is_g = 1;
165 }
166 is_ht |= sband->ht_info.ht_supported;
167 }
168
145 strcpy(name, "IEEE 802.11"); 169 strcpy(name, "IEEE 802.11");
170 if (is_a)
171 strcat(name, "a");
172 if (is_b)
173 strcat(name, "b");
174 if (is_g)
175 strcat(name, "g");
176 if (is_ht)
177 strcat(name, "n");
146 178
147 return 0; 179 return 0;
148} 180}
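
The bitrate field is in units of 100 kbit/s, so the loop added above probes for the 1 Mbit/s DSSS mandatory rate (implying 802.11b) and the 6 Mbit/s OFDM mandatory rate (implying 802.11g) in the 2.4 GHz band. The same probe as a standalone program with an invented rate list:

#include <stdio.h>

int main(void)
{
	/* bitrates in 100 kbit/s units: 1, 2, 5.5, 11, 6 and 9 Mbit/s */
	int bitrates[] = { 10, 20, 55, 110, 60, 90 };
	int i, is_b = 0, is_g = 0;

	for (i = 0; i < 6; i++) {
		if (bitrates[i] == 10)	/* 1 Mbit/s: 802.11b mandatory */
			is_b = 1;
		if (bitrates[i] == 60)	/* 6 Mbit/s: 802.11g mandatory */
			is_g = 1;
	}
	printf("IEEE 802.11%s%s\n", is_b ? "b" : "", is_g ? "g" : "");
	return 0;
}
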
@@ -176,14 +208,26 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
176 range->num_encoding_sizes = 2; 208 range->num_encoding_sizes = 2;
177 range->max_encoding_tokens = NUM_DEFAULT_KEYS; 209 range->max_encoding_tokens = NUM_DEFAULT_KEYS;
178 210
179 range->max_qual.qual = local->hw.max_signal; 211 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC ||
180 range->max_qual.level = local->hw.max_rssi; 212 local->hw.flags & IEEE80211_HW_SIGNAL_DB)
181 range->max_qual.noise = local->hw.max_noise; 213 range->max_qual.level = local->hw.max_signal;
214 else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
215 range->max_qual.level = -110;
216 else
217 range->max_qual.level = 0;
218
219 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
220 range->max_qual.noise = -110;
221 else
222 range->max_qual.noise = 0;
223
224 range->max_qual.qual = 100;
182 range->max_qual.updated = local->wstats_flags; 225 range->max_qual.updated = local->wstats_flags;
183 226
184 range->avg_qual.qual = local->hw.max_signal/2; 227 range->avg_qual.qual = 50;
185 range->avg_qual.level = 0; 228 /* not always true but better than nothing */
186 range->avg_qual.noise = 0; 229 range->avg_qual.level = range->max_qual.level / 2;
230 range->avg_qual.noise = range->max_qual.noise / 2;
187 range->avg_qual.updated = local->wstats_flags; 231 range->avg_qual.updated = local->wstats_flags;
188 232
189 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 233 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
@@ -252,15 +296,7 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
252 return -EINVAL; 296 return -EINVAL;
253 } 297 }
254 298
255 if (type == sdata->vif.type) 299 return ieee80211_if_change_type(sdata, type);
256 return 0;
257 if (netif_running(dev))
258 return -EBUSY;
259
260 ieee80211_if_reinit(dev);
261 ieee80211_if_set_type(dev, type);
262
263 return 0;
264} 300}
265 301
266 302
@@ -408,7 +444,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
408 memset(sdata->u.ap.ssid + len, 0, 444 memset(sdata->u.ap.ssid + len, 0,
409 IEEE80211_MAX_SSID_LEN - len); 445 IEEE80211_MAX_SSID_LEN - len);
410 sdata->u.ap.ssid_len = len; 446 sdata->u.ap.ssid_len = len;
411 return ieee80211_if_config(dev); 447 return ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
412 } 448 }
413 return -EOPNOTSUPP; 449 return -EOPNOTSUPP;
414} 450}
@@ -562,7 +598,7 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
562 if (local->sta_sw_scanning || local->sta_hw_scanning) 598 if (local->sta_sw_scanning || local->sta_hw_scanning)
563 return -EAGAIN; 599 return -EAGAIN;
564 600
565 res = ieee80211_sta_scan_results(dev, extra, data->length); 601 res = ieee80211_sta_scan_results(dev, info, extra, data->length);
566 if (res >= 0) { 602 if (res >= 0) {
567 data->length = res; 603 data->length = res;
568 return 0; 604 return 0;
@@ -583,16 +619,14 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
583 struct ieee80211_supported_band *sband; 619 struct ieee80211_supported_band *sband;
584 620
585 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 621 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
586 if (!sdata->bss)
587 return -ENODEV;
588 622
589 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 623 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
590 624
591 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates 625 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
592 * target_rate = X, rate->fixed = 1 means only rate X 626 * target_rate = X, rate->fixed = 1 means only rate X
593 * target_rate = X, rate->fixed = 0 means all rates <= X */ 627 * target_rate = X, rate->fixed = 0 means all rates <= X */
594 sdata->bss->max_ratectrl_rateidx = -1; 628 sdata->max_ratectrl_rateidx = -1;
595 sdata->bss->force_unicast_rateidx = -1; 629 sdata->force_unicast_rateidx = -1;
596 if (rate->value < 0) 630 if (rate->value < 0)
597 return 0; 631 return 0;
598 632
@@ -601,9 +635,9 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
601 int this_rate = brate->bitrate; 635 int this_rate = brate->bitrate;
602 636
603 if (target_rate == this_rate) { 637 if (target_rate == this_rate) {
604 sdata->bss->max_ratectrl_rateidx = i; 638 sdata->max_ratectrl_rateidx = i;
605 if (rate->fixed) 639 if (rate->fixed)
606 sdata->bss->force_unicast_rateidx = i; 640 sdata->force_unicast_rateidx = i;
607 err = 0; 641 err = 0;
608 break; 642 break;
609 } 643 }
@@ -716,6 +750,9 @@ static int ieee80211_ioctl_siwrts(struct net_device *dev,
716 750
717 if (rts->disabled) 751 if (rts->disabled)
718 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 752 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
753 else if (!rts->fixed)
754 /* if the rts value is not fixed, then take default */
755 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
719 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD) 756 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD)
720 return -EINVAL; 757 return -EINVAL;
721 else 758 else
@@ -753,6 +790,8 @@ static int ieee80211_ioctl_siwfrag(struct net_device *dev,
753 790
754 if (frag->disabled) 791 if (frag->disabled)
755 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 792 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
793 else if (!frag->fixed)
794 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
756 else if (frag->value < 256 || 795 else if (frag->value < 256 ||
757 frag->value > IEEE80211_MAX_FRAG_THRESHOLD) 796 frag->value > IEEE80211_MAX_FRAG_THRESHOLD)
758 return -EINVAL; 797 return -EINVAL;
@@ -944,6 +983,58 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev,
944 erq->length = sdata->keys[idx]->conf.keylen; 983 erq->length = sdata->keys[idx]->conf.keylen;
945 erq->flags |= IW_ENCODE_ENABLED; 984 erq->flags |= IW_ENCODE_ENABLED;
946 985
986 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
987 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
988 switch (ifsta->auth_alg) {
989 case WLAN_AUTH_OPEN:
990 case WLAN_AUTH_LEAP:
991 erq->flags |= IW_ENCODE_OPEN;
992 break;
993 case WLAN_AUTH_SHARED_KEY:
994 erq->flags |= IW_ENCODE_RESTRICTED;
995 break;
996 }
997 }
998
999 return 0;
1000}
1001
1002static int ieee80211_ioctl_siwpower(struct net_device *dev,
1003 struct iw_request_info *info,
1004 struct iw_param *wrq,
1005 char *extra)
1006{
1007 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1008 struct ieee80211_conf *conf = &local->hw.conf;
1009
1010 if (wrq->disabled) {
1011 conf->flags &= ~IEEE80211_CONF_PS;
1012 return ieee80211_hw_config(local);
1013 }
1014
1015 switch (wrq->flags & IW_POWER_MODE) {
1016 case IW_POWER_ON: /* If not specified */
1017 case IW_POWER_MODE: /* If set all mask */
 1018 case IW_POWER_ALL_R: /* If explicitly set to all */

1019 conf->flags |= IEEE80211_CONF_PS;
1020 break;
1021 default: /* Otherwise we don't support it */
1022 return -EINVAL;
1023 }
1024
1025 return ieee80211_hw_config(local);
1026}
1027
1028static int ieee80211_ioctl_giwpower(struct net_device *dev,
1029 struct iw_request_info *info,
1030 union iwreq_data *wrqu,
1031 char *extra)
1032{
1033 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1034 struct ieee80211_conf *conf = &local->hw.conf;
1035
1036 wrqu->power.disabled = !(conf->flags & IEEE80211_CONF_PS);
1037
947 return 0; 1038 return 0;
948} 1039}
949 1040
@@ -1015,8 +1106,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
1015 wstats->qual.noise = 0; 1106 wstats->qual.noise = 0;
1016 wstats->qual.updated = IW_QUAL_ALL_INVALID; 1107 wstats->qual.updated = IW_QUAL_ALL_INVALID;
1017 } else { 1108 } else {
1018 wstats->qual.level = sta->last_rssi; 1109 wstats->qual.level = sta->last_signal;
1019 wstats->qual.qual = sta->last_signal; 1110 wstats->qual.qual = sta->last_qual;
1020 wstats->qual.noise = sta->last_noise; 1111 wstats->qual.noise = sta->last_noise;
1021 wstats->qual.updated = local->wstats_flags; 1112 wstats->qual.updated = local->wstats_flags;
1022 } 1113 }
@@ -1149,8 +1240,8 @@ static const iw_handler ieee80211_handler[] =
1149 (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ 1240 (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */
1150 (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ 1241 (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */
1151 (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */ 1242 (iw_handler) ieee80211_ioctl_giwencode, /* SIOCGIWENCODE */
1152 (iw_handler) NULL, /* SIOCSIWPOWER */ 1243 (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */
1153 (iw_handler) NULL, /* SIOCGIWPOWER */ 1244 (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */
1154 (iw_handler) NULL, /* -- hole -- */ 1245 (iw_handler) NULL, /* -- hole -- */
1155 (iw_handler) NULL, /* -- hole -- */ 1246 (iw_handler) NULL, /* -- hole -- */
1156 (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */ 1247 (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 5d09e8698b57..6e8099e77043 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -18,61 +18,42 @@
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "wme.h" 19#include "wme.h"
20 20
21/* maximum number of hardware queues we support. */ 21/* Default mapping in classifier to work with default
22#define TC_80211_MAX_QUEUES 16 22 * queue setup.
23 23 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 25
26struct ieee80211_sched_data
27{
28 unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
29 struct tcf_proto *filter_list;
30 struct Qdisc *queues[TC_80211_MAX_QUEUES];
31 struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
32};
33
34static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0}; 26static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
35 27
36/* given a data frame determine the 802.1p/1d tag to use */ 28/* Given a data frame determine the 802.1p/1d tag to use. */
37static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd) 29static unsigned int classify_1d(struct sk_buff *skb)
38{ 30{
39 struct iphdr *ip; 31 unsigned int dscp;
40 int dscp;
41 int offset;
42
43 struct ieee80211_sched_data *q = qdisc_priv(qd);
44 struct tcf_result res = { -1, 0 };
45
46 /* if there is a user set filter list, call out to that */
47 if (q->filter_list) {
48 tc_classify(skb, q->filter_list, &res);
49 if (res.class != -1)
50 return res.class;
51 }
52 32
53 /* skb->priority values from 256->263 are magic values to 33 /* skb->priority values from 256->263 are magic values to
54 * directly indicate a specific 802.1d priority. 34 * directly indicate a specific 802.1d priority. This is used
55 * This is used to allow 802.1d priority to be passed directly in 35 * to allow 802.1d priority to be passed directly in from VLAN
56 * from VLAN tags, etc. */ 36 * tags, etc.
37 */
57 if (skb->priority >= 256 && skb->priority <= 263) 38 if (skb->priority >= 256 && skb->priority <= 263)
58 return skb->priority - 256; 39 return skb->priority - 256;
59 40
60 /* check there is a valid IP header present */ 41 switch (skb->protocol) {
61 offset = ieee80211_get_hdrlen_from_skb(skb); 42 case __constant_htons(ETH_P_IP):
62 if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) || 43 dscp = ip_hdr(skb)->tos & 0xfc;
63 memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr))) 44 break;
64 return 0;
65 45
66 ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr)); 46 default:
47 return 0;
48 }
67 49
68 dscp = ip->tos & 0xfc;
69 if (dscp & 0x1c) 50 if (dscp & 0x1c)
70 return 0; 51 return 0;
71 return dscp >> 5; 52 return dscp >> 5;
72} 53}
73 54
74 55
75static inline int wme_downgrade_ac(struct sk_buff *skb) 56static int wme_downgrade_ac(struct sk_buff *skb)
76{ 57{
77 switch (skb->priority) { 58 switch (skb->priority) {
78 case 6: 59 case 6:
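
classify_1d() keeps only the DSCP precedence bits: tos & 0xfc masks off the two ECN bits, the dscp & 0x1c test sends anything that is not a plain class selector to best effort, and dscp >> 5 yields the 802.1d priority 0-7, which then indexes ieee802_1d_to_ac[] (queue 0 being the highest-priority AC). A worked run of the whole pipeline:

#include <stdio.h>

static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static unsigned int tos_to_1d(unsigned int tos)
{
	unsigned int dscp = tos & 0xfc;	/* drop the two ECN bits */

	if (dscp & 0x1c)	/* not a plain class selector: best effort */
		return 0;
	return dscp >> 5;	/* precedence bits -> 802.1d priority */
}

int main(void)
{
	unsigned int tos[] = { 0x00, 0x20, 0xb8, 0xc0 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int prio = tos_to_1d(tos[i]);

		printf("tos 0x%02x -> 802.1d %u -> queue %d\n",
		       tos[i], prio, ieee802_1d_to_ac[prio]);
	}
	return 0;
}
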
@@ -93,43 +74,38 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
93} 74}
94 75
95 76
96/* positive return value indicates which queue to use 77/* Indicate which queue to use. */
97 * negative return value indicates to drop the frame */ 78static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
98static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
99{ 79{
100 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 80 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 81 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
102 unsigned short fc = le16_to_cpu(hdr->frame_control);
103 int qos;
104 82
105 /* see if frame is data or non data frame */ 83 if (!ieee80211_is_data(hdr->frame_control)) {
106 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
107 /* management frames go on AC_VO queue, but are sent 84 /* management frames go on AC_VO queue, but are sent
108 * without QoS control fields */ 85 * without QoS control fields */
109 return IEEE80211_TX_QUEUE_DATA0; 86 return 0;
110 } 87 }
111 88
112 if (0 /* injected */) { 89 if (0 /* injected */) {
113 /* use AC from radiotap */ 90 /* use AC from radiotap */
114 } 91 }
115 92
116 /* is this a QoS frame? */ 93 if (!ieee80211_is_data_qos(hdr->frame_control)) {
117 qos = fc & IEEE80211_STYPE_QOS_DATA;
118
119 if (!qos) {
120 skb->priority = 0; /* required for correct WPA/11i MIC */ 94 skb->priority = 0; /* required for correct WPA/11i MIC */
121 return ieee802_1d_to_ac[skb->priority]; 95 return ieee802_1d_to_ac[skb->priority];
122 } 96 }
123 97
124 /* use the data classifier to determine what 802.1d tag the 98 /* use the data classifier to determine what 802.1d tag the
125 * data frame has */ 99 * data frame has */
126 skb->priority = classify_1d(skb, qd); 100 skb->priority = classify_1d(skb);
127 101
128 /* in case we are a client verify acm is not set for this ac */ 102 /* in case we are a client verify acm is not set for this ac */
129 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 103 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
130 if (wme_downgrade_ac(skb)) { 104 if (wme_downgrade_ac(skb)) {
131 /* No AC with lower priority has acm=0, drop packet. */ 105 /* The old code would drop the packet in this
132 return -1; 106 * case.
107 */
108 return 0;
133 } 109 }
134 } 110 }
135 111
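
wme_downgrade_ac() lowers the 802.1d priority one access category at a time so the ACM loop in classify80211() can retry at the next AC down. The function body is elided by the diff context; the sketch below assumes the usual ladder from this era of the code, voice to video to best effort to background, failing once background is reached:

#include <stdio.h>

/* Assumed ladder; the real body is hidden by the diff context. */
static int downgrade(int *prio)
{
	switch (*prio) {
	case 6: case 7: *prio = 5; return 0;	/* VO -> VI */
	case 4: case 5: *prio = 3; return 0;	/* VI -> BE */
	case 0: case 3: *prio = 2; return 0;	/* BE -> BK */
	default:	return -1;		/* already BK */
	}
}

int main(void)
{
	int prio = 6;

	while (downgrade(&prio) == 0)
		printf("downgraded to 802.1d priority %d\n", prio);
	return 0;
}
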
@@ -137,55 +113,52 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
137 return ieee802_1d_to_ac[skb->priority]; 113 return ieee802_1d_to_ac[skb->priority];
138} 114}
139 115
140 116u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
141static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
142{ 117{
143 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
144 struct ieee80211_sched_data *q = qdisc_priv(qd);
145 struct ieee80211_tx_packet_data *pkt_data =
146 (struct ieee80211_tx_packet_data *) skb->cb;
147 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 118 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
148 unsigned short fc = le16_to_cpu(hdr->frame_control); 119 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
149 struct Qdisc *qdisc; 120 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
150 int err, queue;
151 struct sta_info *sta; 121 struct sta_info *sta;
122 u16 queue;
152 u8 tid; 123 u8 tid;
153 124
154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { 125 queue = classify80211(skb, dev);
155 queue = pkt_data->queue; 126 if (unlikely(queue >= local->hw.queues))
127 queue = local->hw.queues - 1;
128
129 if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
156 rcu_read_lock(); 130 rcu_read_lock();
157 sta = sta_info_get(local, hdr->addr1); 131 sta = sta_info_get(local, hdr->addr1);
158 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 132 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
159 if (sta) { 133 if (sta) {
134 struct ieee80211_hw *hw = &local->hw;
160 int ampdu_queue = sta->tid_to_tx_q[tid]; 135 int ampdu_queue = sta->tid_to_tx_q[tid];
161 if ((ampdu_queue < local->hw.queues) && 136
162 test_bit(ampdu_queue, q->qdisc_pool)) { 137 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
138 test_bit(ampdu_queue, local->queue_pool)) {
163 queue = ampdu_queue; 139 queue = ampdu_queue;
164 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 140 info->flags |= IEEE80211_TX_CTL_AMPDU;
165 } else { 141 } else {
166 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 142 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
167 } 143 }
168 } 144 }
169 rcu_read_unlock(); 145 rcu_read_unlock();
170 skb_queue_tail(&q->requeued[queue], skb);
171 qd->q.qlen++;
172 return 0;
173 }
174 146
175 queue = classify80211(skb, qd); 147 return queue;
148 }
176 149
177 /* now we know the 1d priority, fill in the QoS header if there is one 150 /* Now we know the 1d priority, fill in the QoS header if
151 * there is one.
178 */ 152 */
179 if (WLAN_FC_IS_QOS_DATA(fc)) { 153 if (ieee80211_is_data_qos(hdr->frame_control)) {
180 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; 154 u8 *p = ieee80211_get_qos_ctl(hdr);
181 u8 ack_policy = 0; 155 u8 ack_policy = 0;
182 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 156 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
183 if (local->wifi_wme_noack_test) 157 if (local->wifi_wme_noack_test)
184 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 158 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
185 QOS_CONTROL_ACK_POLICY_SHIFT; 159 QOS_CONTROL_ACK_POLICY_SHIFT;
186 /* qos header is 2 bytes, second reserved */ 160 /* qos header is 2 bytes, second reserved */
187 *p = ack_policy | tid; 161 *p++ = ack_policy | tid;
188 p++;
189 *p = 0; 162 *p = 0;
190 163
191 rcu_read_lock(); 164 rcu_read_lock();
@@ -193,475 +166,37 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
193 sta = sta_info_get(local, hdr->addr1); 166 sta = sta_info_get(local, hdr->addr1);
194 if (sta) { 167 if (sta) {
195 int ampdu_queue = sta->tid_to_tx_q[tid]; 168 int ampdu_queue = sta->tid_to_tx_q[tid];
196 if ((ampdu_queue < local->hw.queues) && 169 struct ieee80211_hw *hw = &local->hw;
197 test_bit(ampdu_queue, q->qdisc_pool)) { 170
171 if ((ampdu_queue < ieee80211_num_queues(hw)) &&
172 test_bit(ampdu_queue, local->queue_pool)) {
198 queue = ampdu_queue; 173 queue = ampdu_queue;
199 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 174 info->flags |= IEEE80211_TX_CTL_AMPDU;
200 } else { 175 } else {
201 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 176 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
202 } 177 }
203 } 178 }
204 179
205 rcu_read_unlock(); 180 rcu_read_unlock();
206 } 181 }
207 182
208 if (unlikely(queue >= local->hw.queues)) {
209#if 0
210 if (net_ratelimit()) {
211 printk(KERN_DEBUG "%s - queue=%d (hw does not "
212 "support) -> %d\n",
213 __func__, queue, local->hw.queues - 1);
214 }
215#endif
216 queue = local->hw.queues - 1;
217 }
218
219 if (unlikely(queue < 0)) {
220 kfree_skb(skb);
221 err = NET_XMIT_DROP;
222 } else {
223 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
224 pkt_data->queue = (unsigned int) queue;
225 qdisc = q->queues[queue];
226 err = qdisc->enqueue(skb, qdisc);
227 if (err == NET_XMIT_SUCCESS) {
228 qd->q.qlen++;
229 qd->bstats.bytes += skb->len;
230 qd->bstats.packets++;
231 return NET_XMIT_SUCCESS;
232 }
233 }
234 qd->qstats.drops++;
235 return err;
236}
237
238
239/* TODO: clean up the cases where master_hard_start_xmit
240 * returns non 0 - it shouldn't ever do that. Once done we
241 * can remove this function */
242static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
243{
244 struct ieee80211_sched_data *q = qdisc_priv(qd);
245 struct ieee80211_tx_packet_data *pkt_data =
246 (struct ieee80211_tx_packet_data *) skb->cb;
247 struct Qdisc *qdisc;
248 int err;
249
250 /* we recorded which queue to use earlier! */
251 qdisc = q->queues[pkt_data->queue];
252
253 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
254 qd->q.qlen++;
255 return 0;
256 }
257 qd->qstats.drops++;
258 return err;
259}
260
261
262static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
263{
264 struct ieee80211_sched_data *q = qdisc_priv(qd);
265 struct net_device *dev = qd->dev;
266 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
267 struct ieee80211_hw *hw = &local->hw;
268 struct sk_buff *skb;
269 struct Qdisc *qdisc;
270 int queue;
271
272 /* check all the h/w queues in numeric/priority order */
273 for (queue = 0; queue < hw->queues; queue++) {
274 /* see if there is room in this hardware queue */
275 if ((test_bit(IEEE80211_LINK_STATE_XOFF,
276 &local->state[queue])) ||
277 (test_bit(IEEE80211_LINK_STATE_PENDING,
278 &local->state[queue])) ||
279 (!test_bit(queue, q->qdisc_pool)))
280 continue;
281
282 /* there is space - try and get a frame */
283 skb = skb_dequeue(&q->requeued[queue]);
284 if (skb) {
285 qd->q.qlen--;
286 return skb;
287 }
288
289 qdisc = q->queues[queue];
290 skb = qdisc->dequeue(qdisc);
291 if (skb) {
292 qd->q.qlen--;
293 return skb;
294 }
295 }
296 /* returning a NULL here when all the h/w queues are full means we
297 * never need to call netif_stop_queue in the driver */
298 return NULL;
299}
300
301
302static void wme_qdiscop_reset(struct Qdisc* qd)
303{
304 struct ieee80211_sched_data *q = qdisc_priv(qd);
305 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
306 struct ieee80211_hw *hw = &local->hw;
307 int queue;
308
309 /* QUESTION: should we have some hardware flush functionality here? */
310
311 for (queue = 0; queue < hw->queues; queue++) {
312 skb_queue_purge(&q->requeued[queue]);
313 qdisc_reset(q->queues[queue]);
314 }
315 qd->q.qlen = 0;
316}
317
318
319static void wme_qdiscop_destroy(struct Qdisc* qd)
320{
321 struct ieee80211_sched_data *q = qdisc_priv(qd);
322 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
323 struct ieee80211_hw *hw = &local->hw;
324 int queue;
325
326 tcf_destroy_chain(&q->filter_list);
327
328 for (queue=0; queue < hw->queues; queue++) {
329 skb_queue_purge(&q->requeued[queue]);
330 qdisc_destroy(q->queues[queue]);
331 q->queues[queue] = &noop_qdisc;
332 }
333}
334
335
336/* called whenever parameters are updated on existing qdisc */
337static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
338{
339/* struct ieee80211_sched_data *q = qdisc_priv(qd);
340*/
341 /* check our options block is the right size */
342 /* copy any options to our local structure */
343/* Ignore options block for now - always use static mapping
344 struct tc_ieee80211_qopt *qopt = nla_data(opt);
345
346 if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
347 return -EINVAL;
348 memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
349*/
350 return 0;
351}
352
353
354/* called during initial creation of qdisc on device */
355static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
356{
357 struct ieee80211_sched_data *q = qdisc_priv(qd);
358 struct net_device *dev = qd->dev;
359 struct ieee80211_local *local;
360 int queues;
361 int err = 0, i;
362
363 /* check that device is a mac80211 device */
364 if (!dev->ieee80211_ptr ||
365 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
366 return -EINVAL;
367
368 /* check this device is an ieee80211 master type device */
369 if (dev->type != ARPHRD_IEEE80211)
370 return -EINVAL;
371
372 /* check that there is no qdisc currently attached to device
373 * this ensures that we will be the root qdisc. (I can't find a better
374 * way to test this explicitly) */
375 if (dev->qdisc_sleeping != &noop_qdisc)
376 return -EINVAL;
377
378 if (qd->flags & TCQ_F_INGRESS)
379 return -EINVAL;
380
381 local = wdev_priv(dev->ieee80211_ptr);
382 queues = local->hw.queues;
383
384 /* if options were passed in, set them */
385 if (opt) {
386 err = wme_qdiscop_tune(qd, opt);
387 }
388
389 /* create child queues */
390 for (i = 0; i < queues; i++) {
391 skb_queue_head_init(&q->requeued[i]);
392 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
393 qd->handle);
394 if (!q->queues[i]) {
395 q->queues[i] = &noop_qdisc;
396 printk(KERN_ERR "%s child qdisc %i creation failed\n",
397 dev->name, i);
398 }
399 }
400
401 /* reserve all legacy QoS queues */
402 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
403 set_bit(i, q->qdisc_pool);
404
405 return err;
406}
407
408static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
409{
410/* struct ieee80211_sched_data *q = qdisc_priv(qd);
411 unsigned char *p = skb->tail;
412 struct tc_ieee80211_qopt opt;
413
414 memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
415 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
416*/ return skb->len;
417/*
418nla_put_failure:
419 skb_trim(skb, p - skb->data);*/
420 return -1;
421}
422
423
424static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
425 struct Qdisc *new, struct Qdisc **old)
426{
427 struct ieee80211_sched_data *q = qdisc_priv(qd);
428 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
429 struct ieee80211_hw *hw = &local->hw;
430 unsigned long queue = arg - 1;
431
432 if (queue >= hw->queues)
433 return -EINVAL;
434
435 if (!new)
436 new = &noop_qdisc;
437
438 sch_tree_lock(qd);
439 *old = q->queues[queue];
440 q->queues[queue] = new;
441 qdisc_reset(*old);
442 sch_tree_unlock(qd);
443
444 return 0;
445}
446
447
448static struct Qdisc *
449wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
450{
451 struct ieee80211_sched_data *q = qdisc_priv(qd);
452 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
453 struct ieee80211_hw *hw = &local->hw;
454 unsigned long queue = arg - 1;
455
456 if (queue >= hw->queues)
457 return NULL;
458
459 return q->queues[queue];
460}
461
462
463static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
464{
465 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
466 struct ieee80211_hw *hw = &local->hw;
467 unsigned long queue = TC_H_MIN(classid);
468
469 if (queue - 1 >= hw->queues)
470 return 0;
471
472 return queue; 183 return queue;
473} 184}
474 185
475
476static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
477 u32 classid)
478{
479 return wme_classop_get(qd, classid);
480}
481
482
483static void wme_classop_put(struct Qdisc *q, unsigned long cl)
484{
485}
486
487
488static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
489 struct nlattr **tca, unsigned long *arg)
490{
491 unsigned long cl = *arg;
492 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
493 struct ieee80211_hw *hw = &local->hw;
494
495 if (cl - 1 > hw->queues)
496 return -ENOENT;
497
498 /* TODO: put code to program hardware queue parameters here,
499 * to allow programming from tc command line */
500
501 return 0;
502}
503
504
505/* we don't support deleting hardware queues
506 * when we add WMM-SA support - TSPECs may be deleted here */
507static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
508{
509 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
510 struct ieee80211_hw *hw = &local->hw;
511
512 if (cl - 1 > hw->queues)
513 return -ENOENT;
514 return 0;
515}
516
517
518static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
519 struct sk_buff *skb, struct tcmsg *tcm)
520{
521 struct ieee80211_sched_data *q = qdisc_priv(qd);
522 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
523 struct ieee80211_hw *hw = &local->hw;
524
525 if (cl - 1 > hw->queues)
526 return -ENOENT;
527 tcm->tcm_handle = TC_H_MIN(cl);
528 tcm->tcm_parent = qd->handle;
529 tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
530 return 0;
531}
532
533
534static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
535{
536 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
537 struct ieee80211_hw *hw = &local->hw;
538 int queue;
539
540 if (arg->stop)
541 return;
542
543 for (queue = 0; queue < hw->queues; queue++) {
544 if (arg->count < arg->skip) {
545 arg->count++;
546 continue;
547 }
548 /* we should return classids for our internal queues here
549 * as well as the external ones */
550 if (arg->fn(qd, queue+1, arg) < 0) {
551 arg->stop = 1;
552 break;
553 }
554 arg->count++;
555 }
556}
557
558
559static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
560 unsigned long cl)
561{
562 struct ieee80211_sched_data *q = qdisc_priv(qd);
563
564 if (cl)
565 return NULL;
566
567 return &q->filter_list;
568}
569
570
571/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
572 * - these are the operations on the classes */
573static const struct Qdisc_class_ops class_ops =
574{
575 .graft = wme_classop_graft,
576 .leaf = wme_classop_leaf,
577
578 .get = wme_classop_get,
579 .put = wme_classop_put,
580 .change = wme_classop_change,
581 .delete = wme_classop_delete,
582 .walk = wme_classop_walk,
583
584 .tcf_chain = wme_classop_find_tcf,
585 .bind_tcf = wme_classop_bind,
586 .unbind_tcf = wme_classop_put,
587
588 .dump = wme_classop_dump_class,
589};
590
591
592/* queueing discipline operations */
593static struct Qdisc_ops wme_qdisc_ops __read_mostly =
594{
595 .next = NULL,
596 .cl_ops = &class_ops,
597 .id = "ieee80211",
598 .priv_size = sizeof(struct ieee80211_sched_data),
599
600 .enqueue = wme_qdiscop_enqueue,
601 .dequeue = wme_qdiscop_dequeue,
602 .requeue = wme_qdiscop_requeue,
603 .drop = NULL, /* drop not needed since we are always the root qdisc */
604
605 .init = wme_qdiscop_init,
606 .reset = wme_qdiscop_reset,
607 .destroy = wme_qdiscop_destroy,
608 .change = wme_qdiscop_tune,
609
610 .dump = wme_qdiscop_dump,
611};
612
613
614void ieee80211_install_qdisc(struct net_device *dev)
615{
616 struct Qdisc *qdisc;
617
618 qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
619 if (!qdisc) {
620 printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
621 return;
622 }
623
624 /* same handle as would be allocated by qdisc_alloc_handle() */
625 qdisc->handle = 0x80010000;
626
627 qdisc_lock_tree(dev);
628 list_add_tail(&qdisc->list, &dev->qdisc_list);
629 dev->qdisc_sleeping = qdisc;
630 qdisc_unlock_tree(dev);
631}
632
633
634int ieee80211_qdisc_installed(struct net_device *dev)
635{
636 return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
637}
638
639
640int ieee80211_wme_register(void)
641{
642 return register_qdisc(&wme_qdisc_ops);
643}
644
645
646void ieee80211_wme_unregister(void)
647{
648 unregister_qdisc(&wme_qdisc_ops);
649}
650
651int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, 186int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
652 struct sta_info *sta, u16 tid) 187 struct sta_info *sta, u16 tid)
653{ 188{
654 int i; 189 int i;
655 struct ieee80211_sched_data *q =
656 qdisc_priv(local->mdev->qdisc_sleeping);
657 DECLARE_MAC_BUF(mac);
658 190
659 /* prepare the filter and save it for the SW queue 191 /* prepare the filter and save it for the SW queue
660 * matching the recieved HW queue */ 192 * matching the received HW queue */
193
194 if (!local->hw.ampdu_queues)
195 return -EPERM;
661 196
662 /* try to get a Qdisc from the pool */ 197 /* try to get a Qdisc from the pool */
663 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++) 198 for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
664 if (!test_and_set_bit(i, q->qdisc_pool)) { 199 if (!test_and_set_bit(i, local->queue_pool)) {
665 ieee80211_stop_queue(local_to_hw(local), i); 200 ieee80211_stop_queue(local_to_hw(local), i);
666 sta->tid_to_tx_q[tid] = i; 201 sta->tid_to_tx_q[tid] = i;
667 202
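
Aggregation queues are handed out from a shared bitmap: ieee80211_ht_agg_queue_add() scans the range reserved for A-MPDU traffic (the queues after the regular AC queues) and claims the first free bit with an atomic test-and-set, then stops the new queue until the session is fully wired up. A userspace model of the allocator, with plain bit operations instead of test_and_set_bit():

#include <stdio.h>

#define HW_QUEUES	4	/* regular AC queues */
#define AMPDU_QUEUES	4	/* extra queues reserved for aggregation */

static unsigned long queue_pool;	/* one bit per claimed queue */

static int agg_queue_add(void)
{
	int i;

	for (i = HW_QUEUES; i < HW_QUEUES + AMPDU_QUEUES; i++) {
		if (!(queue_pool & (1UL << i))) {  /* test_and_set_bit() */
			queue_pool |= 1UL << i;
			return i;	/* caller stops this queue and
					 * records it in tid_to_tx_q[] */
		}
	}
	return -1;	/* pool exhausted */
}

int main(void)
{
	printf("first session got queue %d\n", agg_queue_add());
	printf("second session got queue %d\n", agg_queue_add());
	return 0;
}
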
@@ -670,11 +205,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
670 * on the previous queue 205 * on the previous queue
671 * since HT is strict in order */ 206 * since HT is strict in order */
672#ifdef CONFIG_MAC80211_HT_DEBUG 207#ifdef CONFIG_MAC80211_HT_DEBUG
673 if (net_ratelimit()) 208 if (net_ratelimit()) {
209 DECLARE_MAC_BUF(mac);
674 printk(KERN_DEBUG "allocated aggregation queue" 210 printk(KERN_DEBUG "allocated aggregation queue"
675 " %d tid %d addr %s pool=0x%lX\n", 211 " %d tid %d addr %s pool=0x%lX\n",
676 i, tid, print_mac(mac, sta->addr), 212 i, tid, print_mac(mac, sta->addr),
677 q->qdisc_pool[0]); 213 local->queue_pool[0]);
214 }
678#endif /* CONFIG_MAC80211_HT_DEBUG */ 215#endif /* CONFIG_MAC80211_HT_DEBUG */
679 return 0; 216 return 0;
680 } 217 }
@@ -683,44 +220,79 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
683} 220}
684 221
685/** 222/**
686 * the caller needs to hold local->mdev->queue_lock 223 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
687 */ 224 */
688void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, 225void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
689 struct sta_info *sta, u16 tid, 226 struct sta_info *sta, u16 tid,
690 u8 requeue) 227 u8 requeue)
691{ 228{
692 struct ieee80211_sched_data *q =
693 qdisc_priv(local->mdev->qdisc_sleeping);
694 int agg_queue = sta->tid_to_tx_q[tid]; 229 int agg_queue = sta->tid_to_tx_q[tid];
230 struct ieee80211_hw *hw = &local->hw;
695 231
696 /* return the qdisc to the pool */ 232 /* return the qdisc to the pool */
697 clear_bit(agg_queue, q->qdisc_pool); 233 clear_bit(agg_queue, local->queue_pool);
698 sta->tid_to_tx_q[tid] = local->hw.queues; 234 sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);
699 235
700 if (requeue) 236 if (requeue) {
701 ieee80211_requeue(local, agg_queue); 237 ieee80211_requeue(local, agg_queue);
702 else 238 } else {
703 q->queues[agg_queue]->ops->reset(q->queues[agg_queue]); 239 struct netdev_queue *txq;
240 spinlock_t *root_lock;
241
242 txq = netdev_get_tx_queue(local->mdev, agg_queue);
243 root_lock = qdisc_root_lock(txq->qdisc);
244
245 spin_lock_bh(root_lock);
246 qdisc_reset(txq->qdisc);
247 spin_unlock_bh(root_lock);
248 }
704} 249}
705 250
706void ieee80211_requeue(struct ieee80211_local *local, int queue) 251void ieee80211_requeue(struct ieee80211_local *local, int queue)
707{ 252{
708 struct Qdisc *root_qd = local->mdev->qdisc_sleeping; 253 struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
709 struct ieee80211_sched_data *q = qdisc_priv(root_qd); 254 struct sk_buff_head list;
710 struct Qdisc *qdisc = q->queues[queue]; 255 spinlock_t *root_lock;
711 struct sk_buff *skb = NULL; 256 struct Qdisc *qdisc;
712 u32 len; 257 u32 len;
713 258
259 rcu_read_lock_bh();
260
261 qdisc = rcu_dereference(txq->qdisc);
714 if (!qdisc || !qdisc->dequeue) 262 if (!qdisc || !qdisc->dequeue)
715 return; 263 goto out_unlock;
264
265 skb_queue_head_init(&list);
716 266
717 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen); 267 root_lock = qdisc_root_lock(qdisc);
268 spin_lock(root_lock);
718 for (len = qdisc->q.qlen; len > 0; len--) { 269 for (len = qdisc->q.qlen; len > 0; len--) {
719 skb = qdisc->dequeue(qdisc); 270 struct sk_buff *skb = qdisc->dequeue(qdisc);
720 root_qd->q.qlen--; 271
721 /* packet will be classified again and */
722 /* skb->packet_data->queue will be overridden if needed */
723 if (skb) 272 if (skb)
724 wme_qdiscop_enqueue(skb, root_qd); 273 __skb_queue_tail(&list, skb);
274 }
275 spin_unlock(root_lock);
276
277 for (len = list.qlen; len > 0; len--) {
278 struct sk_buff *skb = __skb_dequeue(&list);
279 u16 new_queue;
280
281 BUG_ON(!skb);
282 new_queue = ieee80211_select_queue(local->mdev, skb);
283 skb_set_queue_mapping(skb, new_queue);
284
285 txq = netdev_get_tx_queue(local->mdev, new_queue);
286
287
288 qdisc = rcu_dereference(txq->qdisc);
289 root_lock = qdisc_root_lock(qdisc);
290
291 spin_lock(root_lock);
292 qdisc->enqueue(skb, qdisc);
293 spin_unlock(root_lock);
725 } 294 }
295
296out_unlock:
297 rcu_read_unlock_bh();
726} 298}
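
The rewritten ieee80211_requeue() works in two phases: drain every frame from the old aggregation queue into a private list while holding that qdisc's root lock, then reclassify each frame with ieee80211_select_queue() and enqueue it on whichever queue the classifier picks, taking each target qdisc's lock in turn. Splitting the phases avoids holding two qdisc locks at once. A simplified model with integer frames and array-backed queues:

#include <stdio.h>
#include <string.h>

#define NQ  4
#define CAP 8

struct queue { int buf[CAP]; int len; };

static struct queue queues[NQ];

static int classify(int frame) { return frame % NQ; } /* stand-in */

static void requeue(int from)
{
	int list[CAP], n, i;

	/* phase 1: drain the old queue (under its qdisc lock in the kernel) */
	n = queues[from].len;
	memcpy(list, queues[from].buf, n * sizeof(int));
	queues[from].len = 0;

	/* phase 2: reclassify and re-enqueue each drained frame */
	for (i = 0; i < n; i++) {
		int q = classify(list[i]);

		queues[q].buf[queues[q].len++] = list[i];
	}
}

int main(void)
{
	int i;

	queues[3].buf[0] = 5;
	queues[3].buf[1] = 6;
	queues[3].len = 2;
	requeue(3);
	for (i = 0; i < NQ; i++)
		printf("queue %d holds %d frame(s)\n", i, queues[i].len);
	return 0;
}
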
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index fcc6b05508cc..04de28c071a6 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -19,57 +19,16 @@
19#define QOS_CONTROL_ACK_POLICY_NORMAL 0 19#define QOS_CONTROL_ACK_POLICY_NORMAL 0
20#define QOS_CONTROL_ACK_POLICY_NOACK 1 20#define QOS_CONTROL_ACK_POLICY_NOACK 1
21 21
22#define QOS_CONTROL_TID_MASK 0x0f
23#define QOS_CONTROL_ACK_POLICY_SHIFT 5 22#define QOS_CONTROL_ACK_POLICY_SHIFT 5
24 23
25#define QOS_CONTROL_TAG1D_MASK 0x07
26
27extern const int ieee802_1d_to_ac[8]; 24extern const int ieee802_1d_to_ac[8];
28 25
29static inline int WLAN_FC_IS_QOS_DATA(u16 fc) 26u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb);
30{
31 return (fc & 0x8C) == 0x88;
32}
33
34#ifdef CONFIG_NET_SCHED
35void ieee80211_install_qdisc(struct net_device *dev);
36int ieee80211_qdisc_installed(struct net_device *dev);
37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, 27int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
38 struct sta_info *sta, u16 tid); 28 struct sta_info *sta, u16 tid);
39void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, 29void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
40 struct sta_info *sta, u16 tid, 30 struct sta_info *sta, u16 tid,
41 u8 requeue); 31 u8 requeue);
42void ieee80211_requeue(struct ieee80211_local *local, int queue); 32void ieee80211_requeue(struct ieee80211_local *local, int queue);
43int ieee80211_wme_register(void);
44void ieee80211_wme_unregister(void);
45#else
46static inline void ieee80211_install_qdisc(struct net_device *dev)
47{
48}
49static inline int ieee80211_qdisc_installed(struct net_device *dev)
50{
51 return 0;
52}
53static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
54 struct sta_info *sta, u16 tid)
55{
56 return -EAGAIN;
57}
58static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
59 struct sta_info *sta, u16 tid,
60 u8 requeue)
61{
62}
63static inline void ieee80211_requeue(struct ieee80211_local *local, int queue)
64{
65}
66static inline int ieee80211_wme_register(void)
67{
68 return 0;
69}
70static inline void ieee80211_wme_unregister(void)
71{
72}
73#endif /* CONFIG_NET_SCHED */
74 33
75#endif /* _WME_H */ 34#endif /* _WME_H */
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 45709ada8fee..2f33df0dcccf 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -11,6 +11,8 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/skbuff.h> 12#include <linux/skbuff.h>
13#include <linux/compiler.h> 13#include <linux/compiler.h>
14#include <linux/ieee80211.h>
15#include <asm/unaligned.h>
14#include <net/mac80211.h> 16#include <net/mac80211.h>
15 17
16#include "ieee80211_i.h" 18#include "ieee80211_i.h"
@@ -19,76 +21,30 @@
19#include "aes_ccm.h" 21#include "aes_ccm.h"
20#include "wpa.h" 22#include "wpa.h"
21 23
22static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da,
23 u8 *qos_tid, u8 **data, size_t *data_len)
24{
25 struct ieee80211_hdr *hdr;
26 size_t hdrlen;
27 u16 fc;
28 int a4_included;
29 u8 *pos;
30
31 hdr = (struct ieee80211_hdr *) skb->data;
32 fc = le16_to_cpu(hdr->frame_control);
33
34 hdrlen = 24;
35 if ((fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) ==
36 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
37 hdrlen += ETH_ALEN;
38 *sa = hdr->addr4;
39 *da = hdr->addr3;
40 } else if (fc & IEEE80211_FCTL_FROMDS) {
41 *sa = hdr->addr3;
42 *da = hdr->addr1;
43 } else if (fc & IEEE80211_FCTL_TODS) {
44 *sa = hdr->addr2;
45 *da = hdr->addr3;
46 } else {
47 *sa = hdr->addr2;
48 *da = hdr->addr1;
49 }
50
51 if (fc & 0x80)
52 hdrlen += 2;
53
54 *data = skb->data + hdrlen;
55 *data_len = skb->len - hdrlen;
56
57 a4_included = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
58 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
59 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
60 fc & IEEE80211_STYPE_QOS_DATA) {
61 pos = (u8 *) &hdr->addr4;
62 if (a4_included)
63 pos += 6;
64 *qos_tid = pos[0] & 0x0f;
65 *qos_tid |= 0x80; /* qos_included flag */
66 } else
67 *qos_tid = 0;
68
69 return skb->len < hdrlen ? -1 : 0;
70}
71
72
73ieee80211_tx_result 24ieee80211_tx_result
74ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) 25ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
75{ 26{
76 u8 *data, *sa, *da, *key, *mic, qos_tid; 27 u8 *data, *key, *mic, key_offset;
77 size_t data_len; 28 size_t data_len;
78 u16 fc; 29 unsigned int hdrlen;
30 struct ieee80211_hdr *hdr;
79 struct sk_buff *skb = tx->skb; 31 struct sk_buff *skb = tx->skb;
80 int authenticator; 32 int authenticator;
81 int wpa_test = 0; 33 int wpa_test = 0;
34 int tail;
82 35
83 fc = tx->fc; 36 hdr = (struct ieee80211_hdr *)skb->data;
84
85 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || 37 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 ||
86 !WLAN_FC_DATA_PRESENT(fc)) 38 !ieee80211_is_data_present(hdr->frame_control))
87 return TX_CONTINUE; 39 return TX_CONTINUE;
88 40
89 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) 41 hdrlen = ieee80211_hdrlen(hdr->frame_control);
42 if (skb->len < hdrlen)
90 return TX_DROP; 43 return TX_DROP;
91 44
45 data = skb->data + hdrlen;
46 data_len = skb->len - hdrlen;
47
92 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 48 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
93 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 49 !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
94 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && 50 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) &&
@@ -98,26 +54,27 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
98 return TX_CONTINUE; 54 return TX_CONTINUE;
99 } 55 }
100 56
101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { 57 tail = MICHAEL_MIC_LEN;
102 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 58 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
103 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, 59 tail += TKIP_ICV_LEN;
104 MICHAEL_MIC_LEN + TKIP_ICV_LEN, 60
105 GFP_ATOMIC))) { 61 if (WARN_ON(skb_tailroom(skb) < tail ||
106 printk(KERN_DEBUG "%s: failed to allocate more memory " 62 skb_headroom(skb) < TKIP_IV_LEN))
107 "for Michael MIC\n", tx->dev->name); 63 return TX_DROP;
108 return TX_DROP;
109 }
110 }
111 64
112#if 0 65#if 0
113 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ 66 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
114#else 67#else
115 authenticator = 1; 68 authenticator = 1;
116#endif 69#endif
117 key = &tx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_TX_MIC_KEY : 70 /* At this point we know we're using ALG_TKIP. To get the MIC key
118 ALG_TKIP_TEMP_AUTH_RX_MIC_KEY]; 71 * we will now rely on the offset from the ieee80211_key_conf::key */
72 key_offset = authenticator ?
73 NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
74 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
75 key = &tx->key->conf.key[key_offset];
119 mic = skb_put(skb, MICHAEL_MIC_LEN); 76 mic = skb_put(skb, MICHAEL_MIC_LEN);
120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 77 michael_mic(key, hdr, data, data_len, mic);
121 78
122 return TX_CONTINUE; 79 return TX_CONTINUE;
123} 80}
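
With ieee80211_get_hdr_info() gone, the Michael MIC key is no longer picked by the old ALG_TKIP_TEMP_AUTH_* indices but by byte offsets into the 32-byte TKIP key material, as laid out by nl80211. A small helper sketch of that selection (hypothetical; the patch open-codes it above):

    /*
     * TKIP key material layout assumed by NL80211_TKIP_DATA_OFFSET_*:
     *   bytes  0..15  temporal encryption key
     *   bytes 16..23  Michael MIC key for frames the authenticator sends
     *   bytes 24..31  Michael MIC key for frames the authenticator receives
     */
    static const u8 *tkip_mic_key(struct ieee80211_key_conf *conf,
                                  int authenticator)
    {
        u8 off = authenticator ? NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY
                               : NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
        return &conf->key[off];
    }
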
@@ -126,47 +83,50 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
126ieee80211_rx_result 83ieee80211_rx_result
127ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) 84ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
128{ 85{
129 u8 *data, *sa, *da, *key = NULL, qos_tid; 86 u8 *data, *key = NULL, key_offset;
130 size_t data_len; 87 size_t data_len;
131 u16 fc; 88 unsigned int hdrlen;
89 struct ieee80211_hdr *hdr;
132 u8 mic[MICHAEL_MIC_LEN]; 90 u8 mic[MICHAEL_MIC_LEN];
133 struct sk_buff *skb = rx->skb; 91 struct sk_buff *skb = rx->skb;
134 int authenticator = 1, wpa_test = 0; 92 int authenticator = 1, wpa_test = 0;
135 DECLARE_MAC_BUF(mac); 93 DECLARE_MAC_BUF(mac);
136 94
137 fc = rx->fc;
138
139 /* 95 /*
140 * No way to verify the MIC if the hardware stripped it 96 * No way to verify the MIC if the hardware stripped it
141 */ 97 */
142 if (rx->status->flag & RX_FLAG_MMIC_STRIPPED) 98 if (rx->status->flag & RX_FLAG_MMIC_STRIPPED)
143 return RX_CONTINUE; 99 return RX_CONTINUE;
144 100
101 hdr = (struct ieee80211_hdr *)skb->data;
145 if (!rx->key || rx->key->conf.alg != ALG_TKIP || 102 if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
146 !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) 103 !ieee80211_has_protected(hdr->frame_control) ||
104 !ieee80211_is_data_present(hdr->frame_control))
147 return RX_CONTINUE; 105 return RX_CONTINUE;
148 106
149 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) 107 hdrlen = ieee80211_hdrlen(hdr->frame_control);
150 || data_len < MICHAEL_MIC_LEN) 108 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
151 return RX_DROP_UNUSABLE; 109 return RX_DROP_UNUSABLE;
152 110
153 data_len -= MICHAEL_MIC_LEN; 111 data = skb->data + hdrlen;
112 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
154 113
155#if 0 114#if 0
156 authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ 115 authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */
157#else 116#else
158 authenticator = 1; 117 authenticator = 1;
159#endif 118#endif
160 key = &rx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_RX_MIC_KEY : 119 /* At this point we know we're using ALG_TKIP. To get the MIC key
161 ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; 120 * we will now rely on the offset from the ieee80211_key_conf::key */
162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 121 key_offset = authenticator ?
122 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
123 NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
124 key = &rx->key->conf.key[key_offset];
125 michael_mic(key, hdr, data, data_len, mic);
163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { 126 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
164 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 127 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
165 return RX_DROP_UNUSABLE; 128 return RX_DROP_UNUSABLE;
166 129
167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from "
168 "%s\n", rx->dev->name, print_mac(mac, sa));
169
170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 130 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx,
171 (void *) skb->data); 131 (void *) skb->data);
172 return RX_DROP_UNUSABLE; 132 return RX_DROP_UNUSABLE;
@@ -176,59 +136,58 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
176 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 136 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
177 137
178 /* update IV in key information to be able to detect replays */ 138 /* update IV in key information to be able to detect replays */
179 rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32; 139 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
180 rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16; 140 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
181 141
182 return RX_CONTINUE; 142 return RX_CONTINUE;
183} 143}
184 144
185 145
186static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, 146static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
187 struct sk_buff *skb, int test)
188{ 147{
189 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 148 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
190 struct ieee80211_key *key = tx->key; 149 struct ieee80211_key *key = tx->key;
191 int hdrlen, len, tailneed; 150 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
192 u16 fc; 151 unsigned int hdrlen;
152 int len, tail;
193 u8 *pos; 153 u8 *pos;
194 154
195 fc = le16_to_cpu(hdr->frame_control); 155 info->control.icv_len = TKIP_ICV_LEN;
196 hdrlen = ieee80211_get_hdrlen(fc); 156 info->control.iv_len = TKIP_IV_LEN;
157
158 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
159 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
160 /* hwaccel - with no need for preallocated room for IV/ICV */
161 info->control.hw_key = &tx->key->conf;
162 return 0;
163 }
164
165 hdrlen = ieee80211_hdrlen(hdr->frame_control);
197 len = skb->len - hdrlen; 166 len = skb->len - hdrlen;
198 167
199 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 168 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
200 tailneed = 0; 169 tail = 0;
201 else 170 else
202 tailneed = TKIP_ICV_LEN; 171 tail = TKIP_ICV_LEN;
203 172
204 if ((skb_headroom(skb) < TKIP_IV_LEN || 173 if (WARN_ON(skb_tailroom(skb) < tail ||
205 skb_tailroom(skb) < tailneed)) { 174 skb_headroom(skb) < TKIP_IV_LEN))
206 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 175 return -1;
207 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, tailneed,
208 GFP_ATOMIC)))
209 return -1;
210 }
211 176
212 pos = skb_push(skb, TKIP_IV_LEN); 177 pos = skb_push(skb, TKIP_IV_LEN);
213 memmove(pos, pos + TKIP_IV_LEN, hdrlen); 178 memmove(pos, pos + TKIP_IV_LEN, hdrlen);
214 pos += hdrlen; 179 pos += hdrlen;
215 180
216 /* Increase IV for the frame */ 181 /* Increase IV for the frame */
217 key->u.tkip.iv16++; 182 key->u.tkip.tx.iv16++;
218 if (key->u.tkip.iv16 == 0) 183 if (key->u.tkip.tx.iv16 == 0)
219 key->u.tkip.iv32++; 184 key->u.tkip.tx.iv32++;
220 185
221 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 186 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
222 hdr = (struct ieee80211_hdr *)skb->data;
223
224 /* hwaccel - with preallocated room for IV */ 187 /* hwaccel - with preallocated room for IV */
225 ieee80211_tkip_add_iv(pos, key, 188 ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
226 (u8) (key->u.tkip.iv16 >> 8),
227 (u8) (((key->u.tkip.iv16 >> 8) | 0x20) &
228 0x7f),
229 (u8) key->u.tkip.iv16);
230 189
231 tx->control->key_idx = tx->key->conf.hw_key_idx; 190 info->control.hw_key = &tx->key->conf;
232 return 0; 191 return 0;
233 } 192 }
234 193
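
tkip_encrypt_skb() now keeps the TKIP transmit sequence counter in the dedicated tx substruct and hands ieee80211_tkip_add_iv() the raw iv16 instead of three hand-packed bytes. The counter itself is the usual 48-bit TSC split into a 16-bit low half and a 32-bit high half; a sketch of the carry logic:

    /* 48-bit TKIP sequence counter: iv16 overflow carries into iv32 */
    struct tkip_tsc {
        u16 iv16;
        u32 iv32;
    };

    static void tkip_tsc_inc(struct tkip_tsc *c)
    {
        if (++c->iv16 == 0)
            c->iv32++;
    }
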
@@ -246,28 +205,16 @@ ieee80211_tx_result
246ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 205ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
247{ 206{
248 struct sk_buff *skb = tx->skb; 207 struct sk_buff *skb = tx->skb;
249 int wpa_test = 0, test = 0;
250 208
251 tx->control->icv_len = TKIP_ICV_LEN;
252 tx->control->iv_len = TKIP_IV_LEN;
253 ieee80211_tx_set_protected(tx); 209 ieee80211_tx_set_protected(tx);
254 210
255 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 211 if (tkip_encrypt_skb(tx, skb) < 0)
256 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
257 !wpa_test) {
258 /* hwaccel - with no need for preallocated room for IV/ICV */
259 tx->control->key_idx = tx->key->conf.hw_key_idx;
260 return TX_CONTINUE;
261 }
262
263 if (tkip_encrypt_skb(tx, skb, test) < 0)
264 return TX_DROP; 212 return TX_DROP;
265 213
266 if (tx->extra_frag) { 214 if (tx->extra_frag) {
267 int i; 215 int i;
268 for (i = 0; i < tx->num_extra_frag; i++) { 216 for (i = 0; i < tx->num_extra_frag; i++) {
269 if (tkip_encrypt_skb(tx, tx->extra_frag[i], test) 217 if (tkip_encrypt_skb(tx, tx->extra_frag[i]) < 0)
270 < 0)
271 return TX_DROP; 218 return TX_DROP;
272 } 219 }
273 } 220 }
@@ -280,16 +227,14 @@ ieee80211_rx_result
280ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) 227ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
281{ 228{
282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 229 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
283 u16 fc;
284 int hdrlen, res, hwaccel = 0, wpa_test = 0; 230 int hdrlen, res, hwaccel = 0, wpa_test = 0;
285 struct ieee80211_key *key = rx->key; 231 struct ieee80211_key *key = rx->key;
286 struct sk_buff *skb = rx->skb; 232 struct sk_buff *skb = rx->skb;
287 DECLARE_MAC_BUF(mac); 233 DECLARE_MAC_BUF(mac);
288 234
289 fc = le16_to_cpu(hdr->frame_control); 235 hdrlen = ieee80211_hdrlen(hdr->frame_control);
290 hdrlen = ieee80211_get_hdrlen(fc);
291 236
292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 237 if (!ieee80211_is_data(hdr->frame_control))
293 return RX_CONTINUE; 238 return RX_CONTINUE;
294 239
295 if (!rx->sta || skb->len - hdrlen < 12) 240 if (!rx->sta || skb->len - hdrlen < 12)
@@ -315,15 +260,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
315 hdr->addr1, hwaccel, rx->queue, 260 hdr->addr1, hwaccel, rx->queue,
316 &rx->tkip_iv32, 261 &rx->tkip_iv32,
317 &rx->tkip_iv16); 262 &rx->tkip_iv16);
318 if (res != TKIP_DECRYPT_OK || wpa_test) { 263 if (res != TKIP_DECRYPT_OK || wpa_test)
319#ifdef CONFIG_MAC80211_DEBUG
320 if (net_ratelimit())
321 printk(KERN_DEBUG "%s: TKIP decrypt failed for RX "
322 "frame from %s (res=%d)\n", rx->dev->name,
323 print_mac(mac, rx->sta->addr), res);
324#endif /* CONFIG_MAC80211_DEBUG */
325 return RX_DROP_UNUSABLE; 264 return RX_DROP_UNUSABLE;
326 }
327 265
328 /* Trim ICV */ 266 /* Trim ICV */
329 skb_trim(skb, skb->len - TKIP_ICV_LEN); 267 skb_trim(skb, skb->len - TKIP_ICV_LEN);
@@ -336,70 +274,68 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
336} 274}
337 275
338 276
339static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad, 277static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
340 int encrypted) 278 int encrypted)
341{ 279{
342 u16 fc; 280 __le16 mask_fc;
343 int a4_included, qos_included; 281 int a4_included;
344 u8 qos_tid, *fc_pos, *data, *sa, *da; 282 u8 qos_tid;
345 int len_a; 283 u8 *b_0, *aad;
346 size_t data_len; 284 u16 data_len, len_a;
347 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 285 unsigned int hdrlen;
286 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
348 287
349 fc_pos = (u8 *) &hdr->frame_control; 288 b_0 = scratch + 3 * AES_BLOCK_LEN;
350 fc = fc_pos[0] ^ (fc_pos[1] << 8); 289 aad = scratch + 4 * AES_BLOCK_LEN;
351 a4_included = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 290
352 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); 291 /*
353 292 * Mask FC: zero subtype b4 b5 b6
354 ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len); 293 * Retry, PwrMgt, MoreData; set Protected
355 data_len -= CCMP_HDR_LEN + (encrypted ? CCMP_MIC_LEN : 0); 294 */
356 if (qos_tid & 0x80) { 295 mask_fc = hdr->frame_control;
357 qos_included = 1; 296 mask_fc &= ~cpu_to_le16(0x0070 | IEEE80211_FCTL_RETRY |
358 qos_tid &= 0x0f; 297 IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
359 } else 298 mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
360 qos_included = 0; 299
361 /* First block, b_0 */ 300 hdrlen = ieee80211_hdrlen(hdr->frame_control);
301 len_a = hdrlen - 2;
302 a4_included = ieee80211_has_a4(hdr->frame_control);
303
304 if (ieee80211_is_data_qos(hdr->frame_control))
305 qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
306 else
307 qos_tid = 0;
308
309 data_len = skb->len - hdrlen - CCMP_HDR_LEN;
310 if (encrypted)
311 data_len -= CCMP_MIC_LEN;
362 312
313 /* First block, b_0 */
363 b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ 314 b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
364 /* Nonce: QoS Priority | A2 | PN */ 315 /* Nonce: QoS Priority | A2 | PN */
365 b_0[1] = qos_tid; 316 b_0[1] = qos_tid;
366 memcpy(&b_0[2], hdr->addr2, 6); 317 memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
367 memcpy(&b_0[8], pn, CCMP_PN_LEN); 318 memcpy(&b_0[8], pn, CCMP_PN_LEN);
368 /* l(m) */ 319 /* l(m) */
369 b_0[14] = (data_len >> 8) & 0xff; 320 put_unaligned_be16(data_len, &b_0[14]);
370 b_0[15] = data_len & 0xff;
371
372 321
373 /* AAD (extra authenticate-only data) / masked 802.11 header 322 /* AAD (extra authenticate-only data) / masked 802.11 header
374 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ 323 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
375 324 put_unaligned_be16(len_a, &aad[0]);
376 len_a = a4_included ? 28 : 22; 325 put_unaligned(mask_fc, (__le16 *)&aad[2]);
377 if (qos_included) 326 memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
378 len_a += 2;
379
380 aad[0] = 0; /* (len_a >> 8) & 0xff; */
381 aad[1] = len_a & 0xff;
382 /* Mask FC: zero subtype b4 b5 b6 */
383 aad[2] = fc_pos[0] & ~(BIT(4) | BIT(5) | BIT(6));
384 /* Retry, PwrMgt, MoreData; set Protected */
385 aad[3] = (fc_pos[1] & ~(BIT(3) | BIT(4) | BIT(5))) | BIT(6);
386 memcpy(&aad[4], &hdr->addr1, 18);
387 327
388 /* Mask Seq#, leave Frag# */ 328 /* Mask Seq#, leave Frag# */
389 aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; 329 aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
390 aad[23] = 0; 330 aad[23] = 0;
331
391 if (a4_included) { 332 if (a4_included) {
392 memcpy(&aad[24], hdr->addr4, 6); 333 memcpy(&aad[24], hdr->addr4, ETH_ALEN);
393 aad[30] = 0; 334 aad[30] = qos_tid;
394 aad[31] = 0; 335 aad[31] = 0;
395 } else 336 } else {
396 memset(&aad[24], 0, 8); 337 memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
397 if (qos_included) { 338 aad[24] = qos_tid;
398 u8 *dpos = &aad[a4_included ? 30 : 24];
399
400 /* Mask QoS Control field */
401 dpos[0] = qos_tid;
402 dpos[1] = 0;
403 } 339 }
404} 340}
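
ccmp_special_blocks() now takes the whole scratch buffer and carves b_0 and the AAD out of it itself (at 3 and 4 AES blocks in), and the frame-control masking is done once on the __le16 instead of byte by byte. The CCM initial block it builds follows directly from the code above; a compact sketch, assuming a 48-bit big-endian PN as in the hunk:

    /* needs <asm/unaligned.h>, <linux/string.h>, <linux/if_ether.h> */
    static void ccmp_build_b0(u8 *b_0, u8 tid, const u8 *a2,
                              const u8 *pn, u16 payload_len)
    {
        b_0[0] = 0x59;        /* flags: Adata=1, M=011 (8-byte MIC), L=001 */
        b_0[1] = tid;         /* nonce: QoS priority ... */
        memcpy(&b_0[2], a2, ETH_ALEN);    /* ... | A2 (transmitter) ... */
        memcpy(&b_0[8], pn, 6);           /* ... | 48-bit PN */
        put_unaligned_be16(payload_len, &b_0[14]);    /* l(m) */
    }
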
405 341
@@ -429,36 +365,37 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr)
429} 365}
430 366
431 367
432static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, 368static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
433 struct sk_buff *skb, int test)
434{ 369{
435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 370 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
436 struct ieee80211_key *key = tx->key; 371 struct ieee80211_key *key = tx->key;
437 int hdrlen, len, tailneed; 372 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
438 u16 fc; 373 int hdrlen, len, tail;
439 u8 *pos, *pn, *b_0, *aad, *scratch; 374 u8 *pos, *pn;
440 int i; 375 int i;
441 376
442 scratch = key->u.ccmp.tx_crypto_buf; 377 info->control.icv_len = CCMP_MIC_LEN;
443 b_0 = scratch + 3 * AES_BLOCK_LEN; 378 info->control.iv_len = CCMP_HDR_LEN;
444 aad = scratch + 4 * AES_BLOCK_LEN;
445 379
446 fc = le16_to_cpu(hdr->frame_control); 380 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
447 hdrlen = ieee80211_get_hdrlen(fc); 381 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
382 /* hwaccel - with no need for preallocated room for CCMP
383 * header or MIC fields */
384 info->control.hw_key = &tx->key->conf;
385 return 0;
386 }
387
388 hdrlen = ieee80211_hdrlen(hdr->frame_control);
448 len = skb->len - hdrlen; 389 len = skb->len - hdrlen;
449 390
450 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 391 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
451 tailneed = 0; 392 tail = 0;
452 else 393 else
453 tailneed = CCMP_MIC_LEN; 394 tail = CCMP_MIC_LEN;
454 395
455 if ((skb_headroom(skb) < CCMP_HDR_LEN || 396 if (WARN_ON(skb_tailroom(skb) < tail ||
456 skb_tailroom(skb) < tailneed)) { 397 skb_headroom(skb) < CCMP_HDR_LEN))
457 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 398 return -1;
458 if (unlikely(pskb_expand_head(skb, CCMP_HDR_LEN, tailneed,
459 GFP_ATOMIC)))
460 return -1;
461 }
462 399
463 pos = skb_push(skb, CCMP_HDR_LEN); 400 pos = skb_push(skb, CCMP_HDR_LEN);
464 memmove(pos, pos + CCMP_HDR_LEN, hdrlen); 401 memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
@@ -478,13 +415,13 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx,
478 415
479 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 416 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
480 /* hwaccel - with preallocated room for CCMP header */ 417 /* hwaccel - with preallocated room for CCMP header */
481 tx->control->key_idx = key->conf.hw_key_idx; 418 info->control.hw_key = &tx->key->conf;
482 return 0; 419 return 0;
483 } 420 }
484 421
485 pos += CCMP_HDR_LEN; 422 pos += CCMP_HDR_LEN;
486 ccmp_special_blocks(skb, pn, b_0, aad, 0); 423 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
487 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, b_0, aad, pos, len, 424 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len,
488 pos, skb_put(skb, CCMP_MIC_LEN)); 425 pos, skb_put(skb, CCMP_MIC_LEN));
489 426
490 return 0; 427 return 0;
@@ -495,28 +432,16 @@ ieee80211_tx_result
495ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 432ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
496{ 433{
497 struct sk_buff *skb = tx->skb; 434 struct sk_buff *skb = tx->skb;
498 int test = 0;
499 435
500 tx->control->icv_len = CCMP_MIC_LEN;
501 tx->control->iv_len = CCMP_HDR_LEN;
502 ieee80211_tx_set_protected(tx); 436 ieee80211_tx_set_protected(tx);
503 437
504 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 438 if (ccmp_encrypt_skb(tx, skb) < 0)
505 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
506 /* hwaccel - with no need for preallocated room for CCMP "
507 * header or MIC fields */
508 tx->control->key_idx = tx->key->conf.hw_key_idx;
509 return TX_CONTINUE;
510 }
511
512 if (ccmp_encrypt_skb(tx, skb, test) < 0)
513 return TX_DROP; 439 return TX_DROP;
514 440
515 if (tx->extra_frag) { 441 if (tx->extra_frag) {
516 int i; 442 int i;
517 for (i = 0; i < tx->num_extra_frag; i++) { 443 for (i = 0; i < tx->num_extra_frag; i++) {
518 if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test) 444 if (ccmp_encrypt_skb(tx, tx->extra_frag[i]) < 0)
519 < 0)
520 return TX_DROP; 445 return TX_DROP;
521 } 446 }
522 } 447 }
@@ -528,8 +453,7 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
528ieee80211_rx_result 453ieee80211_rx_result
529ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) 454ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
530{ 455{
531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 456 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
532 u16 fc;
533 int hdrlen; 457 int hdrlen;
534 struct ieee80211_key *key = rx->key; 458 struct ieee80211_key *key = rx->key;
535 struct sk_buff *skb = rx->skb; 459 struct sk_buff *skb = rx->skb;
@@ -537,10 +461,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
537 int data_len; 461 int data_len;
538 DECLARE_MAC_BUF(mac); 462 DECLARE_MAC_BUF(mac);
539 463
540 fc = le16_to_cpu(hdr->frame_control); 464 hdrlen = ieee80211_hdrlen(hdr->frame_control);
541 hdrlen = ieee80211_get_hdrlen(fc);
542 465
543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 466 if (!ieee80211_is_data(hdr->frame_control))
544 return RX_CONTINUE; 467 return RX_CONTINUE;
545 468
546 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; 469 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
@@ -554,41 +477,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen); 477 (void) ccmp_hdr2pn(pn, skb->data + hdrlen);
555 478
556 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { 479 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
557#ifdef CONFIG_MAC80211_DEBUG
558 u8 *ppn = key->u.ccmp.rx_pn[rx->queue];
559
560 printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from "
561 "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN "
562 "%02x%02x%02x%02x%02x%02x)\n", rx->dev->name,
563 print_mac(mac, rx->sta->addr),
564 pn[0], pn[1], pn[2], pn[3], pn[4], pn[5],
565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]);
566#endif /* CONFIG_MAC80211_DEBUG */
567 key->u.ccmp.replays++; 480 key->u.ccmp.replays++;
568 return RX_DROP_UNUSABLE; 481 return RX_DROP_UNUSABLE;
569 } 482 }
570 483
571 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 484 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
572 /* hardware didn't decrypt/verify MIC */ 485 /* hardware didn't decrypt/verify MIC */
573 u8 *scratch, *b_0, *aad; 486 ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1);
574
575 scratch = key->u.ccmp.rx_crypto_buf;
576 b_0 = scratch + 3 * AES_BLOCK_LEN;
577 aad = scratch + 4 * AES_BLOCK_LEN;
578
579 ccmp_special_blocks(skb, pn, b_0, aad, 1);
580 487
581 if (ieee80211_aes_ccm_decrypt( 488 if (ieee80211_aes_ccm_decrypt(
582 key->u.ccmp.tfm, scratch, b_0, aad, 489 key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf,
583 skb->data + hdrlen + CCMP_HDR_LEN, data_len, 490 skb->data + hdrlen + CCMP_HDR_LEN, data_len,
584 skb->data + skb->len - CCMP_MIC_LEN, 491 skb->data + skb->len - CCMP_MIC_LEN,
585 skb->data + hdrlen + CCMP_HDR_LEN)) { 492 skb->data + hdrlen + CCMP_HDR_LEN)) {
586#ifdef CONFIG_MAC80211_DEBUG
587 if (net_ratelimit())
588 printk(KERN_DEBUG "%s: CCMP decrypt failed "
589 "for RX frame from %s\n", rx->dev->name,
590 print_mac(mac, rx->sta->addr));
591#endif /* CONFIG_MAC80211_DEBUG */
592 return RX_DROP_UNUSABLE; 493 return RX_DROP_UNUSABLE;
593 } 494 }
594 } 495 }
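
The replay check a few hunks up relies on the CCMP packet number being stored big-endian: a plain memcmp() then orders two 6-byte PNs correctly, and anything not strictly greater than the last PN seen on this RX queue is dropped as a replay. As a one-line sketch:

    /* big-endian PNs: lexicographic memcmp() == numeric comparison */
    static bool ccmp_is_replay(const u8 *pn, const u8 *last_pn)
    {
        return memcmp(pn, last_pn, 6) <= 0;    /* 6 == CCMP_PN_LEN */
    }
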
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index aa8d80c35e28..316c7af1d2b1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -33,9 +33,8 @@ config NF_CONNTRACK
33 into connections. 33 into connections.
34 34
35 This is required to do Masquerading or other kinds of Network 35 This is required to do Masquerading or other kinds of Network
36 Address Translation (except for Fast NAT). It can also be used to 36 Address Translation. It can also be used to enhance packet
37 enhance packet filtering (see `Connection state match support' 37 filtering (see `Connection state match support' below).
38 below).
39 38
40 To compile it as a module, choose M here. If unsure, say N. 39 To compile it as a module, choose M here. If unsure, say N.
41 40
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 662c1ccfee26..28d03e64200b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -464,7 +464,8 @@ static noinline int early_drop(unsigned int hash)
464} 464}
465 465
466struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, 466struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
467 const struct nf_conntrack_tuple *repl) 467 const struct nf_conntrack_tuple *repl,
468 gfp_t gfp)
468{ 469{
469 struct nf_conn *ct = NULL; 470 struct nf_conn *ct = NULL;
470 471
@@ -489,7 +490,7 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
489 } 490 }
490 } 491 }
491 492
492 ct = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC); 493 ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
493 if (ct == NULL) { 494 if (ct == NULL) {
494 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 495 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
495 atomic_dec(&nf_conntrack_count); 496 atomic_dec(&nf_conntrack_count);
@@ -542,7 +543,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
542 return NULL; 543 return NULL;
543 } 544 }
544 545
545 ct = nf_conntrack_alloc(tuple, &repl_tuple); 546 ct = nf_conntrack_alloc(tuple, &repl_tuple, GFP_ATOMIC);
546 if (ct == NULL || IS_ERR(ct)) { 547 if (ct == NULL || IS_ERR(ct)) {
547 pr_debug("Can't allocate conntrack.\n"); 548 pr_debug("Can't allocate conntrack.\n");
548 return (struct nf_conntrack_tuple_hash *)ct; 549 return (struct nf_conntrack_tuple_hash *)ct;
@@ -847,6 +848,28 @@ acct:
847} 848}
848EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); 849EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
849 850
851bool __nf_ct_kill_acct(struct nf_conn *ct,
852 enum ip_conntrack_info ctinfo,
853 const struct sk_buff *skb,
854 int do_acct)
855{
856#ifdef CONFIG_NF_CT_ACCT
857 if (do_acct) {
858 spin_lock_bh(&nf_conntrack_lock);
859 ct->counters[CTINFO2DIR(ctinfo)].packets++;
860 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
861 skb->len - skb_network_offset(skb);
862 spin_unlock_bh(&nf_conntrack_lock);
863 }
864#endif
865 if (del_timer(&ct->timeout)) {
866 ct->timeout.function((unsigned long)ct);
867 return true;
868 }
869 return false;
870}
871EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
872
850#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 873#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
851 874
852#include <linux/netfilter/nfnetlink.h> 875#include <linux/netfilter/nfnetlink.h>
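
__nf_ct_kill_acct() folds the recurring "credit the final packet, then fire the timeout early" sequence into one place: del_timer() only succeeds for one caller, so invoking timeout.function (death_by_timeout) by hand is race-free, and the boolean return tells the caller whether it actually performed the kill. The later hunks call it through two thin wrappers; presumably these live in include/net/netfilter/nf_conntrack.h, roughly:

    /* sketch of the assumed wrappers, not shown in this diff */
    static inline bool nf_ct_kill_acct(struct nf_conn *ct,
                                       enum ip_conntrack_info ctinfo,
                                       const struct sk_buff *skb)
    {
        return __nf_ct_kill_acct(ct, ctinfo, skb, 1);    /* count, then kill */
    }

    static inline bool nf_ct_kill(struct nf_conn *ct)
    {
        return __nf_ct_kill_acct(ct, 0, NULL, 0);        /* kill only */
    }
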
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 8a3f8b34e466..3469bc71a385 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -95,13 +95,11 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
95 newlen = newoff + t->len; 95 newlen = newoff + t->len;
96 rcu_read_unlock(); 96 rcu_read_unlock();
97 97
98 if (newlen >= ksize(ct->ext)) { 98 new = krealloc(ct->ext, newlen, gfp);
99 new = kmalloc(newlen, gfp); 99 if (!new)
100 if (!new) 100 return NULL;
101 return NULL;
102
103 memcpy(new, ct->ext, ct->ext->len);
104 101
102 if (new != ct->ext) {
105 for (i = 0; i < NF_CT_EXT_NUM; i++) { 103 for (i = 0; i < NF_CT_EXT_NUM; i++) {
106 if (!nf_ct_ext_exist(ct, i)) 104 if (!nf_ct_ext_exist(ct, i))
107 continue; 105 continue;
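
The extension area now grows through krealloc(), which already copies the payload when the block has to move; the surviving loop (context above) only has to run the per-extension move callbacks when the address really changed, instead of always allocating and memcpy()ing. The general shape of the pattern, with hypothetical names:

    /* grow a buffer and repair pointers into it only if it moved */
    struct blob {
        char *data;
        char *cursor;    /* points somewhere inside data */
    };

    static int blob_grow(struct blob *b, size_t newlen, gfp_t gfp)
    {
        char *new = krealloc(b->data, newlen, gfp);

        if (!new)
            return -ENOMEM;
        if (new != b->data)    /* krealloc moved the block */
            b->cursor = new + (b->cursor - b->data);
        b->data = new;
        return 0;
    }
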
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 077bcd228799..95a7967731f9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
4 * (C) 2001 by Jay Schulist <jschlst@samba.org> 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net> 6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2007 by Pablo Neira Ayuso <pablo@netfilter.org> 7 * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
8 * 8 *
9 * Initial connection tracking via netlink development funded and 9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -476,14 +476,14 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
476 if (ctnetlink_dump_id(skb, ct) < 0) 476 if (ctnetlink_dump_id(skb, ct) < 0)
477 goto nla_put_failure; 477 goto nla_put_failure;
478 478
479 if (ctnetlink_dump_status(skb, ct) < 0)
480 goto nla_put_failure;
481
479 if (events & IPCT_DESTROY) { 482 if (events & IPCT_DESTROY) {
480 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 483 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
481 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) 484 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
482 goto nla_put_failure; 485 goto nla_put_failure;
483 } else { 486 } else {
484 if (ctnetlink_dump_status(skb, ct) < 0)
485 goto nla_put_failure;
486
487 if (ctnetlink_dump_timeout(skb, ct) < 0) 487 if (ctnetlink_dump_timeout(skb, ct) < 0)
488 goto nla_put_failure; 488 goto nla_put_failure;
489 489
@@ -813,9 +813,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
813 return -ENOENT; 813 return -ENOENT;
814 } 814 }
815 } 815 }
816 if (del_timer(&ct->timeout))
817 ct->timeout.function((unsigned long)ct);
818 816
817 nf_ct_kill(ct);
819 nf_ct_put(ct); 818 nf_ct_put(ct);
820 819
821 return 0; 820 return 0;
@@ -892,20 +891,19 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
892 891
893 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) 892 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
894 /* unchangeable */ 893 /* unchangeable */
895 return -EINVAL; 894 return -EBUSY;
896 895
897 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 896 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
898 /* SEEN_REPLY bit can only be set */ 897 /* SEEN_REPLY bit can only be set */
899 return -EINVAL; 898 return -EBUSY;
900
901 899
902 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 900 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
903 /* ASSURED bit can only be set */ 901 /* ASSURED bit can only be set */
904 return -EINVAL; 902 return -EBUSY;
905 903
906 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 904 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
907#ifndef CONFIG_NF_NAT_NEEDED 905#ifndef CONFIG_NF_NAT_NEEDED
908 return -EINVAL; 906 return -EOPNOTSUPP;
909#else 907#else
910 struct nf_nat_range range; 908 struct nf_nat_range range;
911 909
@@ -946,7 +944,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
946 944
947 /* don't change helper of sibling connections */ 945 /* don't change helper of sibling connections */
948 if (ct->master) 946 if (ct->master)
949 return -EINVAL; 947 return -EBUSY;
950 948
951 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 949 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
952 if (err < 0) 950 if (err < 0)
@@ -964,7 +962,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
964 962
965 helper = __nf_conntrack_helper_find_byname(helpname); 963 helper = __nf_conntrack_helper_find_byname(helpname);
966 if (helper == NULL) 964 if (helper == NULL)
967 return -EINVAL; 965 return -EOPNOTSUPP;
968 966
969 if (help) { 967 if (help) {
970 if (help->helper == helper) 968 if (help->helper == helper)
@@ -1131,7 +1129,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1131 struct nf_conn_help *help; 1129 struct nf_conn_help *help;
1132 struct nf_conntrack_helper *helper; 1130 struct nf_conntrack_helper *helper;
1133 1131
1134 ct = nf_conntrack_alloc(otuple, rtuple); 1132 ct = nf_conntrack_alloc(otuple, rtuple, GFP_KERNEL);
1135 if (ct == NULL || IS_ERR(ct)) 1133 if (ct == NULL || IS_ERR(ct))
1136 return -ENOMEM; 1134 return -ENOMEM;
1137 1135
@@ -1259,12 +1257,12 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1259 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1257 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1260 /* we only allow nat config for new conntracks */ 1258 /* we only allow nat config for new conntracks */
1261 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1259 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1262 err = -EINVAL; 1260 err = -EOPNOTSUPP;
1263 goto out_unlock; 1261 goto out_unlock;
1264 } 1262 }
1265 /* can't link an existing conntrack to a master */ 1263 /* can't link an existing conntrack to a master */
1266 if (cda[CTA_TUPLE_MASTER]) { 1264 if (cda[CTA_TUPLE_MASTER]) {
1267 err = -EINVAL; 1265 err = -EOPNOTSUPP;
1268 goto out_unlock; 1266 goto out_unlock;
1269 } 1267 }
1270 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), 1268 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
@@ -1609,7 +1607,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1609 h = __nf_conntrack_helper_find_byname(name); 1607 h = __nf_conntrack_helper_find_byname(name);
1610 if (!h) { 1608 if (!h) {
1611 spin_unlock_bh(&nf_conntrack_lock); 1609 spin_unlock_bh(&nf_conntrack_lock);
1612 return -EINVAL; 1610 return -EOPNOTSUPP;
1613 } 1611 }
1614 for (i = 0; i < nf_ct_expect_hsize; i++) { 1612 for (i = 0; i < nf_ct_expect_hsize; i++) {
1615 hlist_for_each_entry_safe(exp, n, next, 1613 hlist_for_each_entry_safe(exp, n, next,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index afb4a1861d2c..e7866dd3cde6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -475,8 +475,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
475 if (type == DCCP_PKT_RESET && 475 if (type == DCCP_PKT_RESET &&
476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
477 /* Tear down connection immediately if only reply is a RESET */ 477 /* Tear down connection immediately if only reply is a RESET */
478 if (del_timer(&ct->timeout)) 478 nf_ct_kill_acct(ct, ctinfo, skb);
479 ct->timeout.function((unsigned long)ct);
480 return NF_ACCEPT; 479 return NF_ACCEPT;
481 } 480 }
482 481
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index cbf2e27a22b2..41183a4d2d62 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -463,6 +463,82 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
463 return true; 463 return true;
464} 464}
465 465
466#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
467
468#include <linux/netfilter/nfnetlink.h>
469#include <linux/netfilter/nfnetlink_conntrack.h>
470
471static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
472 const struct nf_conn *ct)
473{
474 struct nlattr *nest_parms;
475
476 read_lock_bh(&sctp_lock);
477 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED);
478 if (!nest_parms)
479 goto nla_put_failure;
480
481 NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
482
483 NLA_PUT_BE32(skb,
484 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
485 htonl(ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]));
486
487 NLA_PUT_BE32(skb,
488 CTA_PROTOINFO_SCTP_VTAG_REPLY,
489 htonl(ct->proto.sctp.vtag[IP_CT_DIR_REPLY]));
490
491 read_unlock_bh(&sctp_lock);
492
493 nla_nest_end(skb, nest_parms);
494
495 return 0;
496
497nla_put_failure:
498 read_unlock_bh(&sctp_lock);
499 return -1;
500}
501
502static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
503 [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
504 [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
505 [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
506};
507
508static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
509{
510 struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
511 struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];
512 int err;
513
514 /* updates may not contain the internal protocol info, skip parsing */
515 if (!attr)
516 return 0;
517
518 err = nla_parse_nested(tb,
519 CTA_PROTOINFO_SCTP_MAX,
520 attr,
521 sctp_nla_policy);
522 if (err < 0)
523 return err;
524
525 if (!tb[CTA_PROTOINFO_SCTP_STATE] ||
526 !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||
527 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
528 return -EINVAL;
529
530 write_lock_bh(&sctp_lock);
531 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
532 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
533 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
534 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
535 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
536 write_unlock_bh(&sctp_lock);
537
538 return 0;
539}
540#endif
541
466#ifdef CONFIG_SYSCTL 542#ifdef CONFIG_SYSCTL
467static unsigned int sctp_sysctl_table_users; 543static unsigned int sctp_sysctl_table_users;
468static struct ctl_table_header *sctp_sysctl_header; 544static struct ctl_table_header *sctp_sysctl_header;
@@ -591,6 +667,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
591 .new = sctp_new, 667 .new = sctp_new,
592 .me = THIS_MODULE, 668 .me = THIS_MODULE,
593#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 669#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
670 .to_nlattr = sctp_to_nlattr,
671 .from_nlattr = nlattr_to_sctp,
594 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 672 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
595 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 673 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
596 .nla_policy = nf_ct_port_nla_policy, 674 .nla_policy = nf_ct_port_nla_policy,
@@ -617,6 +695,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
617 .new = sctp_new, 695 .new = sctp_new,
618 .me = THIS_MODULE, 696 .me = THIS_MODULE,
619#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 697#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
698 .to_nlattr = sctp_to_nlattr,
699 .from_nlattr = nlattr_to_sctp,
620 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 700 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
621 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 701 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
622 .nla_policy = nf_ct_port_nla_policy, 702 .nla_policy = nf_ct_port_nla_policy,
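
sctp_to_nlattr()/nlattr_to_sctp() follow the standard nested-attribute protocol: the dump side opens an NLA_F_NESTED container and emits fixed-width attributes under sctp_lock, while the parse side validates against sctp_nla_policy before touching conntrack state. The parsing half reduces to this shape (a sketch; error handling trimmed to the essentials):

    static int parse_sctp_state(struct nlattr *attr, u8 *state)
    {
        struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX + 1];
        int err;

        err = nla_parse_nested(tb, CTA_PROTOINFO_SCTP_MAX, attr,
                               sctp_nla_policy);
        if (err < 0)
            return err;
        if (!tb[CTA_PROTOINFO_SCTP_STATE])    /* mandatory attribute */
            return -EINVAL;
        *state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
        return 0;
    }
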
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index dd28fb239a60..420a10d8eb1e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -844,14 +844,13 @@ static int tcp_packet(struct nf_conn *ct,
844 /* Attempt to reopen a closed/aborted connection. 844 /* Attempt to reopen a closed/aborted connection.
845 * Delete this connection and look up again. */ 845 * Delete this connection and look up again. */
846 write_unlock_bh(&tcp_lock); 846 write_unlock_bh(&tcp_lock);
847
847 /* Only repeat if we can actually remove the timer. 848 /* Only repeat if we can actually remove the timer.
848 * Destruction may already be in progress in process 849 * Destruction may already be in progress in process
849 * context and we must give it a chance to terminate. 850 * context and we must give it a chance to terminate.
850 */ 851 */
851 if (del_timer(&ct->timeout)) { 852 if (nf_ct_kill(ct))
852 ct->timeout.function((unsigned long)ct);
853 return -NF_REPEAT; 853 return -NF_REPEAT;
854 }
855 return -NF_DROP; 854 return -NF_DROP;
856 } 855 }
857 /* Fall through */ 856 /* Fall through */
@@ -884,8 +883,7 @@ static int tcp_packet(struct nf_conn *ct,
884 if (LOG_INVALID(IPPROTO_TCP)) 883 if (LOG_INVALID(IPPROTO_TCP))
885 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 884 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
886 "nf_ct_tcp: killing out of sync session "); 885 "nf_ct_tcp: killing out of sync session ");
887 if (del_timer(&ct->timeout)) 886 nf_ct_kill(ct);
888 ct->timeout.function((unsigned long)ct);
889 return -NF_DROP; 887 return -NF_DROP;
890 } 888 }
891 ct->proto.tcp.last_index = index; 889 ct->proto.tcp.last_index = index;
@@ -968,8 +966,7 @@ static int tcp_packet(struct nf_conn *ct,
968 problem case, so we can delete the conntrack 966 problem case, so we can delete the conntrack
969 immediately. --RR */ 967 immediately. --RR */
970 if (th->rst) { 968 if (th->rst) {
971 if (del_timer(&ct->timeout)) 969 nf_ct_kill_acct(ct, ctinfo, skb);
972 ct->timeout.function((unsigned long)ct);
973 return NF_ACCEPT; 970 return NF_ACCEPT;
974 } 971 }
975 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) 972 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3447025ce068..04e9c965f8ca 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -243,7 +243,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
243 switch ((enum nfqnl_config_mode)queue->copy_mode) { 243 switch ((enum nfqnl_config_mode)queue->copy_mode) {
244 case NFQNL_COPY_META: 244 case NFQNL_COPY_META:
245 case NFQNL_COPY_NONE: 245 case NFQNL_COPY_NONE:
246 data_len = 0;
247 break; 246 break;
248 247
249 case NFQNL_COPY_PACKET: 248 case NFQNL_COPY_PACKET:
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 211189eb2b67..76ca1f2421eb 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -8,7 +8,7 @@
8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> 8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
9 * by Henrik Nordstrom <hno@marasystems.com> 9 * by Henrik Nordstrom <hno@marasystems.com>
10 * 10 *
11 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 11 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
@@ -94,6 +94,12 @@ connsecmark_tg_check(const char *tablename, const void *entry,
94{ 94{
95 const struct xt_connsecmark_target_info *info = targinfo; 95 const struct xt_connsecmark_target_info *info = targinfo;
96 96
97 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
98 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
99 "or \'security\' tables, not \'%s\'.\n", tablename);
100 return false;
101 }
102
97 switch (info->mode) { 103 switch (info->mode) {
98 case CONNSECMARK_SAVE: 104 case CONNSECMARK_SAVE:
99 case CONNSECMARK_RESTORE: 105 case CONNSECMARK_RESTORE:
@@ -126,7 +132,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
126 .destroy = connsecmark_tg_destroy, 132 .destroy = connsecmark_tg_destroy,
127 .target = connsecmark_tg, 133 .target = connsecmark_tg,
128 .targetsize = sizeof(struct xt_connsecmark_target_info), 134 .targetsize = sizeof(struct xt_connsecmark_target_info),
129 .table = "mangle",
130 .me = THIS_MODULE, 135 .me = THIS_MODULE,
131 }, 136 },
132 { 137 {
@@ -136,7 +141,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
136 .destroy = connsecmark_tg_destroy, 141 .destroy = connsecmark_tg_destroy,
137 .target = connsecmark_tg, 142 .target = connsecmark_tg,
138 .targetsize = sizeof(struct xt_connsecmark_target_info), 143 .targetsize = sizeof(struct xt_connsecmark_target_info),
139 .table = "mangle",
140 .me = THIS_MODULE, 144 .me = THIS_MODULE,
141 }, 145 },
142}; 146};
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index c0284856ccd4..94f87ee7552b 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -5,7 +5,7 @@
5 * Based on the nfmark match by: 5 * Based on the nfmark match by:
6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> 6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
7 * 7 *
8 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 8 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -89,6 +89,12 @@ secmark_tg_check(const char *tablename, const void *entry,
89{ 89{
90 struct xt_secmark_target_info *info = targinfo; 90 struct xt_secmark_target_info *info = targinfo;
91 91
92 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
93 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
94 "or \'security\' tables, not \'%s\'.\n", tablename);
95 return false;
96 }
97
92 if (mode && mode != info->mode) { 98 if (mode && mode != info->mode) {
93 printk(KERN_INFO PFX "mode already set to %hu cannot mix with " 99 printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
94 "rules for mode %hu\n", mode, info->mode); 100 "rules for mode %hu\n", mode, info->mode);
@@ -127,7 +133,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
127 .destroy = secmark_tg_destroy, 133 .destroy = secmark_tg_destroy,
128 .target = secmark_tg, 134 .target = secmark_tg,
129 .targetsize = sizeof(struct xt_secmark_target_info), 135 .targetsize = sizeof(struct xt_secmark_target_info),
130 .table = "mangle",
131 .me = THIS_MODULE, 136 .me = THIS_MODULE,
132 }, 137 },
133 { 138 {
@@ -137,7 +142,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
137 .destroy = secmark_tg_destroy, 142 .destroy = secmark_tg_destroy,
138 .target = secmark_tg, 143 .target = secmark_tg,
139 .targetsize = sizeof(struct xt_secmark_target_info), 144 .targetsize = sizeof(struct xt_secmark_target_info),
140 .table = "mangle",
141 .me = THIS_MODULE, 145 .me = THIS_MODULE,
142 }, 146 },
143}; 147};
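
Both SECMARK targets drop their fixed .table = "mangle" binding and validate the table by name in checkentry instead, which is what lets a single registration serve both the mangle and the new security table. The pattern in isolation, as a sketch:

    /* table restriction enforced at rule-insertion time */
    static bool my_tg_check(const char *tablename, const void *entry,
                            const struct xt_target *target,
                            void *targinfo, unsigned int hook_mask)
    {
        if (strcmp(tablename, "mangle") != 0 &&
            strcmp(tablename, "security") != 0)
            return false;    /* reject the rule in any other table */
        return true;
    }
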
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index 72f694d947f4..4903182a062b 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -29,12 +29,16 @@ string_mt(const struct sk_buff *skb, const struct net_device *in,
29{ 29{
30 const struct xt_string_info *conf = matchinfo; 30 const struct xt_string_info *conf = matchinfo;
31 struct ts_state state; 31 struct ts_state state;
32 int invert;
32 33
33 memset(&state, 0, sizeof(struct ts_state)); 34 memset(&state, 0, sizeof(struct ts_state));
34 35
36 invert = (match->revision == 0 ? conf->u.v0.invert :
37 conf->u.v1.flags & XT_STRING_FLAG_INVERT);
38
35 return (skb_find_text((struct sk_buff *)skb, conf->from_offset, 39 return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
36 conf->to_offset, conf->config, &state) 40 conf->to_offset, conf->config, &state)
37 != UINT_MAX) ^ conf->invert; 41 != UINT_MAX) ^ invert;
38} 42}
39 43
40#define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) 44#define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m))
@@ -46,6 +50,7 @@ string_mt_check(const char *tablename, const void *ip,
46{ 50{
47 struct xt_string_info *conf = matchinfo; 51 struct xt_string_info *conf = matchinfo;
48 struct ts_config *ts_conf; 52 struct ts_config *ts_conf;
53 int flags = TS_AUTOLOAD;
49 54
50 /* Damn, can't handle this case properly with iptables... */ 55 /* Damn, can't handle this case properly with iptables... */
51 if (conf->from_offset > conf->to_offset) 56 if (conf->from_offset > conf->to_offset)
@@ -54,8 +59,15 @@ string_mt_check(const char *tablename, const void *ip,
54 return false; 59 return false;
55 if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) 60 if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
56 return false; 61 return false;
62 if (match->revision == 1) {
63 if (conf->u.v1.flags &
64 ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT))
65 return false;
66 if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE)
67 flags |= TS_IGNORECASE;
68 }
57 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 69 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
58 GFP_KERNEL, TS_AUTOLOAD); 70 GFP_KERNEL, flags);
59 if (IS_ERR(ts_conf)) 71 if (IS_ERR(ts_conf))
60 return false; 72 return false;
61 73
@@ -72,6 +84,17 @@ static void string_mt_destroy(const struct xt_match *match, void *matchinfo)
72static struct xt_match string_mt_reg[] __read_mostly = { 84static struct xt_match string_mt_reg[] __read_mostly = {
73 { 85 {
74 .name = "string", 86 .name = "string",
87 .revision = 0,
88 .family = AF_INET,
89 .checkentry = string_mt_check,
90 .match = string_mt,
91 .destroy = string_mt_destroy,
92 .matchsize = sizeof(struct xt_string_info),
93 .me = THIS_MODULE
94 },
95 {
96 .name = "string",
97 .revision = 1,
75 .family = AF_INET, 98 .family = AF_INET,
76 .checkentry = string_mt_check, 99 .checkentry = string_mt_check,
77 .match = string_mt, 100 .match = string_mt,
@@ -81,6 +104,17 @@ static struct xt_match string_mt_reg[] __read_mostly = {
81 }, 104 },
82 { 105 {
83 .name = "string", 106 .name = "string",
107 .revision = 0,
108 .family = AF_INET6,
109 .checkentry = string_mt_check,
110 .match = string_mt,
111 .destroy = string_mt_destroy,
112 .matchsize = sizeof(struct xt_string_info),
113 .me = THIS_MODULE
114 },
115 {
116 .name = "string",
117 .revision = 1,
84 .family = AF_INET6, 118 .family = AF_INET6,
85 .checkentry = string_mt_check, 119 .checkentry = string_mt_check,
86 .match = string_mt, 120 .match = string_mt,
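
xt_string now registers the same match function twice per family and lets match->revision pick which view of the shared matchinfo union is live: revision 0 keeps the bare invert byte, revision 1 packs invert and case-insensitivity into a flags field, and only revision 1 may hand TS_IGNORECASE to the textsearch engine. The dispatch as a sketch:

    /* revision-dependent view of one xt_string_info blob */
    static bool string_invert(const struct xt_match *match,
                              const struct xt_string_info *conf)
    {
        if (match->revision == 0)
            return conf->u.v0.invert;
        return conf->u.v1.flags & XT_STRING_FLAG_INVERT;
    }
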
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 9080c61b71a5..0aec318bf0ef 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -591,7 +591,7 @@ list_retry:
591 if (nlsze_mult < 4) { 591 if (nlsze_mult < 4) {
592 rcu_read_unlock(); 592 rcu_read_unlock();
593 kfree_skb(ans_skb); 593 kfree_skb(ans_skb);
594 nlsze_mult++; 594 nlsze_mult *= 2;
595 goto list_start; 595 goto list_start;
596 } 596 }
597list_failure_lock: 597list_failure_lock:
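
Switching nlsze_mult from increment to doubling makes the retry buffer grow geometrically: the attempts allocate at 1x, 2x and then 4x the base size before the nlsze_mult < 4 guard gives up, rather than creeping up one step at a time. The retry loop, stripped of the netlabel specifics (try_build_reply() is a hypothetical stand-in):

    static int build_list_reply(size_t base_size)
    {
        size_t mult = 1;

        for (;;) {
            if (try_build_reply(mult * base_size) == 0)    /* assumption */
                return 0;        /* reply fit */
            if (mult >= 4)
                return -ENOSPC;  /* grown enough, give up */
            mult *= 2;           /* 1 -> 2 -> 4 */
        }
    }
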
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 349aba189558..98bfe277eab2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -759,7 +759,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
759 * 0: continue 759 * 0: continue
760 * 1: repeat lookup - reference dropped while waiting for socket memory. 760 * 1: repeat lookup - reference dropped while waiting for socket memory.
761 */ 761 */
762int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, 762int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
763 long *timeo, struct sock *ssk) 763 long *timeo, struct sock *ssk)
764{ 764{
765 struct netlink_sock *nlk; 765 struct netlink_sock *nlk;
@@ -892,7 +892,7 @@ retry:
892 return err; 892 return err;
893 } 893 }
894 894
895 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); 895 err = netlink_attachskb(sk, skb, &timeo, ssk);
896 if (err == 1) 896 if (err == 1)
897 goto retry; 897 goto retry;
898 if (err) 898 if (err)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bae8b998cab..d41be0d66eb0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -74,6 +74,18 @@ static const struct proto_ops nr_proto_ops;
74 */ 74 */
75static struct lock_class_key nr_netdev_xmit_lock_key; 75static struct lock_class_key nr_netdev_xmit_lock_key;
76 76
77static void nr_set_lockdep_one(struct net_device *dev,
78 struct netdev_queue *txq,
79 void *_unused)
80{
81 lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
82}
83
84static void nr_set_lockdep_key(struct net_device *dev)
85{
86 netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
87}
88
77/* 89/*
78 * Socket removal during an interrupt is now safe. 90 * Socket removal during an interrupt is now safe.
79 */ 91 */
@@ -475,13 +487,11 @@ static struct sock *nr_make_new(struct sock *osk)
475 sock_init_data(NULL, sk); 487 sock_init_data(NULL, sk);
476 488
477 sk->sk_type = osk->sk_type; 489 sk->sk_type = osk->sk_type;
478 sk->sk_socket = osk->sk_socket;
479 sk->sk_priority = osk->sk_priority; 490 sk->sk_priority = osk->sk_priority;
480 sk->sk_protocol = osk->sk_protocol; 491 sk->sk_protocol = osk->sk_protocol;
481 sk->sk_rcvbuf = osk->sk_rcvbuf; 492 sk->sk_rcvbuf = osk->sk_rcvbuf;
482 sk->sk_sndbuf = osk->sk_sndbuf; 493 sk->sk_sndbuf = osk->sk_sndbuf;
483 sk->sk_state = TCP_ESTABLISHED; 494 sk->sk_state = TCP_ESTABLISHED;
484 sk->sk_sleep = osk->sk_sleep;
485 sock_copy_flags(sk, osk); 495 sock_copy_flags(sk, osk);
486 496
487 skb_queue_head_init(&nr->ack_queue); 497 skb_queue_head_init(&nr->ack_queue);
@@ -538,11 +548,9 @@ static int nr_release(struct socket *sock)
538 sk->sk_state_change(sk); 548 sk->sk_state_change(sk);
539 sock_orphan(sk); 549 sock_orphan(sk);
540 sock_set_flag(sk, SOCK_DESTROY); 550 sock_set_flag(sk, SOCK_DESTROY);
541 sk->sk_socket = NULL;
542 break; 551 break;
543 552
544 default: 553 default:
545 sk->sk_socket = NULL;
546 break; 554 break;
547 } 555 }
548 556
@@ -810,13 +818,11 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
810 goto out_release; 818 goto out_release;
811 819
812 newsk = skb->sk; 820 newsk = skb->sk;
813 newsk->sk_socket = newsock; 821 sock_graft(newsk, newsock);
814 newsk->sk_sleep = &newsock->wait;
815 822
816 /* Now attach up the new socket */ 823 /* Now attach up the new socket */
817 kfree_skb(skb); 824 kfree_skb(skb);
818 sk_acceptq_removed(sk); 825 sk_acceptq_removed(sk);
819 newsock->sk = newsk;
820 826
821out_release: 827out_release:
822 release_sock(sk); 828 release_sock(sk);
@@ -1436,7 +1442,7 @@ static int __init nr_proto_init(void)
1436 free_netdev(dev); 1442 free_netdev(dev);
1437 goto fail; 1443 goto fail;
1438 } 1444 }
1439 lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key); 1445 nr_set_lockdep_key(dev);
1440 dev_nr[i] = dev; 1446 dev_nr[i] = dev;
1441 } 1447 }
1442 1448
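
With one TX lock per queue, the single lockdep_set_class(&dev->_xmit_lock, ...) call no longer covers the device; netdev_for_each_tx_queue() visits every struct netdev_queue so each _xmit_lock gets the NET/ROM class, keeping lockdep from confusing its nested transmit locking with ordinary device locks. Any driver with the same need follows the same shape:

    static struct lock_class_key my_xmit_lock_key;    /* one key per driver */

    static void my_set_lockdep_one(struct net_device *dev,
                                   struct netdev_queue *txq, void *unused)
    {
        lockdep_set_class(&txq->_xmit_lock, &my_xmit_lock_key);
    }

    static void my_set_lockdep_key(struct net_device *dev)
    {
        netdev_for_each_tx_queue(dev, my_set_lockdep_one, NULL);
    }
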
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2cee87da4441..db792e02a37f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PACKET - implements raw packet sockets. 6 * PACKET - implements raw packet sockets.
7 * 7 *
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
@@ -188,6 +186,8 @@ struct packet_sock {
188 unsigned int pg_vec_order; 186 unsigned int pg_vec_order;
189 unsigned int pg_vec_pages; 187 unsigned int pg_vec_pages;
190 unsigned int pg_vec_len; 188 unsigned int pg_vec_len;
189 enum tpacket_versions tp_version;
190 unsigned int tp_hdrlen;
191#endif 191#endif
192}; 192};
193 193
@@ -203,14 +203,52 @@ struct packet_skb_cb {
203 203
204#ifdef CONFIG_PACKET_MMAP 204#ifdef CONFIG_PACKET_MMAP
205 205
206static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position) 206static void *packet_lookup_frame(struct packet_sock *po, unsigned int position,
207 int status)
207{ 208{
208 unsigned int pg_vec_pos, frame_offset; 209 unsigned int pg_vec_pos, frame_offset;
210 union {
211 struct tpacket_hdr *h1;
212 struct tpacket2_hdr *h2;
213 void *raw;
214 } h;
209 215
210 pg_vec_pos = position / po->frames_per_block; 216 pg_vec_pos = position / po->frames_per_block;
211 frame_offset = position % po->frames_per_block; 217 frame_offset = position % po->frames_per_block;
212 218
213 return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size)); 219 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
220 switch (po->tp_version) {
221 case TPACKET_V1:
222 if (status != h.h1->tp_status ? TP_STATUS_USER :
223 TP_STATUS_KERNEL)
224 return NULL;
225 break;
226 case TPACKET_V2:
227 if (status != h.h2->tp_status ? TP_STATUS_USER :
228 TP_STATUS_KERNEL)
229 return NULL;
230 break;
231 }
232 return h.raw;
233}
234
235static void __packet_set_status(struct packet_sock *po, void *frame, int status)
236{
237 union {
238 struct tpacket_hdr *h1;
239 struct tpacket2_hdr *h2;
240 void *raw;
241 } h;
242
243 h.raw = frame;
244 switch (po->tp_version) {
245 case TPACKET_V1:
246 h.h1->tp_status = status;
247 break;
248 case TPACKET_V2:
249 h.h2->tp_status = status;
250 break;
251 }
214} 252}
215#endif 253#endif
216 254
@@ -553,14 +591,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
553 struct sock *sk; 591 struct sock *sk;
554 struct packet_sock *po; 592 struct packet_sock *po;
555 struct sockaddr_ll *sll; 593 struct sockaddr_ll *sll;
556 struct tpacket_hdr *h; 594 union {
595 struct tpacket_hdr *h1;
596 struct tpacket2_hdr *h2;
597 void *raw;
598 } h;
557 u8 * skb_head = skb->data; 599 u8 * skb_head = skb->data;
558 int skb_len = skb->len; 600 int skb_len = skb->len;
559 unsigned int snaplen, res; 601 unsigned int snaplen, res;
560 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; 602 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
561 unsigned short macoff, netoff; 603 unsigned short macoff, netoff, hdrlen;
562 struct sk_buff *copy_skb = NULL; 604 struct sk_buff *copy_skb = NULL;
563 struct timeval tv; 605 struct timeval tv;
606 struct timespec ts;
564 607
565 if (skb->pkt_type == PACKET_LOOPBACK) 608 if (skb->pkt_type == PACKET_LOOPBACK)
566 goto drop; 609 goto drop;
@@ -592,10 +635,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
592 snaplen = res; 635 snaplen = res;
593 636
594 if (sk->sk_type == SOCK_DGRAM) { 637 if (sk->sk_type == SOCK_DGRAM) {
595 macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16; 638 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16;
596 } else { 639 } else {
597 unsigned maclen = skb_network_offset(skb); 640 unsigned maclen = skb_network_offset(skb);
598 netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen)); 641 netoff = TPACKET_ALIGN(po->tp_hdrlen +
642 (maclen < 16 ? 16 : maclen));
599 macoff = netoff - maclen; 643 macoff = netoff - maclen;
600 } 644 }
601 645
@@ -618,9 +662,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
618 } 662 }
619 663
620 spin_lock(&sk->sk_receive_queue.lock); 664 spin_lock(&sk->sk_receive_queue.lock);
621 h = packet_lookup_frame(po, po->head); 665 h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL);
622 666 if (!h.raw)
623 if (h->tp_status)
624 goto ring_is_full; 667 goto ring_is_full;
625 po->head = po->head != po->frame_max ? po->head+1 : 0; 668 po->head = po->head != po->frame_max ? po->head+1 : 0;
626 po->stats.tp_packets++; 669 po->stats.tp_packets++;
@@ -632,20 +675,41 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
632 status &= ~TP_STATUS_LOSING; 675 status &= ~TP_STATUS_LOSING;
633 spin_unlock(&sk->sk_receive_queue.lock); 676 spin_unlock(&sk->sk_receive_queue.lock);
634 677
635 skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen); 678 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
636 679
637 h->tp_len = skb->len; 680 switch (po->tp_version) {
638 h->tp_snaplen = snaplen; 681 case TPACKET_V1:
639 h->tp_mac = macoff; 682 h.h1->tp_len = skb->len;
640 h->tp_net = netoff; 683 h.h1->tp_snaplen = snaplen;
641 if (skb->tstamp.tv64) 684 h.h1->tp_mac = macoff;
642 tv = ktime_to_timeval(skb->tstamp); 685 h.h1->tp_net = netoff;
643 else 686 if (skb->tstamp.tv64)
644 do_gettimeofday(&tv); 687 tv = ktime_to_timeval(skb->tstamp);
645 h->tp_sec = tv.tv_sec; 688 else
646 h->tp_usec = tv.tv_usec; 689 do_gettimeofday(&tv);
690 h.h1->tp_sec = tv.tv_sec;
691 h.h1->tp_usec = tv.tv_usec;
692 hdrlen = sizeof(*h.h1);
693 break;
694 case TPACKET_V2:
695 h.h2->tp_len = skb->len;
696 h.h2->tp_snaplen = snaplen;
697 h.h2->tp_mac = macoff;
698 h.h2->tp_net = netoff;
699 if (skb->tstamp.tv64)
700 ts = ktime_to_timespec(skb->tstamp);
701 else
702 getnstimeofday(&ts);
703 h.h2->tp_sec = ts.tv_sec;
704 h.h2->tp_nsec = ts.tv_nsec;
705 h.h2->tp_vlan_tci = skb->vlan_tci;
706 hdrlen = sizeof(*h.h2);
707 break;
708 default:
709 BUG();
710 }
647 711
648 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); 712 sll = h.raw + TPACKET_ALIGN(hdrlen);
649 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 713 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
650 sll->sll_family = AF_PACKET; 714 sll->sll_family = AF_PACKET;
651 sll->sll_hatype = dev->type; 715 sll->sll_hatype = dev->type;
@@ -656,14 +720,14 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
656 else 720 else
657 sll->sll_ifindex = dev->ifindex; 721 sll->sll_ifindex = dev->ifindex;
658 722
659 h->tp_status = status; 723 __packet_set_status(po, h.raw, status);
660 smp_mb(); 724 smp_mb();
661 725
662 { 726 {
663 struct page *p_start, *p_end; 727 struct page *p_start, *p_end;
664 u8 *h_end = (u8 *)h + macoff + snaplen - 1; 728 u8 *h_end = h.raw + macoff + snaplen - 1;
665 729
666 p_start = virt_to_page(h); 730 p_start = virt_to_page(h.raw);
667 p_end = virt_to_page(h_end); 731 p_end = virt_to_page(h_end);
668 while (p_start <= p_end) { 732 while (p_start <= p_end) {
669 flush_dcache_page(p_start); 733 flush_dcache_page(p_start);
@@ -1109,6 +1173,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1109 aux.tp_snaplen = skb->len; 1173 aux.tp_snaplen = skb->len;
1110 aux.tp_mac = 0; 1174 aux.tp_mac = 0;
1111 aux.tp_net = skb_network_offset(skb); 1175 aux.tp_net = skb_network_offset(skb);
1176 aux.tp_vlan_tci = skb->vlan_tci;
1112 1177
1113 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 1178 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1114 } 1179 }
@@ -1175,7 +1240,8 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1175 return 0; 1240 return 0;
1176} 1241}
1177 1242
1178static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) 1243static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1244 int what)
1179{ 1245{
1180 switch (i->type) { 1246 switch (i->type) {
1181 case PACKET_MR_MULTICAST: 1247 case PACKET_MR_MULTICAST:
@@ -1185,13 +1251,14 @@ static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int w
1185 dev_mc_delete(dev, i->addr, i->alen, 0); 1251 dev_mc_delete(dev, i->addr, i->alen, 0);
1186 break; 1252 break;
1187 case PACKET_MR_PROMISC: 1253 case PACKET_MR_PROMISC:
1188 dev_set_promiscuity(dev, what); 1254 return dev_set_promiscuity(dev, what);
1189 break; 1255 break;
1190 case PACKET_MR_ALLMULTI: 1256 case PACKET_MR_ALLMULTI:
1191 dev_set_allmulti(dev, what); 1257 return dev_set_allmulti(dev, what);
1192 break; 1258 break;
1193 default:; 1259 default:;
1194 } 1260 }
1261 return 0;
1195} 1262}
1196 1263
1197static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) 1264static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
@@ -1245,7 +1312,11 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1245 i->count = 1; 1312 i->count = 1;
1246 i->next = po->mclist; 1313 i->next = po->mclist;
1247 po->mclist = i; 1314 po->mclist = i;
1248 packet_dev_mc(dev, i, +1); 1315 err = packet_dev_mc(dev, i, 1);
1316 if (err) {
1317 po->mclist = i->next;
1318 kfree(i);
1319 }
1249 1320
1250done: 1321done:
1251 rtnl_unlock(); 1322 rtnl_unlock();
@@ -1358,6 +1429,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1358 pkt_sk(sk)->copy_thresh = val; 1429 pkt_sk(sk)->copy_thresh = val;
1359 return 0; 1430 return 0;
1360 } 1431 }
1432 case PACKET_VERSION:
1433 {
1434 int val;
1435
1436 if (optlen != sizeof(val))
1437 return -EINVAL;
1438 if (po->pg_vec)
1439 return -EBUSY;
1440 if (copy_from_user(&val, optval, sizeof(val)))
1441 return -EFAULT;
1442 switch (val) {
1443 case TPACKET_V1:
1444 case TPACKET_V2:
1445 po->tp_version = val;
1446 return 0;
1447 default:
1448 return -EINVAL;
1449 }
1450 }
1361#endif 1451#endif
1362 case PACKET_AUXDATA: 1452 case PACKET_AUXDATA:
1363 { 1453 {
@@ -1433,6 +1523,31 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1433 1523
1434 data = &val; 1524 data = &val;
1435 break; 1525 break;
1526#ifdef CONFIG_PACKET_MMAP
1527 case PACKET_VERSION:
1528 if (len > sizeof(int))
1529 len = sizeof(int);
1530 val = po->tp_version;
1531 data = &val;
1532 break;
1533 case PACKET_HDRLEN:
1534 if (len > sizeof(int))
1535 len = sizeof(int);
1536 if (copy_from_user(&val, optval, len))
1537 return -EFAULT;
1538 switch (val) {
1539 case TPACKET_V1:
1540 val = sizeof(struct tpacket_hdr);
1541 break;
1542 case TPACKET_V2:
1543 val = sizeof(struct tpacket2_hdr);
1544 break;
1545 default:
1546 return -EINVAL;
1547 }
1548 data = &val;
1549 break;
1550#endif
1436 default: 1551 default:
1437 return -ENOPROTOOPT; 1552 return -ENOPROTOOPT;
1438 } 1553 }
@@ -1566,11 +1681,8 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
1566 spin_lock_bh(&sk->sk_receive_queue.lock); 1681 spin_lock_bh(&sk->sk_receive_queue.lock);
1567 if (po->pg_vec) { 1682 if (po->pg_vec) {
1568 unsigned last = po->head ? po->head-1 : po->frame_max; 1683 unsigned last = po->head ? po->head-1 : po->frame_max;
1569 struct tpacket_hdr *h;
1570 1684
1571 h = packet_lookup_frame(po, last); 1685 if (packet_lookup_frame(po, last, TP_STATUS_USER))
1572
1573 if (h->tp_status)
1574 mask |= POLLIN | POLLRDNORM; 1686 mask |= POLLIN | POLLRDNORM;
1575 } 1687 }
1576 spin_unlock_bh(&sk->sk_receive_queue.lock); 1688 spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -1665,11 +1777,20 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1665 if (unlikely(po->pg_vec)) 1777 if (unlikely(po->pg_vec))
1666 return -EBUSY; 1778 return -EBUSY;
1667 1779
1780 switch (po->tp_version) {
1781 case TPACKET_V1:
1782 po->tp_hdrlen = TPACKET_HDRLEN;
1783 break;
1784 case TPACKET_V2:
1785 po->tp_hdrlen = TPACKET2_HDRLEN;
1786 break;
1787 }
1788
1668 if (unlikely((int)req->tp_block_size <= 0)) 1789 if (unlikely((int)req->tp_block_size <= 0))
1669 return -EINVAL; 1790 return -EINVAL;
1670 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 1791 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1671 return -EINVAL; 1792 return -EINVAL;
1672 if (unlikely(req->tp_frame_size < TPACKET_HDRLEN)) 1793 if (unlikely(req->tp_frame_size < po->tp_hdrlen))
1673 return -EINVAL; 1794 return -EINVAL;
1674 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 1795 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1675 return -EINVAL; 1796 return -EINVAL;
@@ -1688,13 +1809,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1688 goto out; 1809 goto out;
1689 1810
1690 for (i = 0; i < req->tp_block_nr; i++) { 1811 for (i = 0; i < req->tp_block_nr; i++) {
1691 char *ptr = pg_vec[i]; 1812 void *ptr = pg_vec[i];
1692 struct tpacket_hdr *header;
1693 int k; 1813 int k;
1694 1814
1695 for (k = 0; k < po->frames_per_block; k++) { 1815 for (k = 0; k < po->frames_per_block; k++) {
1696 header = (struct tpacket_hdr *) ptr; 1816 __packet_set_status(po, ptr, TP_STATUS_KERNEL);
1697 header->tp_status = TP_STATUS_KERNEL;
1698 ptr += req->tp_frame_size; 1817 ptr += req->tp_frame_size;
1699 } 1818 }
1700 } 1819 }
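Userspace opts in to the new frame format before setting up the ring; PACKET_HDRLEN doubles as a capability probe, since it maps a version number to that version's header size and fails on kernels that predate TPACKET_V2. A hypothetical probe for an already-created PF_PACKET socket:

	#include <linux/if_packet.h>
	#include <sys/socket.h>

	static int use_tpacket_v2(int fd)
	{
		int version = TPACKET_V2, hdrlen = TPACKET_V2;
		socklen_t len = sizeof(hdrlen);

		/* older kernels reject the V2 query outright */
		if (getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &len) < 0)
			return sizeof(struct tpacket_hdr);	/* stay on V1 */
		/* must happen before PACKET_RX_RING: pg_vec locks it */
		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
			       &version, sizeof(version)) < 0)
			return sizeof(struct tpacket_hdr);
		return hdrlen;	/* frames now start with struct tpacket2_hdr */
	}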
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index e4b051dbed61..8aa822730145 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -30,39 +30,62 @@ struct rfkill_task {
30 spinlock_t lock; /* for accessing last and desired state */ 30 spinlock_t lock; /* for accessing last and desired state */
31 unsigned long last; /* last schedule */ 31 unsigned long last; /* last schedule */
32 enum rfkill_state desired_state; /* on/off */ 32 enum rfkill_state desired_state; /* on/off */
33 enum rfkill_state current_state; /* on/off */
34}; 33};
35 34
36static void rfkill_task_handler(struct work_struct *work) 35static void rfkill_task_handler(struct work_struct *work)
37{ 36{
38 struct rfkill_task *task = container_of(work, struct rfkill_task, work); 37 struct rfkill_task *task = container_of(work, struct rfkill_task, work);
39 enum rfkill_state state;
40 38
41 mutex_lock(&task->mutex); 39 mutex_lock(&task->mutex);
42 40
43 /* 41 rfkill_switch_all(task->type, task->desired_state);
44 * Use temp variable to fetch desired state to keep it
45 * consistent even if rfkill_schedule_toggle() runs in
46 * another thread or interrupts us.
47 */
48 state = task->desired_state;
49 42
50 if (state != task->current_state) { 43 mutex_unlock(&task->mutex);
51 rfkill_switch_all(task->type, state); 44}
52 task->current_state = state; 45
46static void rfkill_task_epo_handler(struct work_struct *work)
47{
48 rfkill_epo();
49}
50
51static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
52
53static void rfkill_schedule_epo(void)
54{
55 schedule_work(&epo_work);
56}
57
58static void rfkill_schedule_set(struct rfkill_task *task,
59 enum rfkill_state desired_state)
60{
61 unsigned long flags;
62
63 if (unlikely(work_pending(&epo_work)))
64 return;
65
66 spin_lock_irqsave(&task->lock, flags);
67
68 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
69 task->desired_state = desired_state;
70 task->last = jiffies;
71 schedule_work(&task->work);
53 } 72 }
54 73
55 mutex_unlock(&task->mutex); 74 spin_unlock_irqrestore(&task->lock, flags);
56} 75}
57 76
58static void rfkill_schedule_toggle(struct rfkill_task *task) 77static void rfkill_schedule_toggle(struct rfkill_task *task)
59{ 78{
60 unsigned long flags; 79 unsigned long flags;
61 80
81 if (unlikely(work_pending(&epo_work)))
82 return;
83
62 spin_lock_irqsave(&task->lock, flags); 84 spin_lock_irqsave(&task->lock, flags);
63 85
64 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { 86 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
65 task->desired_state = !task->desired_state; 87 task->desired_state =
88 rfkill_state_complement(task->desired_state);
66 task->last = jiffies; 89 task->last = jiffies;
67 schedule_work(&task->work); 90 schedule_work(&task->work);
68 } 91 }
@@ -70,26 +93,26 @@ static void rfkill_schedule_toggle(struct rfkill_task *task)
70 spin_unlock_irqrestore(&task->lock, flags); 93 spin_unlock_irqrestore(&task->lock, flags);
71} 94}
72 95
73#define DEFINE_RFKILL_TASK(n, t) \ 96#define DEFINE_RFKILL_TASK(n, t) \
74 struct rfkill_task n = { \ 97 struct rfkill_task n = { \
75 .work = __WORK_INITIALIZER(n.work, \ 98 .work = __WORK_INITIALIZER(n.work, \
76 rfkill_task_handler), \ 99 rfkill_task_handler), \
77 .type = t, \ 100 .type = t, \
78 .mutex = __MUTEX_INITIALIZER(n.mutex), \ 101 .mutex = __MUTEX_INITIALIZER(n.mutex), \
79 .lock = __SPIN_LOCK_UNLOCKED(n.lock), \ 102 .lock = __SPIN_LOCK_UNLOCKED(n.lock), \
80 .desired_state = RFKILL_STATE_ON, \ 103 .desired_state = RFKILL_STATE_UNBLOCKED, \
81 .current_state = RFKILL_STATE_ON, \
82 } 104 }
83 105
84static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); 106static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
85static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); 107static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
86static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); 108static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
87static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX); 109static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
110static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
88 111
89static void rfkill_event(struct input_handle *handle, unsigned int type, 112static void rfkill_event(struct input_handle *handle, unsigned int type,
90 unsigned int code, int down) 113 unsigned int code, int data)
91{ 114{
92 if (type == EV_KEY && down == 1) { 115 if (type == EV_KEY && data == 1) {
93 switch (code) { 116 switch (code) {
94 case KEY_WLAN: 117 case KEY_WLAN:
95 rfkill_schedule_toggle(&rfkill_wlan); 118 rfkill_schedule_toggle(&rfkill_wlan);
@@ -106,6 +129,28 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
106 default: 129 default:
107 break; 130 break;
108 } 131 }
132 } else if (type == EV_SW) {
133 switch (code) {
134 case SW_RFKILL_ALL:
135 /* EVERY radio type. data != 0 means radios ON */
136 /* handle EPO (emergency power off) through shortcut */
137 if (data) {
138 rfkill_schedule_set(&rfkill_wwan,
139 RFKILL_STATE_UNBLOCKED);
140 rfkill_schedule_set(&rfkill_wimax,
141 RFKILL_STATE_UNBLOCKED);
142 rfkill_schedule_set(&rfkill_uwb,
143 RFKILL_STATE_UNBLOCKED);
144 rfkill_schedule_set(&rfkill_bt,
145 RFKILL_STATE_UNBLOCKED);
146 rfkill_schedule_set(&rfkill_wlan,
147 RFKILL_STATE_UNBLOCKED);
148 } else
149 rfkill_schedule_epo();
150 break;
151 default:
152 break;
153 }
109 } 154 }
110} 155}
111 156
@@ -168,6 +213,11 @@ static const struct input_device_id rfkill_ids[] = {
168 .evbit = { BIT_MASK(EV_KEY) }, 213 .evbit = { BIT_MASK(EV_KEY) },
169 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, 214 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
170 }, 215 },
216 {
217 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
218 .evbit = { BIT(EV_SW) },
219 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
220 },
171 { } 221 { }
172}; 222};
173 223
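With the new id-table entry, rfkill-input binds to any input device that advertises SW_RFKILL_ALL. A platform driver exposing a hardware kill slider would report it roughly as follows (sketch; input and the GPIO read are placeholders):

	/* at probe time, after input_allocate_device() */
	set_bit(EV_SW, input->evbit);
	set_bit(SW_RFKILL_ALL, input->swbit);

	/* on a switch flip: nonzero means radios allowed, 0 triggers EPO */
	input_report_switch(input, SW_RFKILL_ALL, gpio_get_value(gpio));
	input_sync(input);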
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index 4dae5006fc77..f63d05045685 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -12,5 +12,6 @@
12#define __RFKILL_INPUT_H 12#define __RFKILL_INPUT_H
13 13
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
15void rfkill_epo(void);
15 16
16#endif /* __RFKILL_INPUT_H */ 17#endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 4e10a95de832..7a560b785097 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -39,8 +39,56 @@ MODULE_LICENSE("GPL");
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_mutex); 40static DEFINE_MUTEX(rfkill_mutex);
41 41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off");
46
42static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; 47static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX];
43 48
49static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
50
51
52/**
53 * register_rfkill_notifier - Add notifier to rfkill notifier chain
54 * @nb: pointer to the new entry to add to the chain
55 *
56 * See blocking_notifier_chain_register() for return value and further
57 * observations.
58 *
59 * Adds a notifier to the rfkill notifier chain. The chain will be
60 * called with a pointer to the relevant rfkill structure as a parameter,
61 * refer to include/linux/rfkill.h for the possible events.
62 *
63 * Notifiers added to this chain must always return NOTIFY_DONE. This
64 * chain is a blocking notifier chain: notifiers can sleep.
65 *
66 * Calls to this chain may have been done through a workqueue. One must
67 * assume unordered asynchronous behaviour; there is no way to know if
68 * actions related to the event that generated the notification have been
69 * carried out already.
70 */
71int register_rfkill_notifier(struct notifier_block *nb)
72{
73 return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
74}
75EXPORT_SYMBOL_GPL(register_rfkill_notifier);
76
77/**
78 * unregister_rfkill_notifier - remove notifier from rfkill notifier chain
79 * @nb: pointer to the entry to remove from the chain
80 *
81 * See blocking_notifier_chain_unregister() for return value and further
82 * observations.
83 *
84 * Removes a notifier from the rfkill notifier chain.
85 */
86int unregister_rfkill_notifier(struct notifier_block *nb)
87{
88 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
89}
90EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
91
44 92
45static void rfkill_led_trigger(struct rfkill *rfkill, 93static void rfkill_led_trigger(struct rfkill *rfkill,
46 enum rfkill_state state) 94 enum rfkill_state state)
@@ -50,24 +98,101 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
50 98
51 if (!led->name) 99 if (!led->name)
52 return; 100 return;
53 if (state == RFKILL_STATE_OFF) 101 if (state != RFKILL_STATE_UNBLOCKED)
54 led_trigger_event(led, LED_OFF); 102 led_trigger_event(led, LED_OFF);
55 else 103 else
56 led_trigger_event(led, LED_FULL); 104 led_trigger_event(led, LED_FULL);
57#endif /* CONFIG_RFKILL_LEDS */ 105#endif /* CONFIG_RFKILL_LEDS */
58} 106}
59 107
108static void notify_rfkill_state_change(struct rfkill *rfkill)
109{
110 blocking_notifier_call_chain(&rfkill_notifier_list,
111 RFKILL_STATE_CHANGED,
112 rfkill);
113}
114
115static void update_rfkill_state(struct rfkill *rfkill)
116{
117 enum rfkill_state newstate, oldstate;
118
119 if (rfkill->get_state) {
120 mutex_lock(&rfkill->mutex);
121 if (!rfkill->get_state(rfkill->data, &newstate)) {
122 oldstate = rfkill->state;
123 rfkill->state = newstate;
124 if (oldstate != newstate)
125 notify_rfkill_state_change(rfkill);
126 }
127 mutex_unlock(&rfkill->mutex);
128 }
129}
130
131/**
132 * rfkill_toggle_radio - wrapper for toggle_radio hook
133 *
134 * @rfkill: the rfkill struct to use
135 * @force: calls toggle_radio even if cache says it is not needed,
136 * and also makes sure notifications of the state will be
137 * sent even if it didn't change
138 * @state: the new state to call toggle_radio() with
139 *
140 * Calls rfkill->toggle_radio, enforcing the API for toggle_radio
141 * calls and handling all the red tape such as issuing notifications
142 * if the call is successful.
143 *
144 * Note that @force cannot override a (possibly cached) state of
145 * RFKILL_STATE_HARD_BLOCKED. Any device making use of
146 * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
147 * rfkill_force_state(), so the cache either is bypassed or valid.
148 *
149 * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
150 * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
151 * give the driver a hint that it should double-BLOCK the transmitter.
152 *
153 * Caller must have acquired rfkill_mutex.
154 */
60static int rfkill_toggle_radio(struct rfkill *rfkill, 155static int rfkill_toggle_radio(struct rfkill *rfkill,
61 enum rfkill_state state) 156 enum rfkill_state state,
157 int force)
62{ 158{
63 int retval = 0; 159 int retval = 0;
160 enum rfkill_state oldstate, newstate;
161
162 oldstate = rfkill->state;
163
164 if (rfkill->get_state && !force &&
165 !rfkill->get_state(rfkill->data, &newstate))
166 rfkill->state = newstate;
167
168 switch (state) {
169 case RFKILL_STATE_HARD_BLOCKED:
170 /* typically happens when refreshing hardware state,
171 * such as on resume */
172 state = RFKILL_STATE_SOFT_BLOCKED;
173 break;
174 case RFKILL_STATE_UNBLOCKED:
175 /* force can't override this, only rfkill_force_state() can */
176 if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
177 return -EPERM;
178 break;
179 case RFKILL_STATE_SOFT_BLOCKED:
180 /* nothing to do, we want to give drivers the hint to double
181 * BLOCK even a transmitter that is already in state
182 * RFKILL_STATE_HARD_BLOCKED */
183 break;
184 }
64 185
65 if (state != rfkill->state) { 186 if (force || state != rfkill->state) {
66 retval = rfkill->toggle_radio(rfkill->data, state); 187 retval = rfkill->toggle_radio(rfkill->data, state);
67 if (!retval) { 188 /* never allow a HARD->SOFT downgrade! */
189 if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
68 rfkill->state = state; 190 rfkill->state = state;
69 rfkill_led_trigger(rfkill, state); 191 }
70 } 192
193 if (force || rfkill->state != oldstate) {
194 rfkill_led_trigger(rfkill, rfkill->state);
195 notify_rfkill_state_change(rfkill);
71 } 196 }
72 197
73 return retval; 198 return retval;
@@ -82,7 +207,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
82 * a specific switch is claimed by userspace in which case it is 207 * a specific switch is claimed by userspace in which case it is
83 * left alone. 208 * left alone.
84 */ 209 */
85
86void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 210void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
87{ 211{
88 struct rfkill *rfkill; 212 struct rfkill *rfkill;
@@ -93,13 +217,66 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
93 217
94 list_for_each_entry(rfkill, &rfkill_list, node) { 218 list_for_each_entry(rfkill, &rfkill_list, node) {
95 if ((!rfkill->user_claim) && (rfkill->type == type)) 219 if ((!rfkill->user_claim) && (rfkill->type == type))
96 rfkill_toggle_radio(rfkill, state); 220 rfkill_toggle_radio(rfkill, state, 0);
97 } 221 }
98 222
99 mutex_unlock(&rfkill_mutex); 223 mutex_unlock(&rfkill_mutex);
100} 224}
101EXPORT_SYMBOL(rfkill_switch_all); 225EXPORT_SYMBOL(rfkill_switch_all);
102 226
227/**
228 * rfkill_epo - emergency power off all transmitters
229 *
230 * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
231 * everything in its path but rfkill_mutex.
232 */
233void rfkill_epo(void)
234{
235 struct rfkill *rfkill;
236
237 mutex_lock(&rfkill_mutex);
238 list_for_each_entry(rfkill, &rfkill_list, node) {
239 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
240 }
241 mutex_unlock(&rfkill_mutex);
242}
243EXPORT_SYMBOL_GPL(rfkill_epo);
244
245/**
246 * rfkill_force_state - Force the internal rfkill radio state
247 * @rfkill: pointer to the rfkill class to modify.
248 * @state: the current radio state the class should be forced to.
249 *
250 * This function updates the internal state of the radio cached
251 * by the rfkill class. It should be used when the driver gets
252 * a notification by the firmware/hardware of the current *real*
253 * state of the radio rfkill switch.
254 *
255 * It may not be called from an atomic context.
256 */
257int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
258{
259 enum rfkill_state oldstate;
260
261 if (state != RFKILL_STATE_SOFT_BLOCKED &&
262 state != RFKILL_STATE_UNBLOCKED &&
263 state != RFKILL_STATE_HARD_BLOCKED)
264 return -EINVAL;
265
266 mutex_lock(&rfkill->mutex);
267
268 oldstate = rfkill->state;
269 rfkill->state = state;
270
271 if (state != oldstate)
272 notify_rfkill_state_change(rfkill);
273
274 mutex_unlock(&rfkill->mutex);
275
276 return 0;
277}
278EXPORT_SYMBOL(rfkill_force_state);
279
103static ssize_t rfkill_name_show(struct device *dev, 280static ssize_t rfkill_name_show(struct device *dev,
104 struct device_attribute *attr, 281 struct device_attribute *attr,
105 char *buf) 282 char *buf)
@@ -109,31 +286,31 @@ static ssize_t rfkill_name_show(struct device *dev,
109 return sprintf(buf, "%s\n", rfkill->name); 286 return sprintf(buf, "%s\n", rfkill->name);
110} 287}
111 288
112static ssize_t rfkill_type_show(struct device *dev, 289static const char *rfkill_get_type_str(enum rfkill_type type)
113 struct device_attribute *attr,
114 char *buf)
115{ 290{
116 struct rfkill *rfkill = to_rfkill(dev); 291 switch (type) {
117 const char *type;
118
119 switch (rfkill->type) {
120 case RFKILL_TYPE_WLAN: 292 case RFKILL_TYPE_WLAN:
121 type = "wlan"; 293 return "wlan";
122 break;
123 case RFKILL_TYPE_BLUETOOTH: 294 case RFKILL_TYPE_BLUETOOTH:
124 type = "bluetooth"; 295 return "bluetooth";
125 break;
126 case RFKILL_TYPE_UWB: 296 case RFKILL_TYPE_UWB:
127 type = "ultrawideband"; 297 return "ultrawideband";
128 break;
129 case RFKILL_TYPE_WIMAX: 298 case RFKILL_TYPE_WIMAX:
130 type = "wimax"; 299 return "wimax";
131 break; 300 case RFKILL_TYPE_WWAN:
301 return "wwan";
132 default: 302 default:
133 BUG(); 303 BUG();
134 } 304 }
305}
306
307static ssize_t rfkill_type_show(struct device *dev,
308 struct device_attribute *attr,
309 char *buf)
310{
311 struct rfkill *rfkill = to_rfkill(dev);
135 312
136 return sprintf(buf, "%s\n", type); 313 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
137} 314}
138 315
139static ssize_t rfkill_state_show(struct device *dev, 316static ssize_t rfkill_state_show(struct device *dev,
@@ -142,6 +319,7 @@ static ssize_t rfkill_state_show(struct device *dev,
142{ 319{
143 struct rfkill *rfkill = to_rfkill(dev); 320 struct rfkill *rfkill = to_rfkill(dev);
144 321
322 update_rfkill_state(rfkill);
145 return sprintf(buf, "%d\n", rfkill->state); 323 return sprintf(buf, "%d\n", rfkill->state);
146} 324}
147 325
@@ -156,10 +334,14 @@ static ssize_t rfkill_state_store(struct device *dev,
156 if (!capable(CAP_NET_ADMIN)) 334 if (!capable(CAP_NET_ADMIN))
157 return -EPERM; 335 return -EPERM;
158 336
337 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
338 if (state != RFKILL_STATE_UNBLOCKED &&
339 state != RFKILL_STATE_SOFT_BLOCKED)
340 return -EINVAL;
341
159 if (mutex_lock_interruptible(&rfkill->mutex)) 342 if (mutex_lock_interruptible(&rfkill->mutex))
160 return -ERESTARTSYS; 343 return -ERESTARTSYS;
161 error = rfkill_toggle_radio(rfkill, 344 error = rfkill_toggle_radio(rfkill, state, 0);
162 state ? RFKILL_STATE_ON : RFKILL_STATE_OFF);
163 mutex_unlock(&rfkill->mutex); 345 mutex_unlock(&rfkill->mutex);
164 346
165 return error ? error : count; 347 return error ? error : count;
@@ -200,7 +382,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
200 if (rfkill->user_claim != claim) { 382 if (rfkill->user_claim != claim) {
201 if (!claim) 383 if (!claim)
202 rfkill_toggle_radio(rfkill, 384 rfkill_toggle_radio(rfkill,
203 rfkill_states[rfkill->type]); 385 rfkill_states[rfkill->type],
386 0);
204 rfkill->user_claim = claim; 387 rfkill->user_claim = claim;
205 } 388 }
206 389
@@ -233,12 +416,12 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
233 416
234 if (dev->power.power_state.event != state.event) { 417 if (dev->power.power_state.event != state.event) {
235 if (state.event & PM_EVENT_SLEEP) { 418 if (state.event & PM_EVENT_SLEEP) {
236 mutex_lock(&rfkill->mutex); 419 /* Stop transmitter, keep state, no notifies */
237 420 update_rfkill_state(rfkill);
238 if (rfkill->state == RFKILL_STATE_ON)
239 rfkill->toggle_radio(rfkill->data,
240 RFKILL_STATE_OFF);
241 421
422 mutex_lock(&rfkill->mutex);
423 rfkill->toggle_radio(rfkill->data,
424 RFKILL_STATE_SOFT_BLOCKED);
242 mutex_unlock(&rfkill->mutex); 425 mutex_unlock(&rfkill->mutex);
243 } 426 }
244 427
@@ -255,8 +438,8 @@ static int rfkill_resume(struct device *dev)
255 if (dev->power.power_state.event != PM_EVENT_ON) { 438 if (dev->power.power_state.event != PM_EVENT_ON) {
256 mutex_lock(&rfkill->mutex); 439 mutex_lock(&rfkill->mutex);
257 440
258 if (rfkill->state == RFKILL_STATE_ON) 441 /* restore radio state AND notify everybody */
259 rfkill->toggle_radio(rfkill->data, RFKILL_STATE_ON); 442 rfkill_toggle_radio(rfkill, rfkill->state, 1);
260 443
261 mutex_unlock(&rfkill->mutex); 444 mutex_unlock(&rfkill->mutex);
262 } 445 }
@@ -269,34 +452,71 @@ static int rfkill_resume(struct device *dev)
269#define rfkill_resume NULL 452#define rfkill_resume NULL
270#endif 453#endif
271 454
455static int rfkill_blocking_uevent_notifier(struct notifier_block *nb,
456 unsigned long eventid,
457 void *data)
458{
459 struct rfkill *rfkill = (struct rfkill *)data;
460
461 switch (eventid) {
462 case RFKILL_STATE_CHANGED:
463 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
464 break;
465 default:
466 break;
467 }
468
469 return NOTIFY_DONE;
470}
471
472static struct notifier_block rfkill_blocking_uevent_nb = {
473 .notifier_call = rfkill_blocking_uevent_notifier,
474 .priority = 0,
475};
476
477static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
478{
479 struct rfkill *rfkill = to_rfkill(dev);
480 int error;
481
482 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
483 if (error)
484 return error;
485 error = add_uevent_var(env, "RFKILL_TYPE=%s",
486 rfkill_get_type_str(rfkill->type));
487 if (error)
488 return error;
489 error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
490 return error;
491}
492
272static struct class rfkill_class = { 493static struct class rfkill_class = {
273 .name = "rfkill", 494 .name = "rfkill",
274 .dev_release = rfkill_release, 495 .dev_release = rfkill_release,
275 .dev_attrs = rfkill_dev_attrs, 496 .dev_attrs = rfkill_dev_attrs,
276 .suspend = rfkill_suspend, 497 .suspend = rfkill_suspend,
277 .resume = rfkill_resume, 498 .resume = rfkill_resume,
499 .dev_uevent = rfkill_dev_uevent,
278}; 500};
279 501
280static int rfkill_add_switch(struct rfkill *rfkill) 502static int rfkill_add_switch(struct rfkill *rfkill)
281{ 503{
282 int error;
283
284 mutex_lock(&rfkill_mutex); 504 mutex_lock(&rfkill_mutex);
285 505
286 error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]); 506 rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0);
287 if (!error) 507
288 list_add_tail(&rfkill->node, &rfkill_list); 508 list_add_tail(&rfkill->node, &rfkill_list);
289 509
290 mutex_unlock(&rfkill_mutex); 510 mutex_unlock(&rfkill_mutex);
291 511
292 return error; 512 return 0;
293} 513}
294 514
295static void rfkill_remove_switch(struct rfkill *rfkill) 515static void rfkill_remove_switch(struct rfkill *rfkill)
296{ 516{
297 mutex_lock(&rfkill_mutex); 517 mutex_lock(&rfkill_mutex);
298 list_del_init(&rfkill->node); 518 list_del_init(&rfkill->node);
299 rfkill_toggle_radio(rfkill, RFKILL_STATE_OFF); 519 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
300 mutex_unlock(&rfkill_mutex); 520 mutex_unlock(&rfkill_mutex);
301} 521}
302 522
@@ -412,7 +632,7 @@ int rfkill_register(struct rfkill *rfkill)
412EXPORT_SYMBOL(rfkill_register); 632EXPORT_SYMBOL(rfkill_register);
413 633
414/** 634/**
415 * rfkill_unregister - Uegister a rfkill structure. 635 * rfkill_unregister - Unregister a rfkill structure.
416 * @rfkill: rfkill structure to be unregistered 636 * @rfkill: rfkill structure to be unregistered
417 * 637 *
418 * This function should be called by the network driver during device 638 * This function should be called by the network driver during device
@@ -436,8 +656,13 @@ static int __init rfkill_init(void)
436 int error; 656 int error;
437 int i; 657 int i;
438 658
659 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
660 if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
661 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
662 return -EINVAL;
663
439 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) 664 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++)
440 rfkill_states[i] = RFKILL_STATE_ON; 665 rfkill_states[i] = rfkill_default_state;
441 666
442 error = class_register(&rfkill_class); 667 error = class_register(&rfkill_class);
443 if (error) { 668 if (error) {
@@ -445,11 +670,14 @@ static int __init rfkill_init(void)
445 return error; 670 return error;
446 } 671 }
447 672
673 register_rfkill_notifier(&rfkill_blocking_uevent_nb);
674
448 return 0; 675 return 0;
449} 676}
450 677
451static void __exit rfkill_exit(void) 678static void __exit rfkill_exit(void)
452{ 679{
680 unregister_rfkill_notifier(&rfkill_blocking_uevent_nb);
453 class_unregister(&rfkill_class); 681 class_unregister(&rfkill_class);
454} 682}
455 683
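A consumer of the new notifier chain registers a blocking notifier and keys on RFKILL_STATE_CHANGED; per the kernel-doc above it may sleep but must return NOTIFY_DONE. A minimal sketch, with hypothetical my_* names:

	static int my_rfkill_notify(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		struct rfkill *rfkill = data;

		if (event == RFKILL_STATE_CHANGED)
			printk(KERN_DEBUG "%s: state %d\n",
			       rfkill->name, rfkill->state);
		return NOTIFY_DONE;
	}

	static struct notifier_block my_rfkill_nb = {
		.notifier_call = my_rfkill_notify,
	};

	/* register_rfkill_notifier(&my_rfkill_nb) at init,
	 * unregister_rfkill_notifier(&my_rfkill_nb) at exit */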
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1ebf65294405..f3a691f34909 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -75,6 +75,18 @@ ax25_address rose_callsign;
75 */ 75 */
76static struct lock_class_key rose_netdev_xmit_lock_key; 76static struct lock_class_key rose_netdev_xmit_lock_key;
77 77
78static void rose_set_lockdep_one(struct net_device *dev,
79 struct netdev_queue *txq,
80 void *_unused)
81{
82 lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
83}
84
85static void rose_set_lockdep_key(struct net_device *dev)
86{
87 netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
88}
89
78/* 90/*
79 * Convert a ROSE address into text. 91 * Convert a ROSE address into text.
80 */ 92 */
@@ -566,13 +578,11 @@ static struct sock *rose_make_new(struct sock *osk)
566#endif 578#endif
567 579
568 sk->sk_type = osk->sk_type; 580 sk->sk_type = osk->sk_type;
569 sk->sk_socket = osk->sk_socket;
570 sk->sk_priority = osk->sk_priority; 581 sk->sk_priority = osk->sk_priority;
571 sk->sk_protocol = osk->sk_protocol; 582 sk->sk_protocol = osk->sk_protocol;
572 sk->sk_rcvbuf = osk->sk_rcvbuf; 583 sk->sk_rcvbuf = osk->sk_rcvbuf;
573 sk->sk_sndbuf = osk->sk_sndbuf; 584 sk->sk_sndbuf = osk->sk_sndbuf;
574 sk->sk_state = TCP_ESTABLISHED; 585 sk->sk_state = TCP_ESTABLISHED;
575 sk->sk_sleep = osk->sk_sleep;
576 sock_copy_flags(sk, osk); 586 sock_copy_flags(sk, osk);
577 587
578 init_timer(&rose->timer); 588 init_timer(&rose->timer);
@@ -759,7 +769,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
759 sock->state = SS_UNCONNECTED; 769 sock->state = SS_UNCONNECTED;
760 770
761 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, 771 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
762 &diagnostic); 772 &diagnostic, 0);
763 if (!rose->neighbour) { 773 if (!rose->neighbour) {
764 err = -ENETUNREACH; 774 err = -ENETUNREACH;
765 goto out_release; 775 goto out_release;
@@ -855,7 +865,7 @@ rose_try_next_neigh:
855 865
856 if (sk->sk_state != TCP_ESTABLISHED) { 866 if (sk->sk_state != TCP_ESTABLISHED) {
857 /* Try next neighbour */ 867 /* Try next neighbour */
858 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic); 868 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
859 if (rose->neighbour) 869 if (rose->neighbour)
860 goto rose_try_next_neigh; 870 goto rose_try_next_neigh;
861 871
@@ -924,14 +934,12 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
924 goto out_release; 934 goto out_release;
925 935
926 newsk = skb->sk; 936 newsk = skb->sk;
927 newsk->sk_socket = newsock; 937 sock_graft(newsk, newsock);
928 newsk->sk_sleep = &newsock->wait;
929 938
930 /* Now attach up the new socket */ 939 /* Now attach up the new socket */
931 skb->sk = NULL; 940 skb->sk = NULL;
932 kfree_skb(skb); 941 kfree_skb(skb);
933 sk->sk_ack_backlog--; 942 sk->sk_ack_backlog--;
934 newsock->sk = newsk;
935 943
936out_release: 944out_release:
937 release_sock(sk); 945 release_sock(sk);
@@ -1580,7 +1588,7 @@ static int __init rose_proto_init(void)
1580 free_netdev(dev); 1588 free_netdev(dev);
1581 goto fail; 1589 goto fail;
1582 } 1590 }
1583 lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key); 1591 rose_set_lockdep_key(dev);
1584 dev_rose[i] = dev; 1592 dev_rose[i] = dev;
1585 } 1593 }
1586 1594
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index bd593871c81e..a81066a1010a 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -662,27 +662,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
662} 662}
663 663
664/* 664/*
665 * Find a neighbour given a ROSE address. 665 * Find a neighbour or a route given a ROSE address.
666 */ 666 */
667struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, 667struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
668 unsigned char *diagnostic) 668 unsigned char *diagnostic, int new)
669{ 669{
670 struct rose_neigh *res = NULL; 670 struct rose_neigh *res = NULL;
671 struct rose_node *node; 671 struct rose_node *node;
672 int failed = 0; 672 int failed = 0;
673 int i; 673 int i;
674 674
675 spin_lock_bh(&rose_node_list_lock); 675 if (!new) spin_lock_bh(&rose_node_list_lock);
676 for (node = rose_node_list; node != NULL; node = node->next) { 676 for (node = rose_node_list; node != NULL; node = node->next) {
677 if (rosecmpm(addr, &node->address, node->mask) == 0) { 677 if (rosecmpm(addr, &node->address, node->mask) == 0) {
678 for (i = 0; i < node->count; i++) { 678 for (i = 0; i < node->count; i++) {
679 if (!rose_ftimer_running(node->neighbour[i])) { 679 if (new) {
680 res = node->neighbour[i]; 680 if (node->neighbour[i]->restarted) {
681 goto out; 681 res = node->neighbour[i];
682 } else 682 goto out;
683 failed = 1; 683 }
684 }
685 else {
686 if (!rose_ftimer_running(node->neighbour[i])) {
687 res = node->neighbour[i];
688 goto out;
689 } else
690 failed = 1;
691 }
684 } 692 }
685 break;
686 } 693 }
687 } 694 }
688 695
@@ -695,7 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
695 } 702 }
696 703
697out: 704out:
698 spin_unlock_bh(&rose_node_list_lock); 705 if (!new) spin_unlock_bh(&rose_node_list_lock);
699 706
700 return res; 707 return res;
701} 708}
@@ -1018,7 +1025,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
1018 rose_route = rose_route->next; 1025 rose_route = rose_route->next;
1019 } 1026 }
1020 1027
1021 if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic)) == NULL) { 1028 if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
1022 rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic); 1029 rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
1023 goto out; 1030 goto out;
1024 } 1031 }
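The new "new" argument to rose_get_neigh() exists for lock context rather than routing policy: rose_route_frame() already holds rose_node_list_lock when it re-routes a frame, so it passes 1 (and accepts only restarted neighbours), while process-context callers keep passing 0 and let the function take the lock itself:

	/* process context (rose_connect): lock taken internally */
	rose->neighbour = rose_get_neigh(&addr->srose_addr,
					 &cause, &diagnostic, 0);

	/* softirq re-route (rose_route_frame): list lock already held */
	new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1);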
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index f8a699e92962..f98c8027e5c1 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -21,6 +21,7 @@
21#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
22#include <net/ip.h> 22#include <net/ip.h>
23#include <net/udp.h> 23#include <net/udp.h>
24#include <net/net_namespace.h>
24#include "ar-internal.h" 25#include "ar-internal.h"
25 26
26unsigned long rxrpc_ack_timeout = 1; 27unsigned long rxrpc_ack_timeout = 1;
@@ -708,12 +709,12 @@ void rxrpc_data_ready(struct sock *sk, int count)
708 if (skb_checksum_complete(skb)) { 709 if (skb_checksum_complete(skb)) {
709 rxrpc_free_skb(skb); 710 rxrpc_free_skb(skb);
710 rxrpc_put_local(local); 711 rxrpc_put_local(local);
711 UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0); 712 UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
712 _leave(" [CSUM failed]"); 713 _leave(" [CSUM failed]");
713 return; 714 return;
714 } 715 }
715 716
716 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0); 717 UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
717 718
718 /* the socket buffer we have is owned by UDP, with UDP's data all over 719 /* the socket buffer we have is owned by UDP, with UDP's data all over
719 * it, but we really want our own */ 720 * it, but we really want our own */
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9360fc81e8c7..d2b6f54a6261 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -120,6 +120,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
120{ 120{
121 struct net *net = sock_net(skb->sk); 121 struct net *net = sock_net(skb->sk);
122 struct nlattr *tca[TCA_MAX + 1]; 122 struct nlattr *tca[TCA_MAX + 1];
123 spinlock_t *root_lock;
123 struct tcmsg *t; 124 struct tcmsg *t;
124 u32 protocol; 125 u32 protocol;
125 u32 prio; 126 u32 prio;
@@ -166,7 +167,8 @@ replay:
166 167
167 /* Find qdisc */ 168 /* Find qdisc */
168 if (!parent) { 169 if (!parent) {
169 q = dev->qdisc_sleeping; 170 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
171 q = dev_queue->qdisc_sleeping;
170 parent = q->handle; 172 parent = q->handle;
171 } else { 173 } else {
172 q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); 174 q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -203,6 +205,8 @@ replay:
203 } 205 }
204 } 206 }
205 207
208 root_lock = qdisc_root_lock(q);
209
206 if (tp == NULL) { 210 if (tp == NULL) {
207 /* Proto-tcf does not exist, create new one */ 211 /* Proto-tcf does not exist, create new one */
208 212
@@ -262,10 +266,10 @@ replay:
262 goto errout; 266 goto errout;
263 } 267 }
264 268
265 qdisc_lock_tree(dev); 269 spin_lock_bh(root_lock);
266 tp->next = *back; 270 tp->next = *back;
267 *back = tp; 271 *back = tp;
268 qdisc_unlock_tree(dev); 272 spin_unlock_bh(root_lock);
269 273
270 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) 274 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
271 goto errout; 275 goto errout;
@@ -274,9 +278,9 @@ replay:
274 278
275 if (fh == 0) { 279 if (fh == 0) {
276 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { 280 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
277 qdisc_lock_tree(dev); 281 spin_lock_bh(root_lock);
278 *back = tp->next; 282 *back = tp->next;
279 qdisc_unlock_tree(dev); 283 spin_unlock_bh(root_lock);
280 284
281 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 285 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
282 tcf_destroy(tp); 286 tcf_destroy(tp);
@@ -334,7 +338,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
334 tcm->tcm_family = AF_UNSPEC; 338 tcm->tcm_family = AF_UNSPEC;
335 tcm->tcm__pad1 = 0; 339 tcm->tcm__pad1 = 0;
336 tcm->tcm__pad1 = 0; 340 tcm->tcm__pad1 = 0;
337 tcm->tcm_ifindex = tp->q->dev->ifindex; 341 tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
338 tcm->tcm_parent = tp->classid; 342 tcm->tcm_parent = tp->classid;
339 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 343 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
340 NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); 344 NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
@@ -390,6 +394,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
390static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 394static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
391{ 395{
392 struct net *net = sock_net(skb->sk); 396 struct net *net = sock_net(skb->sk);
397 struct netdev_queue *dev_queue;
393 int t; 398 int t;
394 int s_t; 399 int s_t;
395 struct net_device *dev; 400 struct net_device *dev;
@@ -408,8 +413,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
408 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 413 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
409 return skb->len; 414 return skb->len;
410 415
416 dev_queue = netdev_get_tx_queue(dev, 0);
411 if (!tcm->tcm_parent) 417 if (!tcm->tcm_parent)
412 q = dev->qdisc_sleeping; 418 q = dev_queue->qdisc_sleeping;
413 else 419 else
414 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 420 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
415 if (!q) 421 if (!q)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 971b867e0484..8f63a1a94014 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -36,6 +36,8 @@ struct flow_filter {
36 struct list_head list; 36 struct list_head list;
37 struct tcf_exts exts; 37 struct tcf_exts exts;
38 struct tcf_ematch_tree ematches; 38 struct tcf_ematch_tree ematches;
39 struct timer_list perturb_timer;
40 u32 perturb_period;
39 u32 handle; 41 u32 handle;
40 42
41 u32 nkeys; 43 u32 nkeys;
@@ -47,11 +49,9 @@ struct flow_filter {
47 u32 addend; 49 u32 addend;
48 u32 divisor; 50 u32 divisor;
49 u32 baseclass; 51 u32 baseclass;
52 u32 hashrnd;
50}; 53};
51 54
52static u32 flow_hashrnd __read_mostly;
53static int flow_hashrnd_initted __read_mostly;
54
55static const struct tcf_ext_map flow_ext_map = { 55static const struct tcf_ext_map flow_ext_map = {
56 .action = TCA_FLOW_ACT, 56 .action = TCA_FLOW_ACT,
57 .police = TCA_FLOW_POLICE, 57 .police = TCA_FLOW_POLICE,
@@ -348,7 +348,7 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
348 } 348 }
349 349
350 if (f->mode == FLOW_MODE_HASH) 350 if (f->mode == FLOW_MODE_HASH)
351 classid = jhash2(keys, f->nkeys, flow_hashrnd); 351 classid = jhash2(keys, f->nkeys, f->hashrnd);
352 else { 352 else {
353 classid = keys[0]; 353 classid = keys[0];
354 classid = (classid & f->mask) ^ f->xor; 354 classid = (classid & f->mask) ^ f->xor;
@@ -369,6 +369,15 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
369 return -1; 369 return -1;
370} 370}
371 371
372static void flow_perturbation(unsigned long arg)
373{
374 struct flow_filter *f = (struct flow_filter *)arg;
375
376 get_random_bytes(&f->hashrnd, 4);
377 if (f->perturb_period)
378 mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
379}
380
372static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { 381static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
373 [TCA_FLOW_KEYS] = { .type = NLA_U32 }, 382 [TCA_FLOW_KEYS] = { .type = NLA_U32 },
374 [TCA_FLOW_MODE] = { .type = NLA_U32 }, 383 [TCA_FLOW_MODE] = { .type = NLA_U32 },
@@ -381,6 +390,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
381 [TCA_FLOW_ACT] = { .type = NLA_NESTED }, 390 [TCA_FLOW_ACT] = { .type = NLA_NESTED },
382 [TCA_FLOW_POLICE] = { .type = NLA_NESTED }, 391 [TCA_FLOW_POLICE] = { .type = NLA_NESTED },
383 [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED }, 392 [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED },
393 [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
384}; 394};
385 395
386static int flow_change(struct tcf_proto *tp, unsigned long base, 396static int flow_change(struct tcf_proto *tp, unsigned long base,
@@ -394,6 +404,7 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
394 struct tcf_exts e; 404 struct tcf_exts e;
395 struct tcf_ematch_tree t; 405 struct tcf_ematch_tree t;
396 unsigned int nkeys = 0; 406 unsigned int nkeys = 0;
407 unsigned int perturb_period = 0;
397 u32 baseclass = 0; 408 u32 baseclass = 0;
398 u32 keymask = 0; 409 u32 keymask = 0;
399 u32 mode; 410 u32 mode;
@@ -442,6 +453,14 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
442 mode = nla_get_u32(tb[TCA_FLOW_MODE]); 453 mode = nla_get_u32(tb[TCA_FLOW_MODE]);
443 if (mode != FLOW_MODE_HASH && nkeys > 1) 454 if (mode != FLOW_MODE_HASH && nkeys > 1)
444 goto err2; 455 goto err2;
456
457 if (mode == FLOW_MODE_HASH)
458 perturb_period = f->perturb_period;
459 if (tb[TCA_FLOW_PERTURB]) {
460 if (mode != FLOW_MODE_HASH)
461 goto err2;
462 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
463 }
445 } else { 464 } else {
446 err = -EINVAL; 465 err = -EINVAL;
447 if (!handle) 466 if (!handle)
@@ -455,6 +474,12 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
455 if (mode != FLOW_MODE_HASH && nkeys > 1) 474 if (mode != FLOW_MODE_HASH && nkeys > 1)
456 goto err2; 475 goto err2;
457 476
477 if (tb[TCA_FLOW_PERTURB]) {
478 if (mode != FLOW_MODE_HASH)
479 goto err2;
480 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
481 }
482
458 if (TC_H_MAJ(baseclass) == 0) 483 if (TC_H_MAJ(baseclass) == 0)
459 baseclass = TC_H_MAKE(tp->q->handle, baseclass); 484 baseclass = TC_H_MAKE(tp->q->handle, baseclass);
460 if (TC_H_MIN(baseclass) == 0) 485 if (TC_H_MIN(baseclass) == 0)
@@ -467,6 +492,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
467 492
468 f->handle = handle; 493 f->handle = handle;
469 f->mask = ~0U; 494 f->mask = ~0U;
495
496 get_random_bytes(&f->hashrnd, 4);
497 f->perturb_timer.function = flow_perturbation;
498 f->perturb_timer.data = (unsigned long)f;
499 init_timer_deferrable(&f->perturb_timer);
470 } 500 }
471 501
472 tcf_exts_change(tp, &f->exts, &e); 502 tcf_exts_change(tp, &f->exts, &e);
@@ -495,6 +525,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
495 if (baseclass) 525 if (baseclass)
496 f->baseclass = baseclass; 526 f->baseclass = baseclass;
497 527
528 f->perturb_period = perturb_period;
529 del_timer(&f->perturb_timer);
530 if (perturb_period)
531 mod_timer(&f->perturb_timer, jiffies + perturb_period);
532
498 if (*arg == 0) 533 if (*arg == 0)
499 list_add_tail(&f->list, &head->filters); 534 list_add_tail(&f->list, &head->filters);
500 535
@@ -512,6 +547,7 @@ err1:
512 547
513static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) 548static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
514{ 549{
550 del_timer_sync(&f->perturb_timer);
515 tcf_exts_destroy(tp, &f->exts); 551 tcf_exts_destroy(tp, &f->exts);
516 tcf_em_tree_destroy(tp, &f->ematches); 552 tcf_em_tree_destroy(tp, &f->ematches);
517 kfree(f); 553 kfree(f);
@@ -532,11 +568,6 @@ static int flow_init(struct tcf_proto *tp)
532{ 568{
533 struct flow_head *head; 569 struct flow_head *head;
534 570
535 if (!flow_hashrnd_initted) {
536 get_random_bytes(&flow_hashrnd, 4);
537 flow_hashrnd_initted = 1;
538 }
539
540 head = kzalloc(sizeof(*head), GFP_KERNEL); 571 head = kzalloc(sizeof(*head), GFP_KERNEL);
541 if (head == NULL) 572 if (head == NULL)
542 return -ENOBUFS; 573 return -ENOBUFS;
@@ -605,6 +636,9 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
605 if (f->baseclass) 636 if (f->baseclass)
606 NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); 637 NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
607 638
639 if (f->perturb_period)
640 NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
641
608 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) 642 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
609 goto nla_put_failure; 643 goto nla_put_failure;
610#ifdef CONFIG_NET_EMATCH 644#ifdef CONFIG_NET_EMATCH
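The per-filter hashrnd plus deferrable timer replaces the single boot-time key: each hash-mode filter can re-randomize its key every TCA_FLOW_PERTURB seconds, and because the timer is deferrable an idle CPU is never woken just to rotate a key; a rotation may land slightly late, which is harmless here. The rearm cycle reduces to:

	/* flow_perturbation(), every perturb_period jiffies */
	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);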
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 784dcb870b98..481260a4f10f 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -73,11 +73,13 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
73} 73}
74 74
75static inline 75static inline
76void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id) 76void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
77{ 77{
78 qdisc_lock_tree(dev); 78 spinlock_t *root_lock = qdisc_root_lock(q);
79
80 spin_lock_bh(root_lock);
79 memset(head->fastmap, 0, sizeof(head->fastmap)); 81 memset(head->fastmap, 0, sizeof(head->fastmap));
80 qdisc_unlock_tree(dev); 82 spin_unlock_bh(root_lock);
81} 83}
82 84
83static inline void 85static inline void
@@ -302,7 +304,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
302 *fp = f->next; 304 *fp = f->next;
303 tcf_tree_unlock(tp); 305 tcf_tree_unlock(tp);
304 306
305 route4_reset_fastmap(tp->q->dev, head, f->id); 307 route4_reset_fastmap(tp->q, head, f->id);
306 route4_delete_filter(tp, f); 308 route4_delete_filter(tp, f);
307 309
308 /* Strip tree */ 310 /* Strip tree */
@@ -500,7 +502,7 @@ reinsert:
500 } 502 }
501 tcf_tree_unlock(tp); 503 tcf_tree_unlock(tp);
502 504
503 route4_reset_fastmap(tp->q->dev, head, f->id); 505 route4_reset_fastmap(tp->q, head, f->id);
504 *arg = (unsigned long)f; 506 *arg = (unsigned long)f;
505 return 0; 507 return 0;
506 508
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 10f01ad04380..b3ef8307204e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -99,7 +99,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
99 ---requeue 99 ---requeue
100 100
101 requeues once dequeued packet. It is used for non-standard or 101 requeues once dequeued packet. It is used for non-standard or
102 just buggy devices, which can defer output even if dev->tbusy=0. 102 just buggy devices, which can defer output even if netif_queue_stopped()=0.
103 103
104 ---reset 104 ---reset
105 105
@@ -281,11 +281,10 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
281{ 281{
282 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, 282 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
283 timer); 283 timer);
284 struct net_device *dev = wd->qdisc->dev;
285 284
286 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 285 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
287 smp_wmb(); 286 smp_wmb();
288 netif_schedule(dev); 287 __netif_schedule(wd->qdisc);
289 288
290 return HRTIMER_NORESTART; 289 return HRTIMER_NORESTART;
291} 290}
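With the device pointer gone from the callback, a watchdog expiry now wakes only the owning qdisc instead of the whole device. For context, a sketch of the arming side in a rate-limiting qdisc; it assumes the existing qdisc_watchdog_schedule() helper (which marks the qdisc TCQ_F_THROTTLED and arms the hrtimer), and the shaper_sched_data/next_send names are illustrative:

	static struct sk_buff *shaper_dequeue(struct Qdisc *sch)
	{
		struct shaper_sched_data *q = qdisc_priv(sch);

		if (psched_get_time() < q->next_send) {
			/* too early: throttle; the callback above will
			 * clear TCQ_F_THROTTLED and __netif_schedule() us */
			qdisc_watchdog_schedule(&q->watchdog, q->next_send);
			return NULL;
		}
		return qdisc_dequeue_head(sch);
	}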
@@ -316,6 +315,110 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
316} 315}
317EXPORT_SYMBOL(qdisc_watchdog_cancel); 316EXPORT_SYMBOL(qdisc_watchdog_cancel);
318 317
318static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
319{
320 unsigned int size = n * sizeof(struct hlist_head), i;
321 struct hlist_head *h;
322
323 if (size <= PAGE_SIZE)
324 h = kmalloc(size, GFP_KERNEL);
325 else
326 h = (struct hlist_head *)
327 __get_free_pages(GFP_KERNEL, get_order(size));
328
329 if (h != NULL) {
330 for (i = 0; i < n; i++)
331 INIT_HLIST_HEAD(&h[i]);
332 }
333 return h;
334}
335
336static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
337{
338 unsigned int size = n * sizeof(struct hlist_head);
339
340 if (size <= PAGE_SIZE)
341 kfree(h);
342 else
343 free_pages((unsigned long)h, get_order(size));
344}
345
346void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
347{
348 struct Qdisc_class_common *cl;
349 struct hlist_node *n, *next;
350 struct hlist_head *nhash, *ohash;
351 unsigned int nsize, nmask, osize;
352 unsigned int i, h;
353
354 /* Rehash when load factor exceeds 0.75 */
355 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
356 return;
357 nsize = clhash->hashsize * 2;
358 nmask = nsize - 1;
359 nhash = qdisc_class_hash_alloc(nsize);
360 if (nhash == NULL)
361 return;
362
363 ohash = clhash->hash;
364 osize = clhash->hashsize;
365
366 sch_tree_lock(sch);
367 for (i = 0; i < osize; i++) {
368 hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
369 h = qdisc_class_hash(cl->classid, nmask);
370 hlist_add_head(&cl->hnode, &nhash[h]);
371 }
372 }
373 clhash->hash = nhash;
374 clhash->hashsize = nsize;
375 clhash->hashmask = nmask;
376 sch_tree_unlock(sch);
377
378 qdisc_class_hash_free(ohash, osize);
379}
380EXPORT_SYMBOL(qdisc_class_hash_grow);
381
382int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
383{
384 unsigned int size = 4;
385
386 clhash->hash = qdisc_class_hash_alloc(size);
387 if (clhash->hash == NULL)
388 return -ENOMEM;
389 clhash->hashsize = size;
390 clhash->hashmask = size - 1;
391 clhash->hashelems = 0;
392 return 0;
393}
394EXPORT_SYMBOL(qdisc_class_hash_init);
395
396void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
397{
398 qdisc_class_hash_free(clhash->hash, clhash->hashsize);
399}
400EXPORT_SYMBOL(qdisc_class_hash_destroy);
401
402void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
403 struct Qdisc_class_common *cl)
404{
405 unsigned int h;
406
407 INIT_HLIST_NODE(&cl->hnode);
408 h = qdisc_class_hash(cl->classid, clhash->hashmask);
409 hlist_add_head(&cl->hnode, &clhash->hash[h]);
410 clhash->hashelems++;
411}
412EXPORT_SYMBOL(qdisc_class_hash_insert);
413
414void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
415 struct Qdisc_class_common *cl)
416{
417 hlist_del(&cl->hnode);
418 clhash->hashelems--;
419}
420EXPORT_SYMBOL(qdisc_class_hash_remove);
421
319/* Allocate a unique handle from space managed by kernel */ 422
320 423
321static u32 qdisc_alloc_handle(struct net_device *dev) 424static u32 qdisc_alloc_handle(struct net_device *dev)
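The Qdisc_class_hash API added above replaces the fixed 16-bucket class tables that classful qdiscs used to open-code; the cbq conversion later in this patch is the first user. A condensed sketch of the intended pattern (the my_* names are illustrative):

	struct my_class {
		struct Qdisc_class_common common; /* classid + hash linkage */
		/* scheduler-specific state ... */
	};

	static struct my_class *my_class_lookup(struct Qdisc_class_hash *clhash,
						u32 classid)
	{
		struct Qdisc_class_common *clc;

		clc = qdisc_class_find(clhash, classid);
		if (clc == NULL)
			return NULL;
		return container_of(clc, struct my_class, common);
	}

New classes are added with qdisc_class_hash_insert() under sch_tree_lock(), and qdisc_class_hash_grow() is called afterwards outside the lock, exactly as cbq_change_class() does below; grow itself takes sch_tree_lock() only around the final rehash.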
@@ -332,32 +435,39 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
332 return i>0 ? autohandle : 0; 435 return i>0 ? autohandle : 0;
333} 436}
334 437
335/* Attach toplevel qdisc to device dev */ 438/* Attach toplevel qdisc to device queue. */
336 439
337static struct Qdisc * 440static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
338dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) 441 struct Qdisc *qdisc)
339{ 442{
443 spinlock_t *root_lock;
340 struct Qdisc *oqdisc; 444 struct Qdisc *oqdisc;
445 int ingress;
341 446
342 if (dev->flags & IFF_UP) 447 ingress = 0;
343 dev_deactivate(dev); 448 if (qdisc && qdisc->flags&TCQ_F_INGRESS)
449 ingress = 1;
344 450
345 qdisc_lock_tree(dev); 451 if (ingress) {
346 if (qdisc && qdisc->flags&TCQ_F_INGRESS) { 452 oqdisc = dev_queue->qdisc;
347 oqdisc = dev->qdisc_ingress; 453 } else {
454 oqdisc = dev_queue->qdisc_sleeping;
455 }
456
457 root_lock = qdisc_root_lock(oqdisc);
458 spin_lock_bh(root_lock);
459
460 if (ingress) {
348 /* Prune old scheduler */ 461 /* Prune old scheduler */
349 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) { 462 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
350 /* delete */ 463 /* delete */
351 qdisc_reset(oqdisc); 464 qdisc_reset(oqdisc);
352 dev->qdisc_ingress = NULL; 465 dev_queue->qdisc = NULL;
353 } else { /* new */ 466 } else { /* new */
354 dev->qdisc_ingress = qdisc; 467 dev_queue->qdisc = qdisc;
355 } 468 }
356 469
357 } else { 470 } else {
358
359 oqdisc = dev->qdisc_sleeping;
360
361 /* Prune old scheduler */ 471 /* Prune old scheduler */
362 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) 472 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
363 qdisc_reset(oqdisc); 473 qdisc_reset(oqdisc);
@@ -365,14 +475,11 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
365 /* ... and graft new one */ 475 /* ... and graft new one */
366 if (qdisc == NULL) 476 if (qdisc == NULL)
367 qdisc = &noop_qdisc; 477 qdisc = &noop_qdisc;
368 dev->qdisc_sleeping = qdisc; 478 dev_queue->qdisc_sleeping = qdisc;
369 dev->qdisc = &noop_qdisc; 479 dev_queue->qdisc = &noop_qdisc;
370 } 480 }
371 481
372 qdisc_unlock_tree(dev); 482 spin_unlock_bh(root_lock);
373
374 if (dev->flags & IFF_UP)
375 dev_activate(dev);
376 483
377 return oqdisc; 484 return oqdisc;
378} 485}
@@ -389,7 +496,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
389 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) 496 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
390 return; 497 return;
391 498
392 sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); 499 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
393 if (sch == NULL) { 500 if (sch == NULL) {
394 WARN_ON(parentid != TC_H_ROOT); 501 WARN_ON(parentid != TC_H_ROOT);
395 return; 502 return;
@@ -405,26 +512,66 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
405} 512}
406EXPORT_SYMBOL(qdisc_tree_decrease_qlen); 513EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
407 514
408/* Graft qdisc "new" to class "classid" of qdisc "parent" or 515static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
409 to device "dev". 516 struct Qdisc *old, struct Qdisc *new)
517{
518 if (new || old)
519 qdisc_notify(skb, n, clid, old, new);
410 520
411 Old qdisc is not destroyed but returned in *old. 521 if (old) {
522 spin_lock_bh(&old->q.lock);
523 qdisc_destroy(old);
524 spin_unlock_bh(&old->q.lock);
525 }
526}
527
528/* Graft qdisc "new" to class "classid" of qdisc "parent" or
529 * to device "dev".
530 *
531 * When appropriate send a netlink notification using 'skb'
532 * and "n".
533 *
534 * On success, destroy old qdisc.
412 */ 535 */
413 536
414static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, 537static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
415 u32 classid, 538 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
416 struct Qdisc *new, struct Qdisc **old) 539 struct Qdisc *new, struct Qdisc *old)
417{ 540{
541 struct Qdisc *q = old;
418 int err = 0; 542 int err = 0;
419 struct Qdisc *q = *old;
420
421 543
422 if (parent == NULL) { 544 if (parent == NULL) {
423 if (q && q->flags&TCQ_F_INGRESS) { 545 unsigned int i, num_q, ingress;
424 *old = dev_graft_qdisc(dev, q); 546
425 } else { 547 ingress = 0;
426 *old = dev_graft_qdisc(dev, new); 548 num_q = dev->num_tx_queues;
549 if (q && q->flags & TCQ_F_INGRESS) {
550 num_q = 1;
551 ingress = 1;
552 }
553
554 if (dev->flags & IFF_UP)
555 dev_deactivate(dev);
556
557 for (i = 0; i < num_q; i++) {
558 struct netdev_queue *dev_queue = &dev->rx_queue;
559
560 if (!ingress)
561 dev_queue = netdev_get_tx_queue(dev, i);
562
563 if (ingress) {
564 old = dev_graft_qdisc(dev_queue, q);
565 } else {
566 old = dev_graft_qdisc(dev_queue, new);
567 if (new && i > 0)
568 atomic_inc(&new->refcnt);
569 }
570 notify_and_destroy(skb, n, classid, old, new);
427 } 571 }
572
573 if (dev->flags & IFF_UP)
574 dev_activate(dev);
428 } else { 575 } else {
429 const struct Qdisc_class_ops *cops = parent->ops->cl_ops; 576 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
430 577
@@ -433,10 +580,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
433 if (cops) { 580 if (cops) {
434 unsigned long cl = cops->get(parent, classid); 581 unsigned long cl = cops->get(parent, classid);
435 if (cl) { 582 if (cl) {
436 err = cops->graft(parent, cl, new, old); 583 err = cops->graft(parent, cl, new, &old);
437 cops->put(parent, cl); 584 cops->put(parent, cl);
438 } 585 }
439 } 586 }
587 if (!err)
588 notify_and_destroy(skb, n, classid, old, new);
440 } 589 }
441 return err; 590 return err;
442} 591}
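Note the refcounting in the root-graft path above: the same qdisc is installed on every TX queue of a multiqueue device, and each queue slot beyond the first takes an extra reference, so the qdisc's refcnt ends up matching the number of queues it is grafted on. Condensed from the hunk above (not new code):

	for (i = 0; i < num_q; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		old = dev_graft_qdisc(txq, new);
		if (new && i > 0)
			atomic_inc(&new->refcnt);	/* one ref per extra queue */
		notify_and_destroy(skb, n, classid, old, new);
	}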
@@ -448,8 +597,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
448 */ 597 */
449 598
450static struct Qdisc * 599static struct Qdisc *
451qdisc_create(struct net_device *dev, u32 parent, u32 handle, 600qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
452 struct nlattr **tca, int *errp) 601 u32 parent, u32 handle, struct nlattr **tca, int *errp)
453{ 602{
454 int err; 603 int err;
455 struct nlattr *kind = tca[TCA_KIND]; 604 struct nlattr *kind = tca[TCA_KIND];
@@ -489,7 +638,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
489 if (ops == NULL) 638 if (ops == NULL)
490 goto err_out; 639 goto err_out;
491 640
492 sch = qdisc_alloc(dev, ops); 641 sch = qdisc_alloc(dev_queue, ops);
493 if (IS_ERR(sch)) { 642 if (IS_ERR(sch)) {
494 err = PTR_ERR(sch); 643 err = PTR_ERR(sch);
495 goto err_out2; 644 goto err_out2;
@@ -499,10 +648,8 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
499 648
500 if (handle == TC_H_INGRESS) { 649 if (handle == TC_H_INGRESS) {
501 sch->flags |= TCQ_F_INGRESS; 650 sch->flags |= TCQ_F_INGRESS;
502 sch->stats_lock = &dev->ingress_lock;
503 handle = TC_H_MAKE(TC_H_INGRESS, 0); 651 handle = TC_H_MAKE(TC_H_INGRESS, 0);
504 } else { 652 } else {
505 sch->stats_lock = &dev->queue_lock;
506 if (handle == 0) { 653 if (handle == 0) {
507 handle = qdisc_alloc_handle(dev); 654 handle = qdisc_alloc_handle(dev);
508 err = -ENOMEM; 655 err = -ENOMEM;
@@ -516,7 +663,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
516 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 663 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
517 if (tca[TCA_RATE]) { 664 if (tca[TCA_RATE]) {
518 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 665 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
519 sch->stats_lock, 666 qdisc_root_lock(sch),
520 tca[TCA_RATE]); 667 tca[TCA_RATE]);
521 if (err) { 668 if (err) {
522 /* 669 /*
@@ -529,9 +676,9 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
529 goto err_out3; 676 goto err_out3;
530 } 677 }
531 } 678 }
532 qdisc_lock_tree(dev); 679 spin_lock_bh(&dev->qdisc_list_lock);
533 list_add_tail(&sch->list, &dev->qdisc_list); 680 list_add_tail(&sch->list, &dev->qdisc_list);
534 qdisc_unlock_tree(dev); 681 spin_unlock_bh(&dev->qdisc_list_lock);
535 682
536 return sch; 683 return sch;
537 } 684 }
@@ -558,7 +705,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
558 } 705 }
559 if (tca[TCA_RATE]) 706 if (tca[TCA_RATE])
560 gen_replace_estimator(&sch->bstats, &sch->rate_est, 707 gen_replace_estimator(&sch->bstats, &sch->rate_est,
561 sch->stats_lock, tca[TCA_RATE]); 708 qdisc_root_lock(sch), tca[TCA_RATE]);
562 return 0; 709 return 0;
563} 710}
564 711
@@ -634,10 +781,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
634 return -ENOENT; 781 return -ENOENT;
635 q = qdisc_leaf(p, clid); 782 q = qdisc_leaf(p, clid);
636 } else { /* ingress */ 783 } else { /* ingress */
637 q = dev->qdisc_ingress; 784 q = dev->rx_queue.qdisc;
638 } 785 }
639 } else { 786 } else {
640 q = dev->qdisc_sleeping; 787 struct netdev_queue *dev_queue;
788 dev_queue = netdev_get_tx_queue(dev, 0);
789 q = dev_queue->qdisc_sleeping;
641 } 790 }
642 if (!q) 791 if (!q)
643 return -ENOENT; 792 return -ENOENT;
@@ -657,14 +806,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
657 return -EINVAL; 806 return -EINVAL;
658 if (q->handle == 0) 807 if (q->handle == 0)
659 return -ENOENT; 808 return -ENOENT;
660 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0) 809 if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
661 return err; 810 return err;
662 if (q) {
663 qdisc_notify(skb, n, clid, q, NULL);
664 qdisc_lock_tree(dev);
665 qdisc_destroy(q);
666 qdisc_unlock_tree(dev);
667 }
668 } else { 811 } else {
669 qdisc_notify(skb, n, clid, NULL, q); 812 qdisc_notify(skb, n, clid, NULL, q);
670 } 813 }
@@ -708,10 +851,12 @@ replay:
708 return -ENOENT; 851 return -ENOENT;
709 q = qdisc_leaf(p, clid); 852 q = qdisc_leaf(p, clid);
710 } else { /*ingress */ 853 } else { /*ingress */
711 q = dev->qdisc_ingress; 854 q = dev->rx_queue.qdisc;
712 } 855 }
713 } else { 856 } else {
714 q = dev->qdisc_sleeping; 857 struct netdev_queue *dev_queue;
858 dev_queue = netdev_get_tx_queue(dev, 0);
859 q = dev_queue->qdisc_sleeping;
715 } 860 }
716 861
717 /* It may be default qdisc, ignore it */ 862 /* It may be default qdisc, ignore it */
@@ -788,10 +933,12 @@ create_n_graft:
788 if (!(n->nlmsg_flags&NLM_F_CREATE)) 933 if (!(n->nlmsg_flags&NLM_F_CREATE))
789 return -ENOENT; 934 return -ENOENT;
790 if (clid == TC_H_INGRESS) 935 if (clid == TC_H_INGRESS)
791 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent, 936 q = qdisc_create(dev, &dev->rx_queue,
937 tcm->tcm_parent, tcm->tcm_parent,
792 tca, &err); 938 tca, &err);
793 else 939 else
794 q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle, 940 q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
941 tcm->tcm_parent, tcm->tcm_handle,
795 tca, &err); 942 tca, &err);
796 if (q == NULL) { 943 if (q == NULL) {
797 if (err == -EAGAIN) 944 if (err == -EAGAIN)
@@ -801,22 +948,18 @@ create_n_graft:
801 948
802graft: 949graft:
803 if (1) { 950 if (1) {
804 struct Qdisc *old_q = NULL; 951 spinlock_t *root_lock;
805 err = qdisc_graft(dev, p, clid, q, &old_q); 952
953 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
806 if (err) { 954 if (err) {
807 if (q) { 955 if (q) {
808 qdisc_lock_tree(dev); 956 root_lock = qdisc_root_lock(q);
957 spin_lock_bh(root_lock);
809 qdisc_destroy(q); 958 qdisc_destroy(q);
810 qdisc_unlock_tree(dev); 959 spin_unlock_bh(root_lock);
811 } 960 }
812 return err; 961 return err;
813 } 962 }
814 qdisc_notify(skb, n, clid, old_q, q);
815 if (old_q) {
816 qdisc_lock_tree(dev);
817 qdisc_destroy(old_q);
818 qdisc_unlock_tree(dev);
819 }
820 } 963 }
821 return 0; 964 return 0;
822} 965}
@@ -834,7 +977,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
834 tcm->tcm_family = AF_UNSPEC; 977 tcm->tcm_family = AF_UNSPEC;
835 tcm->tcm__pad1 = 0; 978 tcm->tcm__pad1 = 0;
836 tcm->tcm__pad2 = 0; 979 tcm->tcm__pad2 = 0;
837 tcm->tcm_ifindex = q->dev->ifindex; 980 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
838 tcm->tcm_parent = clid; 981 tcm->tcm_parent = clid;
839 tcm->tcm_handle = q->handle; 982 tcm->tcm_handle = q->handle;
840 tcm->tcm_info = atomic_read(&q->refcnt); 983 tcm->tcm_info = atomic_read(&q->refcnt);
@@ -844,7 +987,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
844 q->qstats.qlen = q->q.qlen; 987 q->qstats.qlen = q->q.qlen;
845 988
846 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 989 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
847 TCA_XSTATS, q->stats_lock, &d) < 0) 990 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
848 goto nla_put_failure; 991 goto nla_put_failure;
849 992
850 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 993 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -949,6 +1092,7 @@ done:
949static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1092static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
950{ 1093{
951 struct net *net = sock_net(skb->sk); 1094 struct net *net = sock_net(skb->sk);
1095 struct netdev_queue *dev_queue;
952 struct tcmsg *tcm = NLMSG_DATA(n); 1096 struct tcmsg *tcm = NLMSG_DATA(n);
953 struct nlattr *tca[TCA_MAX + 1]; 1097 struct nlattr *tca[TCA_MAX + 1];
954 struct net_device *dev; 1098 struct net_device *dev;
@@ -986,6 +1130,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
986 1130
987 /* Step 1. Determine qdisc handle X:0 */ 1131 /* Step 1. Determine qdisc handle X:0 */
988 1132
1133 dev_queue = netdev_get_tx_queue(dev, 0);
989 if (pid != TC_H_ROOT) { 1134 if (pid != TC_H_ROOT) {
990 u32 qid1 = TC_H_MAJ(pid); 1135 u32 qid1 = TC_H_MAJ(pid);
991 1136
@@ -996,7 +1141,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
996 } else if (qid1) { 1141 } else if (qid1) {
997 qid = qid1; 1142 qid = qid1;
998 } else if (qid == 0) 1143 } else if (qid == 0)
999 qid = dev->qdisc_sleeping->handle; 1144 qid = dev_queue->qdisc_sleeping->handle;
1000 1145
1001 /* Now qid is genuine qdisc handle consistent 1146 /* Now qid is genuine qdisc handle consistent
1002 both with parent and child. 1147 both with parent and child.
@@ -1007,7 +1152,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1007 pid = TC_H_MAKE(qid, pid); 1152 pid = TC_H_MAKE(qid, pid);
1008 } else { 1153 } else {
1009 if (qid == 0) 1154 if (qid == 0)
1010 qid = dev->qdisc_sleeping->handle; 1155 qid = dev_queue->qdisc_sleeping->handle;
1011 } 1156 }
1012 1157
1013 /* OK. Locate qdisc */ 1158 /* OK. Locate qdisc */
@@ -1080,7 +1225,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1080 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1225 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1081 tcm = NLMSG_DATA(nlh); 1226 tcm = NLMSG_DATA(nlh);
1082 tcm->tcm_family = AF_UNSPEC; 1227 tcm->tcm_family = AF_UNSPEC;
1083 tcm->tcm_ifindex = q->dev->ifindex; 1228 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1084 tcm->tcm_parent = q->handle; 1229 tcm->tcm_parent = q->handle;
1085 tcm->tcm_handle = q->handle; 1230 tcm->tcm_handle = q->handle;
1086 tcm->tcm_info = 0; 1231 tcm->tcm_info = 0;
@@ -1089,7 +1234,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1089 goto nla_put_failure; 1234 goto nla_put_failure;
1090 1235
1091 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1236 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
1092 TCA_XSTATS, q->stats_lock, &d) < 0) 1237 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
1093 goto nla_put_failure; 1238 goto nla_put_failure;
1094 1239
1095 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1240 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index db0e23ae85f8..0de757e3be4a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -296,7 +296,8 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
296 goto err_out; 296 goto err_out;
297 } 297 }
298 flow->filter_list = NULL; 298 flow->filter_list = NULL;
299 flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 299 flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
300 &pfifo_qdisc_ops, classid);
300 if (!flow->q) 301 if (!flow->q)
301 flow->q = &noop_qdisc; 302 flow->q = &noop_qdisc;
302 pr_debug("atm_tc_change: qdisc %p\n", flow->q); 303 pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -555,7 +556,8 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
555 556
556 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); 557 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
557 p->flows = &p->link; 558 p->flows = &p->link;
558 p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); 559 p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
560 &pfifo_qdisc_ops, sch->handle);
559 if (!p->link.q) 561 if (!p->link.q)
560 p->link.q = &noop_qdisc; 562 p->link.q = &noop_qdisc;
561 pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); 563 pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 2a3c97f7dc63..a3953bbe2d79 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -73,11 +73,10 @@ struct cbq_sched_data;
73 73
74struct cbq_class 74struct cbq_class
75{ 75{
76 struct cbq_class *next; /* hash table link */ 76 struct Qdisc_class_common common;
77 struct cbq_class *next_alive; /* next class with backlog in this priority band */ 77 struct cbq_class *next_alive; /* next class with backlog in this priority band */
78 78
79/* Parameters */ 79/* Parameters */
80 u32 classid;
81 unsigned char priority; /* class priority */ 80 unsigned char priority; /* class priority */
82 unsigned char priority2; /* priority to be used after overlimit */ 81 unsigned char priority2; /* priority to be used after overlimit */
83 unsigned char ewma_log; /* time constant for idle time calculation */ 82 unsigned char ewma_log; /* time constant for idle time calculation */
@@ -144,7 +143,7 @@ struct cbq_class
144 143
145struct cbq_sched_data 144struct cbq_sched_data
146{ 145{
147 struct cbq_class *classes[16]; /* Hash table of all classes */ 146 struct Qdisc_class_hash clhash; /* Hash table of all classes */
148 int nclasses[TC_CBQ_MAXPRIO+1]; 147 int nclasses[TC_CBQ_MAXPRIO+1];
149 unsigned quanta[TC_CBQ_MAXPRIO+1]; 148 unsigned quanta[TC_CBQ_MAXPRIO+1];
150 149
@@ -177,23 +176,15 @@ struct cbq_sched_data
177 176
178#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) 177#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
179 178
180
181static __inline__ unsigned cbq_hash(u32 h)
182{
183 h ^= h>>8;
184 h ^= h>>4;
185 return h&0xF;
186}
187
188static __inline__ struct cbq_class * 179static __inline__ struct cbq_class *
189cbq_class_lookup(struct cbq_sched_data *q, u32 classid) 180cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
190{ 181{
191 struct cbq_class *cl; 182 struct Qdisc_class_common *clc;
192 183
193 for (cl = q->classes[cbq_hash(classid)]; cl; cl = cl->next) 184 clc = qdisc_class_find(&q->clhash, classid);
194 if (cl->classid == classid) 185 if (clc == NULL)
195 return cl; 186 return NULL;
196 return NULL; 187 return container_of(clc, struct cbq_class, common);
197} 188}
198 189
199#ifdef CONFIG_NET_CLS_ACT 190#ifdef CONFIG_NET_CLS_ACT
@@ -659,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
659 } 650 }
660 651
661 sch->flags &= ~TCQ_F_THROTTLED; 652 sch->flags &= ~TCQ_F_THROTTLED;
662 netif_schedule(sch->dev); 653 __netif_schedule(sch);
663 return HRTIMER_NORESTART; 654 return HRTIMER_NORESTART;
664} 655}
665 656
@@ -1071,13 +1062,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
1071static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) 1062static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1072{ 1063{
1073 struct cbq_class *cl; 1064 struct cbq_class *cl;
1074 unsigned h; 1065 struct hlist_node *n;
1066 unsigned int h;
1075 1067
1076 if (q->quanta[prio] == 0) 1068 if (q->quanta[prio] == 0)
1077 return; 1069 return;
1078 1070
1079 for (h=0; h<16; h++) { 1071 for (h = 0; h < q->clhash.hashsize; h++) {
1080 for (cl = q->classes[h]; cl; cl = cl->next) { 1072 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
1081 /* BUGGGG... Beware! This expression suffer of 1073 /* BUGGGG... Beware! This expression suffer of
1082 arithmetic overflows! 1074 arithmetic overflows!
1083 */ 1075 */
@@ -1085,9 +1077,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1085 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ 1077 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
1086 q->quanta[prio]; 1078 q->quanta[prio];
1087 } 1079 }
1088 if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) { 1080 if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
1089 printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->classid, cl->quantum); 1081 printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
1090 cl->quantum = cl->qdisc->dev->mtu/2 + 1; 1082 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
1091 } 1083 }
1092 } 1084 }
1093 } 1085 }
@@ -1114,10 +1106,12 @@ static void cbq_sync_defmap(struct cbq_class *cl)
1114 if (split->defaults[i]) 1106 if (split->defaults[i])
1115 continue; 1107 continue;
1116 1108
1117 for (h=0; h<16; h++) { 1109 for (h = 0; h < q->clhash.hashsize; h++) {
1110 struct hlist_node *n;
1118 struct cbq_class *c; 1111 struct cbq_class *c;
1119 1112
1120 for (c = q->classes[h]; c; c = c->next) { 1113 hlist_for_each_entry(c, n, &q->clhash.hash[h],
1114 common.hnode) {
1121 if (c->split == split && c->level < level && 1115 if (c->split == split && c->level < level &&
1122 c->defmap&(1<<i)) { 1116 c->defmap&(1<<i)) {
1123 split->defaults[i] = c; 1117 split->defaults[i] = c;
@@ -1135,12 +1129,12 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
1135 if (splitid == 0) { 1129 if (splitid == 0) {
1136 if ((split = cl->split) == NULL) 1130 if ((split = cl->split) == NULL)
1137 return; 1131 return;
1138 splitid = split->classid; 1132 splitid = split->common.classid;
1139 } 1133 }
1140 1134
1141 if (split == NULL || split->classid != splitid) { 1135 if (split == NULL || split->common.classid != splitid) {
1142 for (split = cl->tparent; split; split = split->tparent) 1136 for (split = cl->tparent; split; split = split->tparent)
1143 if (split->classid == splitid) 1137 if (split->common.classid == splitid)
1144 break; 1138 break;
1145 } 1139 }
1146 1140
@@ -1163,13 +1157,7 @@ static void cbq_unlink_class(struct cbq_class *this)
1163 struct cbq_class *cl, **clp; 1157 struct cbq_class *cl, **clp;
1164 struct cbq_sched_data *q = qdisc_priv(this->qdisc); 1158 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1165 1159
1166 for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) { 1160 qdisc_class_hash_remove(&q->clhash, &this->common);
1167 if (cl == this) {
1168 *clp = cl->next;
1169 cl->next = NULL;
1170 break;
1171 }
1172 }
1173 1161
1174 if (this->tparent) { 1162 if (this->tparent) {
1175 clp=&this->sibling; 1163 clp=&this->sibling;
@@ -1195,12 +1183,10 @@ static void cbq_unlink_class(struct cbq_class *this)
1195static void cbq_link_class(struct cbq_class *this) 1183static void cbq_link_class(struct cbq_class *this)
1196{ 1184{
1197 struct cbq_sched_data *q = qdisc_priv(this->qdisc); 1185 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1198 unsigned h = cbq_hash(this->classid);
1199 struct cbq_class *parent = this->tparent; 1186 struct cbq_class *parent = this->tparent;
1200 1187
1201 this->sibling = this; 1188 this->sibling = this;
1202 this->next = q->classes[h]; 1189 qdisc_class_hash_insert(&q->clhash, &this->common);
1203 q->classes[h] = this;
1204 1190
1205 if (parent == NULL) 1191 if (parent == NULL)
1206 return; 1192 return;
@@ -1242,6 +1228,7 @@ cbq_reset(struct Qdisc* sch)
1242{ 1228{
1243 struct cbq_sched_data *q = qdisc_priv(sch); 1229 struct cbq_sched_data *q = qdisc_priv(sch);
1244 struct cbq_class *cl; 1230 struct cbq_class *cl;
1231 struct hlist_node *n;
1245 int prio; 1232 int prio;
1246 unsigned h; 1233 unsigned h;
1247 1234
@@ -1258,8 +1245,8 @@ cbq_reset(struct Qdisc* sch)
1258 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) 1245 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1259 q->active[prio] = NULL; 1246 q->active[prio] = NULL;
1260 1247
1261 for (h = 0; h < 16; h++) { 1248 for (h = 0; h < q->clhash.hashsize; h++) {
1262 for (cl = q->classes[h]; cl; cl = cl->next) { 1249 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
1263 qdisc_reset(cl->q); 1250 qdisc_reset(cl->q);
1264 1251
1265 cl->next_alive = NULL; 1252 cl->next_alive = NULL;
@@ -1406,11 +1393,16 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1406 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) 1393 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
1407 return -EINVAL; 1394 return -EINVAL;
1408 1395
1396 err = qdisc_class_hash_init(&q->clhash);
1397 if (err < 0)
1398 goto put_rtab;
1399
1409 q->link.refcnt = 1; 1400 q->link.refcnt = 1;
1410 q->link.sibling = &q->link; 1401 q->link.sibling = &q->link;
1411 q->link.classid = sch->handle; 1402 q->link.common.classid = sch->handle;
1412 q->link.qdisc = sch; 1403 q->link.qdisc = sch;
1413 if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1404 if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1405 &pfifo_qdisc_ops,
1414 sch->handle))) 1406 sch->handle)))
1415 q->link.q = &noop_qdisc; 1407 q->link.q = &noop_qdisc;
1416 1408
@@ -1419,7 +1411,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1419 q->link.cpriority = TC_CBQ_MAXPRIO-1; 1411 q->link.cpriority = TC_CBQ_MAXPRIO-1;
1420 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; 1412 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
1421 q->link.overlimit = cbq_ovl_classic; 1413 q->link.overlimit = cbq_ovl_classic;
1422 q->link.allot = psched_mtu(sch->dev); 1414 q->link.allot = psched_mtu(qdisc_dev(sch));
1423 q->link.quantum = q->link.allot; 1415 q->link.quantum = q->link.allot;
1424 q->link.weight = q->link.R_tab->rate.rate; 1416 q->link.weight = q->link.R_tab->rate.rate;
1425 1417
@@ -1441,6 +1433,10 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1441 1433
1442 cbq_addprio(q, &q->link); 1434 cbq_addprio(q, &q->link);
1443 return 0; 1435 return 0;
1436
1437put_rtab:
1438 qdisc_put_rtab(q->link.R_tab);
1439 return err;
1444} 1440}
1445 1441
1446static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) 1442static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
@@ -1521,7 +1517,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1521 struct tc_cbq_fopt opt; 1517 struct tc_cbq_fopt opt;
1522 1518
1523 if (cl->split || cl->defmap) { 1519 if (cl->split || cl->defmap) {
1524 opt.split = cl->split ? cl->split->classid : 0; 1520 opt.split = cl->split ? cl->split->common.classid : 0;
1525 opt.defmap = cl->defmap; 1521 opt.defmap = cl->defmap;
1526 opt.defchange = ~0; 1522 opt.defchange = ~0;
1527 NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); 1523 NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
@@ -1602,10 +1598,10 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1602 struct nlattr *nest; 1598 struct nlattr *nest;
1603 1599
1604 if (cl->tparent) 1600 if (cl->tparent)
1605 tcm->tcm_parent = cl->tparent->classid; 1601 tcm->tcm_parent = cl->tparent->common.classid;
1606 else 1602 else
1607 tcm->tcm_parent = TC_H_ROOT; 1603 tcm->tcm_parent = TC_H_ROOT;
1608 tcm->tcm_handle = cl->classid; 1604 tcm->tcm_handle = cl->common.classid;
1609 tcm->tcm_info = cl->q->handle; 1605 tcm->tcm_info = cl->q->handle;
1610 1606
1611 nest = nla_nest_start(skb, TCA_OPTIONS); 1607 nest = nla_nest_start(skb, TCA_OPTIONS);
@@ -1650,8 +1646,10 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1650 1646
1651 if (cl) { 1647 if (cl) {
1652 if (new == NULL) { 1648 if (new == NULL) {
1653 if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1649 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1654 cl->classid)) == NULL) 1650 &pfifo_qdisc_ops,
1651 cl->common.classid);
1652 if (new == NULL)
1655 return -ENOBUFS; 1653 return -ENOBUFS;
1656 } else { 1654 } else {
1657#ifdef CONFIG_NET_CLS_ACT 1655#ifdef CONFIG_NET_CLS_ACT
@@ -1716,6 +1714,7 @@ static void
1716cbq_destroy(struct Qdisc* sch) 1714cbq_destroy(struct Qdisc* sch)
1717{ 1715{
1718 struct cbq_sched_data *q = qdisc_priv(sch); 1716 struct cbq_sched_data *q = qdisc_priv(sch);
1717 struct hlist_node *n, *next;
1719 struct cbq_class *cl; 1718 struct cbq_class *cl;
1720 unsigned h; 1719 unsigned h;
1721 1720
@@ -1727,18 +1726,16 @@ cbq_destroy(struct Qdisc* sch)
1727 * classes from root to leaves, which means that filters can still 1726
1728 * be bound to classes which have been destroyed already. --TGR '04 1727 * be bound to classes which have been destroyed already. --TGR '04
1729 */ 1728 */
1730 for (h = 0; h < 16; h++) { 1729 for (h = 0; h < q->clhash.hashsize; h++) {
1731 for (cl = q->classes[h]; cl; cl = cl->next) 1730 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
1732 tcf_destroy_chain(&cl->filter_list); 1731 tcf_destroy_chain(&cl->filter_list);
1733 } 1732 }
1734 for (h = 0; h < 16; h++) { 1733 for (h = 0; h < q->clhash.hashsize; h++) {
1735 struct cbq_class *next; 1734 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
1736 1735 common.hnode)
1737 for (cl = q->classes[h]; cl; cl = next) {
1738 next = cl->next;
1739 cbq_destroy_class(sch, cl); 1736 cbq_destroy_class(sch, cl);
1740 }
1741 } 1737 }
1738 qdisc_class_hash_destroy(&q->clhash);
1742} 1739}
1743 1740
1744static void cbq_put(struct Qdisc *sch, unsigned long arg) 1741static void cbq_put(struct Qdisc *sch, unsigned long arg)
@@ -1747,12 +1744,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
1747 1744
1748 if (--cl->refcnt == 0) { 1745 if (--cl->refcnt == 0) {
1749#ifdef CONFIG_NET_CLS_ACT 1746#ifdef CONFIG_NET_CLS_ACT
1747 spinlock_t *root_lock = qdisc_root_lock(sch);
1750 struct cbq_sched_data *q = qdisc_priv(sch); 1748 struct cbq_sched_data *q = qdisc_priv(sch);
1751 1749
1752 spin_lock_bh(&sch->dev->queue_lock); 1750 spin_lock_bh(root_lock);
1753 if (q->rx_class == cl) 1751 if (q->rx_class == cl)
1754 q->rx_class = NULL; 1752 q->rx_class = NULL;
1755 spin_unlock_bh(&sch->dev->queue_lock); 1753 spin_unlock_bh(root_lock);
1756#endif 1754#endif
1757 1755
1758 cbq_destroy_class(sch, cl); 1756 cbq_destroy_class(sch, cl);
@@ -1781,7 +1779,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1781 if (cl) { 1779 if (cl) {
1782 /* Check parent */ 1780 /* Check parent */
1783 if (parentid) { 1781 if (parentid) {
1784 if (cl->tparent && cl->tparent->classid != parentid) 1782 if (cl->tparent &&
1783 cl->tparent->common.classid != parentid)
1785 return -EINVAL; 1784 return -EINVAL;
1786 if (!cl->tparent && parentid != TC_H_ROOT) 1785 if (!cl->tparent && parentid != TC_H_ROOT)
1787 return -EINVAL; 1786 return -EINVAL;
@@ -1830,7 +1829,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1830 1829
1831 if (tca[TCA_RATE]) 1830 if (tca[TCA_RATE])
1832 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1831 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1833 &sch->dev->queue_lock, 1832 qdisc_root_lock(sch),
1834 tca[TCA_RATE]); 1833 tca[TCA_RATE]);
1835 return 0; 1834 return 0;
1836 } 1835 }
@@ -1881,9 +1880,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1881 cl->R_tab = rtab; 1880 cl->R_tab = rtab;
1882 rtab = NULL; 1881 rtab = NULL;
1883 cl->refcnt = 1; 1882 cl->refcnt = 1;
1884 if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid))) 1883 if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1884 &pfifo_qdisc_ops, classid)))
1885 cl->q = &noop_qdisc; 1885 cl->q = &noop_qdisc;
1886 cl->classid = classid; 1886 cl->common.classid = classid;
1887 cl->tparent = parent; 1887 cl->tparent = parent;
1888 cl->qdisc = sch; 1888 cl->qdisc = sch;
1889 cl->allot = parent->allot; 1889 cl->allot = parent->allot;
@@ -1916,9 +1916,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1916 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); 1916 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1917 sch_tree_unlock(sch); 1917 sch_tree_unlock(sch);
1918 1918
1919 qdisc_class_hash_grow(sch, &q->clhash);
1920
1919 if (tca[TCA_RATE]) 1921 if (tca[TCA_RATE])
1920 gen_new_estimator(&cl->bstats, &cl->rate_est, 1922 gen_new_estimator(&cl->bstats, &cl->rate_est,
1921 &sch->dev->queue_lock, tca[TCA_RATE]); 1923 qdisc_root_lock(sch), tca[TCA_RATE]);
1922 1924
1923 *arg = (unsigned long)cl; 1925 *arg = (unsigned long)cl;
1924 return 0; 1926 return 0;
@@ -2008,15 +2010,15 @@ static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
2008static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg) 2010static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2009{ 2011{
2010 struct cbq_sched_data *q = qdisc_priv(sch); 2012 struct cbq_sched_data *q = qdisc_priv(sch);
2013 struct cbq_class *cl;
2014 struct hlist_node *n;
2011 unsigned h; 2015 unsigned h;
2012 2016
2013 if (arg->stop) 2017 if (arg->stop)
2014 return; 2018 return;
2015 2019
2016 for (h = 0; h < 16; h++) { 2020 for (h = 0; h < q->clhash.hashsize; h++) {
2017 struct cbq_class *cl; 2021 hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
2018
2019 for (cl = q->classes[h]; cl; cl = cl->next) {
2020 if (arg->count < arg->skip) { 2022 if (arg->count < arg->skip) {
2021 arg->count++; 2023 arg->count++;
2022 continue; 2024 continue;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index c4c1317cd47d..3aafbd17393a 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -60,7 +60,8 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
60 sch, p, new, old); 60 sch, p, new, old);
61 61
62 if (new == NULL) { 62 if (new == NULL) {
63 new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 63 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
64 &pfifo_qdisc_ops,
64 sch->handle); 65 sch->handle);
65 if (new == NULL) 66 if (new == NULL)
66 new = &noop_qdisc; 67 new = &noop_qdisc;
@@ -390,7 +391,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
390 p->default_index = default_index; 391 p->default_index = default_index;
391 p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); 392 p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
392 393
393 p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); 394 p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
395 &pfifo_qdisc_ops, sch->handle);
394 if (p->q == NULL) 396 if (p->q == NULL)
395 p->q = &noop_qdisc; 397 p->q = &noop_qdisc;
396 398
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 95ed48221652..1d97fa42c902 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -48,10 +48,10 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
48 struct fifo_sched_data *q = qdisc_priv(sch); 48 struct fifo_sched_data *q = qdisc_priv(sch);
49 49
50 if (opt == NULL) { 50 if (opt == NULL) {
51 u32 limit = sch->dev->tx_queue_len ? : 1; 51 u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
52 52
53 if (sch->ops == &bfifo_qdisc_ops) 53 if (sch->ops == &bfifo_qdisc_ops)
54 limit *= sch->dev->mtu; 54 limit *= qdisc_dev(sch)->mtu;
55 55
56 q->limit = limit; 56 q->limit = limit;
57 } else { 57 } else {
@@ -107,3 +107,46 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
107 .owner = THIS_MODULE, 107 .owner = THIS_MODULE,
108}; 108};
109EXPORT_SYMBOL(bfifo_qdisc_ops); 109EXPORT_SYMBOL(bfifo_qdisc_ops);
110
111/* Pass size change message down to embedded FIFO */
112int fifo_set_limit(struct Qdisc *q, unsigned int limit)
113{
114 struct nlattr *nla;
115 int ret = -ENOMEM;
116
117 /* Hack to avoid sending change message to non-FIFO */
118 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
119 return 0;
120
121 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
122 if (nla) {
123 nla->nla_type = RTM_NEWQDISC;
124 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
125 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
126
127 ret = q->ops->change(q, nla);
128 kfree(nla);
129 }
130 return ret;
131}
132EXPORT_SYMBOL(fifo_set_limit);
133
134struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
135 unsigned int limit)
136{
137 struct Qdisc *q;
138 int err = -ENOMEM;
139
140 q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
141 ops, TC_H_MAKE(sch->handle, 1));
142 if (q) {
143 err = fifo_set_limit(q, limit);
144 if (err < 0) {
145 qdisc_destroy(q);
146 q = NULL;
147 }
148 }
149
150 return q ? : ERR_PTR(err);
151}
152EXPORT_SYMBOL(fifo_create_dflt);
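A sketch of how a parent qdisc might consume these helpers (a hypothetical consumer; resizable embedded FIFOs are the point of exporting them): create a bfifo child sized to the parent's limit, and pass later limit changes straight through with fifo_set_limit():

	static int parent_replace_child(struct Qdisc *sch, struct Qdisc **pchild,
					unsigned int limit)
	{
		struct Qdisc *child;

		if (*pchild != &noop_qdisc)
			return fifo_set_limit(*pchild, limit);	/* resize in place */

		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
		*pchild = child;
		return 0;
	}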
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 13afa7214392..e244c462e6bd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,58 +29,36 @@
29/* Main transmission queue. */ 29/* Main transmission queue. */
30 30
31/* Modifications to data participating in scheduling must be protected with 31/* Modifications to data participating in scheduling must be protected with
32 * dev->queue_lock spinlock. 32 * qdisc_root_lock(qdisc) spinlock.
33 * 33 *
34 * The idea is the following: 34 * The idea is the following:
35 * - enqueue, dequeue are serialized via top level device 35 * - enqueue, dequeue are serialized via qdisc root lock
36 * spinlock dev->queue_lock. 36 * - ingress filtering is also serialized via qdisc root lock
37 * - ingress filtering is serialized via top level device
38 * spinlock dev->ingress_lock.
39 * - updates to tree and tree walking are only done under the rtnl mutex. 37 * - updates to tree and tree walking are only done under the rtnl mutex.
40 */ 38 */
41 39
42void qdisc_lock_tree(struct net_device *dev)
43 __acquires(dev->queue_lock)
44 __acquires(dev->ingress_lock)
45{
46 spin_lock_bh(&dev->queue_lock);
47 spin_lock(&dev->ingress_lock);
48}
49EXPORT_SYMBOL(qdisc_lock_tree);
50
51void qdisc_unlock_tree(struct net_device *dev)
52 __releases(dev->ingress_lock)
53 __releases(dev->queue_lock)
54{
55 spin_unlock(&dev->ingress_lock);
56 spin_unlock_bh(&dev->queue_lock);
57}
58EXPORT_SYMBOL(qdisc_unlock_tree);
59
60static inline int qdisc_qlen(struct Qdisc *q) 40static inline int qdisc_qlen(struct Qdisc *q)
61{ 41{
62 return q->q.qlen; 42 return q->q.qlen;
63} 43}
64 44
65static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev, 45static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
66 struct Qdisc *q)
67{ 46{
68 if (unlikely(skb->next)) 47 if (unlikely(skb->next))
69 dev->gso_skb = skb; 48 q->gso_skb = skb;
70 else 49 else
71 q->ops->requeue(skb, q); 50 q->ops->requeue(skb, q);
72 51
73 netif_schedule(dev); 52 __netif_schedule(q);
74 return 0; 53 return 0;
75} 54}
76 55
77static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev, 56static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
78 struct Qdisc *q)
79{ 57{
80 struct sk_buff *skb; 58 struct sk_buff *skb;
81 59
82 if ((skb = dev->gso_skb)) 60 if ((skb = q->gso_skb))
83 dev->gso_skb = NULL; 61 q->gso_skb = NULL;
84 else 62 else
85 skb = q->dequeue(q); 63 skb = q->dequeue(q);
86 64
@@ -88,12 +66,12 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
88} 66}
89 67
90static inline int handle_dev_cpu_collision(struct sk_buff *skb, 68static inline int handle_dev_cpu_collision(struct sk_buff *skb,
91 struct net_device *dev, 69 struct netdev_queue *dev_queue,
92 struct Qdisc *q) 70 struct Qdisc *q)
93{ 71{
94 int ret; 72 int ret;
95 73
96 if (unlikely(dev->xmit_lock_owner == smp_processor_id())) { 74 if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
97 /* 75 /*
98 * Same CPU holding the lock. It may be a transient 76 * Same CPU holding the lock. It may be a transient
99 * configuration error, when hard_start_xmit() recurses. We 77 * configuration error, when hard_start_xmit() recurses. We
@@ -103,7 +81,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
103 kfree_skb(skb); 81 kfree_skb(skb);
104 if (net_ratelimit()) 82 if (net_ratelimit())
105 printk(KERN_WARNING "Dead loop on netdevice %s, " 83 printk(KERN_WARNING "Dead loop on netdevice %s, "
106 "fix it urgently!\n", dev->name); 84 "fix it urgently!\n", dev_queue->dev->name);
107 ret = qdisc_qlen(q); 85 ret = qdisc_qlen(q);
108 } else { 86 } else {
109 /* 87 /*
@@ -111,22 +89,22 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
111 * some time. 89 * some time.
112 */ 90 */
113 __get_cpu_var(netdev_rx_stat).cpu_collision++; 91 __get_cpu_var(netdev_rx_stat).cpu_collision++;
114 ret = dev_requeue_skb(skb, dev, q); 92 ret = dev_requeue_skb(skb, q);
115 } 93 }
116 94
117 return ret; 95 return ret;
118} 96}
119 97
120/* 98/*
121 * NOTE: Called under dev->queue_lock with locally disabled BH. 99 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
122 * 100 *
123 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this 101 * __QDISC_STATE_RUNNING guarantees only one CPU can process
124 * device at a time. dev->queue_lock serializes queue accesses for 102 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
125 * this device AND dev->qdisc pointer itself. 103 * this queue.
126 * 104 *
127 * netif_tx_lock serializes accesses to device driver. 105 * netif_tx_lock serializes accesses to device driver.
128 * 106 *
129 * dev->queue_lock and netif_tx_lock are mutually exclusive, 107 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
130 * if one is grabbed, another must be free. 108 * if one is grabbed, another must be free.
131 * 109 *
132 * Note that this procedure can be called by a watchdog timer 110
@@ -136,27 +114,32 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
136 * >0 - queue is not empty. 114 * >0 - queue is not empty.
137 * 115 *
138 */ 116 */
139static inline int qdisc_restart(struct net_device *dev) 117static inline int qdisc_restart(struct Qdisc *q)
140{ 118{
141 struct Qdisc *q = dev->qdisc; 119 struct netdev_queue *txq;
142 struct sk_buff *skb;
143 int ret = NETDEV_TX_BUSY; 120 int ret = NETDEV_TX_BUSY;
121 struct net_device *dev;
122 spinlock_t *root_lock;
123 struct sk_buff *skb;
144 124
145 /* Dequeue packet */ 125 /* Dequeue packet */
146 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) 126 if (unlikely((skb = dequeue_skb(q)) == NULL))
147 return 0; 127 return 0;
148 128
129 root_lock = qdisc_root_lock(q);
130
131 /* And release qdisc */
132 spin_unlock(root_lock);
149 133
150 /* And release queue */ 134 dev = qdisc_dev(q);
151 spin_unlock(&dev->queue_lock); 135 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
152 136
153 HARD_TX_LOCK(dev, smp_processor_id()); 137 HARD_TX_LOCK(dev, txq, smp_processor_id());
154 if (!netif_subqueue_stopped(dev, skb)) 138 if (!netif_subqueue_stopped(dev, skb))
155 ret = dev_hard_start_xmit(skb, dev); 139 ret = dev_hard_start_xmit(skb, dev, txq);
156 HARD_TX_UNLOCK(dev); 140 HARD_TX_UNLOCK(dev, txq);
157 141
158 spin_lock(&dev->queue_lock); 142 spin_lock(root_lock);
159 q = dev->qdisc;
160 143
161 switch (ret) { 144 switch (ret) {
162 case NETDEV_TX_OK: 145 case NETDEV_TX_OK:
@@ -166,7 +149,7 @@ static inline int qdisc_restart(struct net_device *dev)
166 149
167 case NETDEV_TX_LOCKED: 150 case NETDEV_TX_LOCKED:
168 /* Driver try lock failed */ 151 /* Driver try lock failed */
169 ret = handle_dev_cpu_collision(skb, dev, q); 152 ret = handle_dev_cpu_collision(skb, txq, q);
170 break; 153 break;
171 154
172 default: 155 default:
@@ -175,33 +158,33 @@ static inline int qdisc_restart(struct net_device *dev)
175 printk(KERN_WARNING "BUG %s code %d qlen %d\n", 158 printk(KERN_WARNING "BUG %s code %d qlen %d\n",
176 dev->name, ret, q->q.qlen); 159 dev->name, ret, q->q.qlen);
177 160
178 ret = dev_requeue_skb(skb, dev, q); 161 ret = dev_requeue_skb(skb, q);
179 break; 162 break;
180 } 163 }
181 164
165 if (ret && netif_tx_queue_stopped(txq))
166 ret = 0;
167
182 return ret; 168 return ret;
183} 169}
184 170
185void __qdisc_run(struct net_device *dev) 171void __qdisc_run(struct Qdisc *q)
186{ 172{
187 unsigned long start_time = jiffies; 173 unsigned long start_time = jiffies;
188 174
189 while (qdisc_restart(dev)) { 175 while (qdisc_restart(q)) {
190 if (netif_queue_stopped(dev))
191 break;
192
193 /* 176 /*
194 * Postpone processing if 177 * Postpone processing if
195 * 1. another process needs the CPU; 178 * 1. another process needs the CPU;
196 * 2. we've been doing it for too long. 179 * 2. we've been doing it for too long.
197 */ 180 */
198 if (need_resched() || jiffies != start_time) { 181 if (need_resched() || jiffies != start_time) {
199 netif_schedule(dev); 182 __netif_schedule(q);
200 break; 183 break;
201 } 184 }
202 } 185 }
203 186
204 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); 187 clear_bit(__QDISC_STATE_RUNNING, &q->state);
205} 188}
206 189
207static void dev_watchdog(unsigned long arg) 190static void dev_watchdog(unsigned long arg)
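The running flag moves from a per-device link state bit to a per-qdisc state bit, which is what lets independent TX queues run their qdiscs concurrently. The inline caller presumably becomes (in include/net/pkt_sched.h; shown for context, not part of this diff):

	static inline void qdisc_run(struct Qdisc *q)
	{
		/* only the CPU that wins the bit enters __qdisc_run() */
		if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
			__qdisc_run(q);
	}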
@@ -209,19 +192,35 @@ static void dev_watchdog(unsigned long arg)
209 struct net_device *dev = (struct net_device *)arg; 192 struct net_device *dev = (struct net_device *)arg;
210 193
211 netif_tx_lock(dev); 194 netif_tx_lock(dev);
212 if (dev->qdisc != &noop_qdisc) { 195 if (!qdisc_tx_is_noop(dev)) {
213 if (netif_device_present(dev) && 196 if (netif_device_present(dev) &&
214 netif_running(dev) && 197 netif_running(dev) &&
215 netif_carrier_ok(dev)) { 198 netif_carrier_ok(dev)) {
216 if (netif_queue_stopped(dev) && 199 int some_queue_stopped = 0;
217 time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) { 200 unsigned int i;
201
202 for (i = 0; i < dev->num_tx_queues; i++) {
203 struct netdev_queue *txq;
204
205 txq = netdev_get_tx_queue(dev, i);
206 if (netif_tx_queue_stopped(txq)) {
207 some_queue_stopped = 1;
208 break;
209 }
210 }
218 211
219 printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", 212 if (some_queue_stopped &&
213 time_after(jiffies, (dev->trans_start +
214 dev->watchdog_timeo))) {
215 printk(KERN_INFO "NETDEV WATCHDOG: %s: "
216 "transmit timed out\n",
220 dev->name); 217 dev->name);
221 dev->tx_timeout(dev); 218 dev->tx_timeout(dev);
222 WARN_ON_ONCE(1); 219 WARN_ON_ONCE(1);
223 } 220 }
224 if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) 221 if (!mod_timer(&dev->watchdog_timer,
222 round_jiffies(jiffies +
223 dev->watchdog_timeo)))
225 dev_hold(dev); 224 dev_hold(dev);
226 } 225 }
227 } 226 }
@@ -317,12 +316,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
317 .owner = THIS_MODULE, 316 .owner = THIS_MODULE,
318}; 317};
319 318
319static struct netdev_queue noop_netdev_queue = {
320 .qdisc = &noop_qdisc,
321};
322
320struct Qdisc noop_qdisc = { 323struct Qdisc noop_qdisc = {
321 .enqueue = noop_enqueue, 324 .enqueue = noop_enqueue,
322 .dequeue = noop_dequeue, 325 .dequeue = noop_dequeue,
323 .flags = TCQ_F_BUILTIN, 326 .flags = TCQ_F_BUILTIN,
324 .ops = &noop_qdisc_ops, 327 .ops = &noop_qdisc_ops,
325 .list = LIST_HEAD_INIT(noop_qdisc.list), 328 .list = LIST_HEAD_INIT(noop_qdisc.list),
329 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
330 .dev_queue = &noop_netdev_queue,
326}; 331};
327EXPORT_SYMBOL(noop_qdisc); 332EXPORT_SYMBOL(noop_qdisc);
328 333
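The static noop_netdev_queue exists because helpers now reach the device and the root lock through the qdisc's dev_queue backpointer, so even built-in qdiscs need a valid one. A sketch of the accessor used throughout this patch (assumed to live in include/net/sch_generic.h):

	static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
	{
		return qdisc->dev_queue->dev;
	}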
@@ -344,103 +349,49 @@ static struct Qdisc noqueue_qdisc = {
344}; 349};
345 350
346 351
347static const u8 prio2band[TC_PRIO_MAX+1] = 352static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
348 { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
349
350/* 3-band FIFO queue: old style, but should be a bit faster than
351 generic prio+fifo combination.
352 */
353
354#define PFIFO_FAST_BANDS 3
355
356static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
357 struct Qdisc *qdisc)
358{ 353{
359 struct sk_buff_head *list = qdisc_priv(qdisc); 354 struct sk_buff_head *list = &qdisc->q;
360 return list + prio2band[skb->priority & TC_PRIO_MAX];
361}
362
363static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
364{
365 struct sk_buff_head *list = prio2list(skb, qdisc);
366 355
367 if (skb_queue_len(list) < qdisc->dev->tx_queue_len) { 356 if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
368 qdisc->q.qlen++;
369 return __qdisc_enqueue_tail(skb, qdisc, list); 357 return __qdisc_enqueue_tail(skb, qdisc, list);
370 }
371 358
372 return qdisc_drop(skb, qdisc); 359 return qdisc_drop(skb, qdisc);
373} 360}
374 361
375static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) 362static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
376{ 363{
377 int prio; 364 struct sk_buff_head *list = &qdisc->q;
378 struct sk_buff_head *list = qdisc_priv(qdisc);
379 365
380 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { 366 if (!skb_queue_empty(list))
381 if (!skb_queue_empty(list + prio)) { 367 return __qdisc_dequeue_head(qdisc, list);
382 qdisc->q.qlen--;
383 return __qdisc_dequeue_head(qdisc, list + prio);
384 }
385 }
386 368
387 return NULL; 369 return NULL;
388} 370}
389 371
390static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) 372static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
391{ 373{
392 qdisc->q.qlen++; 374 return __qdisc_requeue(skb, qdisc, &qdisc->q);
393 return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
394} 375}
395 376
396static void pfifo_fast_reset(struct Qdisc* qdisc) 377static void fifo_fast_reset(struct Qdisc* qdisc)
397{ 378{
398 int prio; 379 __qdisc_reset_queue(qdisc, &qdisc->q);
399 struct sk_buff_head *list = qdisc_priv(qdisc);
400
401 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
402 __qdisc_reset_queue(qdisc, list + prio);
403
404 qdisc->qstats.backlog = 0; 380 qdisc->qstats.backlog = 0;
405 qdisc->q.qlen = 0;
406}
407
408static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
409{
410 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
411
412 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
413 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
414 return skb->len;
415
416nla_put_failure:
417 return -1;
418}
419
420static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
421{
422 int prio;
423 struct sk_buff_head *list = qdisc_priv(qdisc);
424
425 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
426 skb_queue_head_init(list + prio);
427
428 return 0;
429} 381}
430 382
431static struct Qdisc_ops pfifo_fast_ops __read_mostly = { 383static struct Qdisc_ops fifo_fast_ops __read_mostly = {
432 .id = "pfifo_fast", 384 .id = "fifo_fast",
433 .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), 385 .priv_size = 0,
434 .enqueue = pfifo_fast_enqueue, 386 .enqueue = fifo_fast_enqueue,
435 .dequeue = pfifo_fast_dequeue, 387 .dequeue = fifo_fast_dequeue,
436 .requeue = pfifo_fast_requeue, 388 .requeue = fifo_fast_requeue,
437 .init = pfifo_fast_init, 389 .reset = fifo_fast_reset,
438 .reset = pfifo_fast_reset,
439 .dump = pfifo_fast_dump,
440 .owner = THIS_MODULE, 390 .owner = THIS_MODULE,
441}; 391};
442 392
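Note why the explicit qdisc->q.qlen++/-- bookkeeping from pfifo_fast disappears in the fifo_fast rewrite: the qdisc now queues on &qdisc->q itself, and struct Qdisc's q is a struct sk_buff_head whose qlen member is maintained by the generic __skb_queue_tail()/__skb_dequeue() operations inside the __qdisc_*() helpers. Roughly:

	struct sk_buff_head {
		struct sk_buff	*next;
		struct sk_buff	*prev;
		__u32		qlen;	/* kept current by __skb_queue_tail() etc. */
		spinlock_t	lock;
	};

The old code needed manual counting only because its three per-band lists were separate from qdisc->q.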
443struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) 393struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
394 struct Qdisc_ops *ops)
444{ 395{
445 void *p; 396 void *p;
446 struct Qdisc *sch; 397 struct Qdisc *sch;
@@ -462,8 +413,8 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
462 sch->ops = ops; 413 sch->ops = ops;
463 sch->enqueue = ops->enqueue; 414 sch->enqueue = ops->enqueue;
464 sch->dequeue = ops->dequeue; 415 sch->dequeue = ops->dequeue;
465 sch->dev = dev; 416 sch->dev_queue = dev_queue;
466 dev_hold(dev); 417 dev_hold(qdisc_dev(sch));
467 atomic_set(&sch->refcnt, 1); 418 atomic_set(&sch->refcnt, 1);
468 419
469 return sch; 420 return sch;
@@ -471,15 +422,16 @@ errout:
471 return ERR_PTR(err); 422 return ERR_PTR(err);
472} 423}
473 424
474struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, 425struct Qdisc * qdisc_create_dflt(struct net_device *dev,
426 struct netdev_queue *dev_queue,
427 struct Qdisc_ops *ops,
475 unsigned int parentid) 428 unsigned int parentid)
476{ 429{
477 struct Qdisc *sch; 430 struct Qdisc *sch;
478 431
479 sch = qdisc_alloc(dev, ops); 432 sch = qdisc_alloc(dev_queue, ops);
480 if (IS_ERR(sch)) 433 if (IS_ERR(sch))
481 goto errout; 434 goto errout;
482 sch->stats_lock = &dev->queue_lock;
483 sch->parent = parentid; 435 sch->parent = parentid;
484 436
485 if (!ops->init || ops->init(sch, NULL) == 0) 437 if (!ops->init || ops->init(sch, NULL) == 0)
@@ -491,7 +443,7 @@ errout:
491} 443}
492EXPORT_SYMBOL(qdisc_create_dflt); 444EXPORT_SYMBOL(qdisc_create_dflt);
493 445
494/* Under dev->queue_lock and BH! */ 446/* Under qdisc_root_lock(qdisc) and BH! */
495 447
496void qdisc_reset(struct Qdisc *qdisc) 448void qdisc_reset(struct Qdisc *qdisc)
497{ 449{
@@ -508,86 +460,167 @@ EXPORT_SYMBOL(qdisc_reset);
508static void __qdisc_destroy(struct rcu_head *head) 460static void __qdisc_destroy(struct rcu_head *head)
509{ 461{
510 struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); 462 struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
463 const struct Qdisc_ops *ops = qdisc->ops;
464
465 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
466 if (ops->reset)
467 ops->reset(qdisc);
468 if (ops->destroy)
469 ops->destroy(qdisc);
470
471 module_put(ops->owner);
472 dev_put(qdisc_dev(qdisc));
473
474 kfree_skb(qdisc->gso_skb);
475
511 kfree((char *) qdisc - qdisc->padded); 476 kfree((char *) qdisc - qdisc->padded);
512} 477}
513 478
514/* Under dev->queue_lock and BH! */ 479/* Under qdisc_root_lock(qdisc) and BH! */
515 480
516void qdisc_destroy(struct Qdisc *qdisc) 481void qdisc_destroy(struct Qdisc *qdisc)
517{ 482{
518 const struct Qdisc_ops *ops = qdisc->ops; 483 struct net_device *dev = qdisc_dev(qdisc);
519 484
520 if (qdisc->flags & TCQ_F_BUILTIN || 485 if (qdisc->flags & TCQ_F_BUILTIN ||
521 !atomic_dec_and_test(&qdisc->refcnt)) 486 !atomic_dec_and_test(&qdisc->refcnt))
522 return; 487 return;
523 488
489 spin_lock_bh(&dev->qdisc_list_lock);
524 list_del(&qdisc->list); 490 list_del(&qdisc->list);
525 gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); 491 spin_unlock_bh(&dev->qdisc_list_lock);
526 if (ops->reset)
527 ops->reset(qdisc);
528 if (ops->destroy)
529 ops->destroy(qdisc);
530 492
531 module_put(ops->owner);
532 dev_put(qdisc->dev);
533 call_rcu(&qdisc->q_rcu, __qdisc_destroy); 493 call_rcu(&qdisc->q_rcu, __qdisc_destroy);
534} 494}
535EXPORT_SYMBOL(qdisc_destroy); 495EXPORT_SYMBOL(qdisc_destroy);
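All substantive teardown (estimator, ops->reset/destroy, module and device references, the stranded gso_skb) now runs inside the RCU callback, which recovers the qdisc from its embedded rcu_head with container_of(). The recovery trick is self-contained enough to demonstrate in plain C; a real call_rcu() would simply defer the callback past a grace period:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; };

struct qdisc_model {
	unsigned int handle;
	struct rcu_head q_rcu;	/* embedded, as in struct Qdisc */
};

static void destroy_cb(struct rcu_head *head)
{
	struct qdisc_model *q = container_of(head, struct qdisc_model, q_rcu);

	printf("destroying qdisc %x\n", q->handle);
}

int main(void)
{
	struct qdisc_model q = { .handle = 0x8001 };

	destroy_cb(&q.q_rcu);	/* what call_rcu() would invoke later */
	return 0;
}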
536 496
497static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
498{
499 unsigned int i;
500
501 for (i = 0; i < dev->num_tx_queues; i++) {
502 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
503
504 if (txq->qdisc_sleeping != &noop_qdisc)
505 return false;
506 }
507 return true;
508}
509
510static void attach_one_default_qdisc(struct net_device *dev,
511 struct netdev_queue *dev_queue,
512 void *_unused)
513{
514 struct Qdisc *qdisc;
515
516 if (dev->tx_queue_len) {
517 qdisc = qdisc_create_dflt(dev, dev_queue,
518 &fifo_fast_ops, TC_H_ROOT);
519 if (!qdisc) {
520 printk(KERN_INFO "%s: activation failed\n", dev->name);
521 return;
522 }
523 spin_lock_bh(&dev->qdisc_list_lock);
524 list_add_tail(&qdisc->list, &dev->qdisc_list);
525 spin_unlock_bh(&dev->qdisc_list_lock);
526 } else {
527 qdisc = &noqueue_qdisc;
528 }
529 dev_queue->qdisc_sleeping = qdisc;
530}
531
532static void transition_one_qdisc(struct net_device *dev,
533 struct netdev_queue *dev_queue,
534 void *_need_watchdog)
535{
536 struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
537 int *need_watchdog_p = _need_watchdog;
538
539 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
540 if (new_qdisc != &noqueue_qdisc)
541 *need_watchdog_p = 1;
542}
543
537void dev_activate(struct net_device *dev) 544void dev_activate(struct net_device *dev)
538{ 545{
546 int need_watchdog;
547
539 /* No queueing discipline is attached to device; 548 /* No queueing discipline is attached to device;
540 create default one i.e. pfifo_fast for devices, 549 * create default one i.e. fifo_fast for devices,
541 which need queueing and noqueue_qdisc for 550 * which need queueing and noqueue_qdisc for
542 virtual interfaces 551 * virtual interfaces.
543 */ 552 */
544 553
545 if (dev->qdisc_sleeping == &noop_qdisc) { 554 if (dev_all_qdisc_sleeping_noop(dev))
546 struct Qdisc *qdisc; 555 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
547 if (dev->tx_queue_len) {
548 qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
549 TC_H_ROOT);
550 if (qdisc == NULL) {
551 printk(KERN_INFO "%s: activation failed\n", dev->name);
552 return;
553 }
554 list_add_tail(&qdisc->list, &dev->qdisc_list);
555 } else {
556 qdisc = &noqueue_qdisc;
557 }
558 dev->qdisc_sleeping = qdisc;
559 }
560 556
561 if (!netif_carrier_ok(dev)) 557 if (!netif_carrier_ok(dev))
562 /* Delay activation until next carrier-on event */ 558 /* Delay activation until next carrier-on event */
563 return; 559 return;
564 560
565 spin_lock_bh(&dev->queue_lock); 561 need_watchdog = 0;
566 rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping); 562 netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
567 if (dev->qdisc != &noqueue_qdisc) { 563
564 if (need_watchdog) {
568 dev->trans_start = jiffies; 565 dev->trans_start = jiffies;
569 dev_watchdog_up(dev); 566 dev_watchdog_up(dev);
570 } 567 }
571 spin_unlock_bh(&dev->queue_lock);
572} 568}
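dev_activate() no longer assigns dev->qdisc directly; every step is expressed as a callback applied to each TX queue. netdev_for_each_tx_queue() itself is not shown in this diff, but from its call sites it is presumably a loop of roughly this shape (num_tx_queues and _tx are assumed names):

struct netdev_queue_sketch { int placeholder; };

struct net_device_sketch {
	unsigned int num_tx_queues;
	struct netdev_queue_sketch *_tx;	/* array of TX queues */
};

static void for_each_tx_queue(struct net_device_sketch *dev,
			      void (*f)(struct net_device_sketch *,
					struct netdev_queue_sketch *, void *),
			      void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}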
573 569
574void dev_deactivate(struct net_device *dev) 570static void dev_deactivate_queue(struct net_device *dev,
571 struct netdev_queue *dev_queue,
572 void *_qdisc_default)
575{ 573{
574 struct Qdisc *qdisc_default = _qdisc_default;
575 struct sk_buff *skb = NULL;
576 struct Qdisc *qdisc; 576 struct Qdisc *qdisc;
577 struct sk_buff *skb;
578 int running;
579 577
580 spin_lock_bh(&dev->queue_lock); 578 qdisc = dev_queue->qdisc;
581 qdisc = dev->qdisc; 579 if (qdisc) {
582 dev->qdisc = &noop_qdisc; 580 spin_lock_bh(qdisc_lock(qdisc));
583 581
584 qdisc_reset(qdisc); 582 dev_queue->qdisc = qdisc_default;
583 qdisc_reset(qdisc);
585 584
586 skb = dev->gso_skb; 585 spin_unlock_bh(qdisc_lock(qdisc));
587 dev->gso_skb = NULL; 586 }
588 spin_unlock_bh(&dev->queue_lock);
589 587
590 kfree_skb(skb); 588 kfree_skb(skb);
589}
590
591static bool some_qdisc_is_running(struct net_device *dev, int lock)
592{
593 unsigned int i;
594
595 for (i = 0; i < dev->num_tx_queues; i++) {
596 struct netdev_queue *dev_queue;
597 spinlock_t *root_lock;
598 struct Qdisc *q;
599 int val;
600
601 dev_queue = netdev_get_tx_queue(dev, i);
602 q = dev_queue->qdisc;
603 root_lock = qdisc_root_lock(q);
604
605 if (lock)
606 spin_lock_bh(root_lock);
607
608 val = test_bit(__QDISC_STATE_RUNNING, &q->state);
609
610 if (lock)
611 spin_unlock_bh(root_lock);
612
613 if (val)
614 return true;
615 }
616 return false;
617}
618
619void dev_deactivate(struct net_device *dev)
620{
621 bool running;
622
623 netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
591 624
592 dev_watchdog_down(dev); 625 dev_watchdog_down(dev);
593 626
@@ -596,16 +629,14 @@ void dev_deactivate(struct net_device *dev)
596 629
597 /* Wait for outstanding qdisc_run calls. */ 630 /* Wait for outstanding qdisc_run calls. */
598 do { 631 do {
599 while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) 632 while (some_qdisc_is_running(dev, 0))
600 yield(); 633 yield();
601 634
602 /* 635 /*
603 * Double-check inside queue lock to ensure that all effects 636 * Double-check inside queue lock to ensure that all effects
604 * of the queue run are visible when we return. 637 * of the queue run are visible when we return.
605 */ 638 */
606 spin_lock_bh(&dev->queue_lock); 639 running = some_qdisc_is_running(dev, 1);
607 running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
608 spin_unlock_bh(&dev->queue_lock);
609 640
610 /* 641 /*
611 * The running flag should never be set at this point because 642 * The running flag should never be set at this point because
@@ -618,32 +649,46 @@ void dev_deactivate(struct net_device *dev)
618 } while (WARN_ON_ONCE(running)); 649 } while (WARN_ON_ONCE(running));
619} 650}
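The quiescence loop above first polls every queue without taking any lock, then re-checks with each root lock held so that all side effects of a concurrent qdisc_run() are visible before dev_deactivate() returns. The idiom, a cheap unlocked poll followed by a locked double-check, can be sketched with pthreads (a model of the pattern, not the kernel code):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct queue_state {
	pthread_mutex_t lock;
	bool running;			/* mirrors __QDISC_STATE_RUNNING */
};

static bool any_running(struct queue_state *q, int n, bool take_lock)
{
	for (int i = 0; i < n; i++) {
		bool val;

		if (take_lock)
			pthread_mutex_lock(&q[i].lock);
		val = q[i].running;
		if (take_lock)
			pthread_mutex_unlock(&q[i].lock);
		if (val)
			return true;
	}
	return false;
}

static void wait_for_quiescence(struct queue_state *q, int n)
{
	bool running;

	do {
		while (any_running(q, n, false))	/* cheap unlocked poll */
			sched_yield();
		running = any_running(q, n, true);	/* locked re-check */
	} while (running);
}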
620 651
652static void dev_init_scheduler_queue(struct net_device *dev,
653 struct netdev_queue *dev_queue,
654 void *_qdisc)
655{
656 struct Qdisc *qdisc = _qdisc;
657
658 dev_queue->qdisc = qdisc;
659 dev_queue->qdisc_sleeping = qdisc;
660}
661
621void dev_init_scheduler(struct net_device *dev) 662void dev_init_scheduler(struct net_device *dev)
622{ 663{
623 qdisc_lock_tree(dev); 664 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
624 dev->qdisc = &noop_qdisc; 665 dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
625 dev->qdisc_sleeping = &noop_qdisc;
626 INIT_LIST_HEAD(&dev->qdisc_list);
627 qdisc_unlock_tree(dev);
628 666
629 setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev); 667 setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
630} 668}
631 669
632void dev_shutdown(struct net_device *dev) 670static void shutdown_scheduler_queue(struct net_device *dev,
671 struct netdev_queue *dev_queue,
672 void *_qdisc_default)
633{ 673{
634 struct Qdisc *qdisc; 674 struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
675 struct Qdisc *qdisc_default = _qdisc_default;
676
677 if (qdisc) {
678 spinlock_t *root_lock = qdisc_root_lock(qdisc);
635 679
636 qdisc_lock_tree(dev); 680 dev_queue->qdisc = qdisc_default;
637 qdisc = dev->qdisc_sleeping; 681 dev_queue->qdisc_sleeping = qdisc_default;
638 dev->qdisc = &noop_qdisc; 682
639 dev->qdisc_sleeping = &noop_qdisc; 683 spin_lock(root_lock);
640 qdisc_destroy(qdisc);
641#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
642 if ((qdisc = dev->qdisc_ingress) != NULL) {
643 dev->qdisc_ingress = NULL;
644 qdisc_destroy(qdisc); 684 qdisc_destroy(qdisc);
685 spin_unlock(root_lock);
645 } 686 }
646#endif 687}
688
689void dev_shutdown(struct net_device *dev)
690{
691 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
692 shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
647 BUG_TRAP(!timer_pending(&dev->watchdog_timer)); 693 BUG_TRAP(!timer_pending(&dev->watchdog_timer));
648 qdisc_unlock_tree(dev);
649} 694}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index c89fba56db56..39fa28511f07 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -164,7 +164,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
164 * if no default DP has been configured. This 164 * if no default DP has been configured. This
165 * allows for DP flows to be left untouched. 165 * allows for DP flows to be left untouched.
166 */ 166 */
167 if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len) 167 if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
168 return qdisc_enqueue_tail(skb, sch); 168 return qdisc_enqueue_tail(skb, sch);
169 else 169 else
170 goto drop; 170 goto drop;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index e817aa00441d..5090708ba384 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -113,7 +113,7 @@ enum hfsc_class_flags
113 113
114struct hfsc_class 114struct hfsc_class
115{ 115{
116 u32 classid; /* class id */ 116 struct Qdisc_class_common cl_common;
117 unsigned int refcnt; /* usage count */ 117 unsigned int refcnt; /* usage count */
118 118
119 struct gnet_stats_basic bstats; 119 struct gnet_stats_basic bstats;
@@ -134,7 +134,6 @@ struct hfsc_class
134 struct rb_node vt_node; /* parent's vt_tree member */ 134 struct rb_node vt_node; /* parent's vt_tree member */
135 struct rb_root cf_tree; /* active children sorted by cl_f */ 135 struct rb_root cf_tree; /* active children sorted by cl_f */
136 struct rb_node cf_node; /* parent's cf_heap member */ 136 struct rb_node cf_node; /* parent's cf_heap member */
137 struct list_head hlist; /* hash list member */
138 struct list_head dlist; /* drop list member */ 137 struct list_head dlist; /* drop list member */
139 138
140 u64 cl_total; /* total work in bytes */ 139 u64 cl_total; /* total work in bytes */
@@ -177,13 +176,11 @@ struct hfsc_class
177 unsigned long cl_nactive; /* number of active children */ 176 unsigned long cl_nactive; /* number of active children */
178}; 177};
179 178
180#define HFSC_HSIZE 16
181
182struct hfsc_sched 179struct hfsc_sched
183{ 180{
184 u16 defcls; /* default class id */ 181 u16 defcls; /* default class id */
185 struct hfsc_class root; /* root class */ 182 struct hfsc_class root; /* root class */
186 struct list_head clhash[HFSC_HSIZE]; /* class hash */ 183 struct Qdisc_class_hash clhash; /* class hash */
187 struct rb_root eligible; /* eligible tree */ 184 struct rb_root eligible; /* eligible tree */
188 struct list_head droplist; /* active leaf class list (for 185 struct list_head droplist; /* active leaf class list (for
189 dropping) */ 186 dropping) */
@@ -933,26 +930,16 @@ hfsc_adjust_levels(struct hfsc_class *cl)
933 } while ((cl = cl->cl_parent) != NULL); 930 } while ((cl = cl->cl_parent) != NULL);
934} 931}
935 932
936static inline unsigned int
937hfsc_hash(u32 h)
938{
939 h ^= h >> 8;
940 h ^= h >> 4;
941
942 return h & (HFSC_HSIZE - 1);
943}
944
945static inline struct hfsc_class * 933static inline struct hfsc_class *
946hfsc_find_class(u32 classid, struct Qdisc *sch) 934hfsc_find_class(u32 classid, struct Qdisc *sch)
947{ 935{
948 struct hfsc_sched *q = qdisc_priv(sch); 936 struct hfsc_sched *q = qdisc_priv(sch);
949 struct hfsc_class *cl; 937 struct Qdisc_class_common *clc;
950 938
951 list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) { 939 clc = qdisc_class_find(&q->clhash, classid);
952 if (cl->classid == classid) 940 if (clc == NULL)
953 return cl; 941 return NULL;
954 } 942 return container_of(clc, struct hfsc_class, cl_common);
955 return NULL;
956} 943}
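hfsc's private 16-bucket table gives way to the generic Qdisc_class_hash: each class embeds a Qdisc_class_common, lookup returns that common node, and container_of() recovers the enclosing class. A userspace model of the embed-and-recover lookup (bucket layout and hash are simplified assumptions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct class_common { unsigned int classid; struct class_common *next; };

struct hfsc_class_model {
	int level;
	struct class_common common;	/* embedded, like cl_common above */
};

static struct class_common *class_find(struct class_common **hash,
				       unsigned int nbuckets, unsigned int id)
{
	struct class_common *c;

	for (c = hash[id & (nbuckets - 1)]; c; c = c->next)
		if (c->classid == id)
			return c;
	return NULL;
}

int main(void)
{
	struct hfsc_class_model cl = { .level = 0, .common = { .classid = 0x10001 } };
	struct class_common *buckets[16] = { [1] = &cl.common };	/* 0x10001 & 15 == 1 */
	struct class_common *clc = class_find(buckets, 16, 0x10001);

	if (clc)
		printf("level %d\n",
		       container_of(clc, struct hfsc_class_model, common)->level);
	return 0;
}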
957 944
958static void 945static void
@@ -1032,7 +1019,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1032 1019
1033 if (cl != NULL) { 1020 if (cl != NULL) {
1034 if (parentid) { 1021 if (parentid) {
1035 if (cl->cl_parent && cl->cl_parent->classid != parentid) 1022 if (cl->cl_parent &&
1023 cl->cl_parent->cl_common.classid != parentid)
1036 return -EINVAL; 1024 return -EINVAL;
1037 if (cl->cl_parent == NULL && parentid != TC_H_ROOT) 1025 if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
1038 return -EINVAL; 1026 return -EINVAL;
@@ -1057,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1057 1045
1058 if (tca[TCA_RATE]) 1046 if (tca[TCA_RATE])
1059 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1060 &sch->dev->queue_lock, 1048 qdisc_root_lock(sch),
1061 tca[TCA_RATE]); 1049 tca[TCA_RATE]);
1062 return 0; 1050 return 0;
1063 } 1051 }
@@ -1091,11 +1079,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1091 if (usc != NULL) 1079 if (usc != NULL)
1092 hfsc_change_usc(cl, usc, 0); 1080 hfsc_change_usc(cl, usc, 0);
1093 1081
1082 cl->cl_common.classid = classid;
1094 cl->refcnt = 1; 1083 cl->refcnt = 1;
1095 cl->classid = classid;
1096 cl->sched = q; 1084 cl->sched = q;
1097 cl->cl_parent = parent; 1085 cl->cl_parent = parent;
1098 cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 1086 cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1087 &pfifo_qdisc_ops, classid);
1099 if (cl->qdisc == NULL) 1088 if (cl->qdisc == NULL)
1100 cl->qdisc = &noop_qdisc; 1089 cl->qdisc = &noop_qdisc;
1101 INIT_LIST_HEAD(&cl->children); 1090 INIT_LIST_HEAD(&cl->children);
@@ -1103,7 +1092,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1103 cl->cf_tree = RB_ROOT; 1092 cl->cf_tree = RB_ROOT;
1104 1093
1105 sch_tree_lock(sch); 1094 sch_tree_lock(sch);
1106 list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]); 1095 qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
1107 list_add_tail(&cl->siblings, &parent->children); 1096 list_add_tail(&cl->siblings, &parent->children);
1108 if (parent->level == 0) 1097 if (parent->level == 0)
1109 hfsc_purge_queue(sch, parent); 1098 hfsc_purge_queue(sch, parent);
@@ -1111,9 +1100,11 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1111 cl->cl_pcvtoff = parent->cl_cvtoff; 1100 cl->cl_pcvtoff = parent->cl_cvtoff;
1112 sch_tree_unlock(sch); 1101 sch_tree_unlock(sch);
1113 1102
1103 qdisc_class_hash_grow(sch, &q->clhash);
1104
1114 if (tca[TCA_RATE]) 1105 if (tca[TCA_RATE])
1115 gen_new_estimator(&cl->bstats, &cl->rate_est, 1106 gen_new_estimator(&cl->bstats, &cl->rate_est,
1116 &sch->dev->queue_lock, tca[TCA_RATE]); 1107 qdisc_root_lock(sch), tca[TCA_RATE]);
1117 *arg = (unsigned long)cl; 1108 *arg = (unsigned long)cl;
1118 return 0; 1109 return 0;
1119} 1110}
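The qdisc_class_hash_grow() call after class insertion lets the shared table resize under load instead of staying at the fixed 16 buckets the old code hard-wired. The usual grow-on-load rehash looks roughly like this (the threshold and layout are assumptions; the helper's exact policy is not visible in this diff):

#include <stdlib.h>

struct node { unsigned int classid; struct node *next; };

struct class_hash {
	struct node **buckets;
	unsigned int nbuckets;	/* power of two */
	unsigned int nelems;
};

/* double the table once the load factor passes roughly 0.75 */
static void hash_maybe_grow(struct class_hash *h)
{
	unsigned int newsize, i;
	struct node **nb;

	if (h->nelems * 4 < h->nbuckets * 3)
		return;

	newsize = h->nbuckets * 2;
	nb = calloc(newsize, sizeof(*nb));
	if (!nb)
		return;		/* growth is an optimization; keep the old table */

	for (i = 0; i < h->nbuckets; i++) {
		struct node *n = h->buckets[i], *next;

		for (; n; n = next) {
			unsigned int b;

			next = n->next;
			b = n->classid & (newsize - 1);
			n->next = nb[b];
			nb[b] = n;
		}
	}
	free(h->buckets);
	h->buckets = nb;
	h->nbuckets = newsize;
}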
@@ -1145,7 +1136,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
1145 hfsc_adjust_levels(cl->cl_parent); 1136 hfsc_adjust_levels(cl->cl_parent);
1146 1137
1147 hfsc_purge_queue(sch, cl); 1138 hfsc_purge_queue(sch, cl);
1148 list_del(&cl->hlist); 1139 qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
1149 1140
1150 if (--cl->refcnt == 0) 1141 if (--cl->refcnt == 0)
1151 hfsc_destroy_class(sch, cl); 1142 hfsc_destroy_class(sch, cl);
@@ -1211,8 +1202,9 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1211 if (cl->level > 0) 1202 if (cl->level > 0)
1212 return -EINVAL; 1203 return -EINVAL;
1213 if (new == NULL) { 1204 if (new == NULL) {
1214 new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1205 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1215 cl->classid); 1206 &pfifo_qdisc_ops,
1207 cl->cl_common.classid);
1216 if (new == NULL) 1208 if (new == NULL)
1217 new = &noop_qdisc; 1209 new = &noop_qdisc;
1218 } 1210 }
@@ -1345,8 +1337,9 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1345 struct hfsc_class *cl = (struct hfsc_class *)arg; 1337 struct hfsc_class *cl = (struct hfsc_class *)arg;
1346 struct nlattr *nest; 1338 struct nlattr *nest;
1347 1339
1348 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; 1340 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
1349 tcm->tcm_handle = cl->classid; 1341 TC_H_ROOT;
1342 tcm->tcm_handle = cl->cl_common.classid;
1350 if (cl->level == 0) 1343 if (cl->level == 0)
1351 tcm->tcm_info = cl->qdisc->handle; 1344 tcm->tcm_info = cl->qdisc->handle;
1352 1345
@@ -1390,14 +1383,16 @@ static void
1390hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1383hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1391{ 1384{
1392 struct hfsc_sched *q = qdisc_priv(sch); 1385 struct hfsc_sched *q = qdisc_priv(sch);
1386 struct hlist_node *n;
1393 struct hfsc_class *cl; 1387 struct hfsc_class *cl;
1394 unsigned int i; 1388 unsigned int i;
1395 1389
1396 if (arg->stop) 1390 if (arg->stop)
1397 return; 1391 return;
1398 1392
1399 for (i = 0; i < HFSC_HSIZE; i++) { 1393 for (i = 0; i < q->clhash.hashsize; i++) {
1400 list_for_each_entry(cl, &q->clhash[i], hlist) { 1394 hlist_for_each_entry(cl, n, &q->clhash.hash[i],
1395 cl_common.hnode) {
1401 if (arg->count < arg->skip) { 1396 if (arg->count < arg->skip) {
1402 arg->count++; 1397 arg->count++;
1403 continue; 1398 continue;
@@ -1433,23 +1428,25 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1433{ 1428{
1434 struct hfsc_sched *q = qdisc_priv(sch); 1429 struct hfsc_sched *q = qdisc_priv(sch);
1435 struct tc_hfsc_qopt *qopt; 1430 struct tc_hfsc_qopt *qopt;
1436 unsigned int i; 1431 int err;
1437 1432
1438 if (opt == NULL || nla_len(opt) < sizeof(*qopt)) 1433 if (opt == NULL || nla_len(opt) < sizeof(*qopt))
1439 return -EINVAL; 1434 return -EINVAL;
1440 qopt = nla_data(opt); 1435 qopt = nla_data(opt);
1441 1436
1442 q->defcls = qopt->defcls; 1437 q->defcls = qopt->defcls;
1443 for (i = 0; i < HFSC_HSIZE; i++) 1438 err = qdisc_class_hash_init(&q->clhash);
1444 INIT_LIST_HEAD(&q->clhash[i]); 1439 if (err < 0)
1440 return err;
1445 q->eligible = RB_ROOT; 1441 q->eligible = RB_ROOT;
1446 INIT_LIST_HEAD(&q->droplist); 1442 INIT_LIST_HEAD(&q->droplist);
1447 skb_queue_head_init(&q->requeue); 1443 skb_queue_head_init(&q->requeue);
1448 1444
1445 q->root.cl_common.classid = sch->handle;
1449 q->root.refcnt = 1; 1446 q->root.refcnt = 1;
1450 q->root.classid = sch->handle;
1451 q->root.sched = q; 1447 q->root.sched = q;
1452 q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1448 q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1449 &pfifo_qdisc_ops,
1453 sch->handle); 1450 sch->handle);
1454 if (q->root.qdisc == NULL) 1451 if (q->root.qdisc == NULL)
1455 q->root.qdisc = &noop_qdisc; 1452 q->root.qdisc = &noop_qdisc;
@@ -1457,7 +1454,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1457 q->root.vt_tree = RB_ROOT; 1454 q->root.vt_tree = RB_ROOT;
1458 q->root.cf_tree = RB_ROOT; 1455 q->root.cf_tree = RB_ROOT;
1459 1456
1460 list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]); 1457 qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
1458 qdisc_class_hash_grow(sch, &q->clhash);
1461 1459
1462 qdisc_watchdog_init(&q->watchdog, sch); 1460 qdisc_watchdog_init(&q->watchdog, sch);
1463 1461
@@ -1520,10 +1518,11 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1520{ 1518{
1521 struct hfsc_sched *q = qdisc_priv(sch); 1519 struct hfsc_sched *q = qdisc_priv(sch);
1522 struct hfsc_class *cl; 1520 struct hfsc_class *cl;
1521 struct hlist_node *n;
1523 unsigned int i; 1522 unsigned int i;
1524 1523
1525 for (i = 0; i < HFSC_HSIZE; i++) { 1524 for (i = 0; i < q->clhash.hashsize; i++) {
1526 list_for_each_entry(cl, &q->clhash[i], hlist) 1525 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
1527 hfsc_reset_class(cl); 1526 hfsc_reset_class(cl);
1528 } 1527 }
1529 __skb_queue_purge(&q->requeue); 1528 __skb_queue_purge(&q->requeue);
@@ -1537,17 +1536,20 @@ static void
1537hfsc_destroy_qdisc(struct Qdisc *sch) 1536hfsc_destroy_qdisc(struct Qdisc *sch)
1538{ 1537{
1539 struct hfsc_sched *q = qdisc_priv(sch); 1538 struct hfsc_sched *q = qdisc_priv(sch);
1540 struct hfsc_class *cl, *next; 1539 struct hlist_node *n, *next;
1540 struct hfsc_class *cl;
1541 unsigned int i; 1541 unsigned int i;
1542 1542
1543 for (i = 0; i < HFSC_HSIZE; i++) { 1543 for (i = 0; i < q->clhash.hashsize; i++) {
1544 list_for_each_entry(cl, &q->clhash[i], hlist) 1544 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
1545 tcf_destroy_chain(&cl->filter_list); 1545 tcf_destroy_chain(&cl->filter_list);
1546 } 1546 }
1547 for (i = 0; i < HFSC_HSIZE; i++) { 1547 for (i = 0; i < q->clhash.hashsize; i++) {
1548 list_for_each_entry_safe(cl, next, &q->clhash[i], hlist) 1548 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1549 cl_common.hnode)
1549 hfsc_destroy_class(sch, cl); 1550 hfsc_destroy_class(sch, cl);
1550 } 1551 }
1552 qdisc_class_hash_destroy(&q->clhash);
1551 __skb_queue_purge(&q->requeue); 1553 __skb_queue_purge(&q->requeue);
1552 qdisc_watchdog_cancel(&q->watchdog); 1554 qdisc_watchdog_cancel(&q->watchdog);
1553} 1555}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3fb58f428f72..ee48457eaa4a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -24,8 +24,6 @@
24 * Jiri Fojtasek 24 * Jiri Fojtasek
25 * fixed requeue routine 25 * fixed requeue routine
26 * and many others. thanks. 26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */ 27 */
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
@@ -53,7 +51,6 @@
53 one less than their parent. 51 one less than their parent.
54*/ 52*/
55 53
56#define HTB_HSIZE 16 /* classid hash size */
57static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */ 54static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
58#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */ 55
59 56
@@ -74,8 +71,8 @@ enum htb_cmode {
74 71
75/* interior & leaf nodes; props specific to leaves are marked L: */ 72/* interior & leaf nodes; props specific to leaves are marked L: */
76struct htb_class { 73struct htb_class {
74 struct Qdisc_class_common common;
77 /* general class parameters */ 75 /* general class parameters */
78 u32 classid;
79 struct gnet_stats_basic bstats; 76 struct gnet_stats_basic bstats;
80 struct gnet_stats_queue qstats; 77 struct gnet_stats_queue qstats;
81 struct gnet_stats_rate_est rate_est; 78 struct gnet_stats_rate_est rate_est;
@@ -84,10 +81,8 @@ struct htb_class {
84 81
85 /* topology */ 82 /* topology */
86 int level; /* our level (see above) */ 83 int level; /* our level (see above) */
84 unsigned int children;
87 struct htb_class *parent; /* parent class */ 85 struct htb_class *parent; /* parent class */
88 struct hlist_node hlist; /* classid hash list item */
89 struct list_head sibling; /* sibling list item */
90 struct list_head children; /* children list */
91 86
92 union { 87 union {
93 struct htb_class_leaf { 88 struct htb_class_leaf {
@@ -142,8 +137,7 @@ static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
142} 137}
143 138
144struct htb_sched { 139struct htb_sched {
145 struct list_head root; /* root classes list */ 140 struct Qdisc_class_hash clhash;
146 struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */
147 struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ 141 struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
148 142
149 /* self list - roots of self generating tree */ 143 /* self list - roots of self generating tree */
@@ -165,7 +159,6 @@ struct htb_sched {
165 159
166 /* filters for qdisc itself */ 160 /* filters for qdisc itself */
167 struct tcf_proto *filter_list; 161 struct tcf_proto *filter_list;
168 int filter_cnt;
169 162
170 int rate2quantum; /* quant = rate / rate2quantum */ 163 int rate2quantum; /* quant = rate / rate2quantum */
171 psched_time_t now; /* cached dequeue time */ 164 psched_time_t now; /* cached dequeue time */
@@ -178,32 +171,16 @@ struct htb_sched {
178 long direct_pkts; 171 long direct_pkts;
179}; 172};
180 173
181/* compute hash of size HTB_HSIZE for given handle */
182static inline int htb_hash(u32 h)
183{
184#if HTB_HSIZE != 16
185#error "Declare new hash for your HTB_HSIZE"
186#endif
187 h ^= h >> 8; /* stolen from cbq_hash */
188 h ^= h >> 4;
189 return h & 0xf;
190}
191
192/* find class in global hash table using given handle */ 174/* find class in global hash table using given handle */
193static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) 175static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
194{ 176{
195 struct htb_sched *q = qdisc_priv(sch); 177 struct htb_sched *q = qdisc_priv(sch);
196 struct hlist_node *p; 178 struct Qdisc_class_common *clc;
197 struct htb_class *cl;
198 179
199 if (TC_H_MAJ(handle) != sch->handle) 180 clc = qdisc_class_find(&q->clhash, handle);
181 if (clc == NULL)
200 return NULL; 182 return NULL;
201 183 return container_of(clc, struct htb_class, common);
202 hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
203 if (cl->classid == handle)
204 return cl;
205 }
206 return NULL;
207} 184}
208 185
209/** 186/**
@@ -284,7 +261,7 @@ static void htb_add_to_id_tree(struct rb_root *root,
284 parent = *p; 261 parent = *p;
285 c = rb_entry(parent, struct htb_class, node[prio]); 262 c = rb_entry(parent, struct htb_class, node[prio]);
286 263
287 if (cl->classid > c->classid) 264 if (cl->common.classid > c->common.classid)
288 p = &parent->rb_right; 265 p = &parent->rb_right;
289 else 266 else
290 p = &parent->rb_left; 267 p = &parent->rb_left;
@@ -448,7 +425,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
448 /* we are removing child which is pointed to from 425 /* we are removing child which is pointed to from
449 parent feed - forget the pointer but remember 426 parent feed - forget the pointer but remember
450 classid */ 427 classid */
451 p->un.inner.last_ptr_id[prio] = cl->classid; 428 p->un.inner.last_ptr_id[prio] = cl->common.classid;
452 p->un.inner.ptr[prio] = NULL; 429 p->un.inner.ptr[prio] = NULL;
453 } 430 }
454 431
@@ -753,10 +730,10 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
753 while (n) { 730 while (n) {
754 struct htb_class *cl = 731 struct htb_class *cl =
755 rb_entry(n, struct htb_class, node[prio]); 732 rb_entry(n, struct htb_class, node[prio]);
756 if (id == cl->classid) 733 if (id == cl->common.classid)
757 return n; 734 return n;
758 735
759 if (id > cl->classid) { 736 if (id > cl->common.classid) {
760 n = n->rb_right; 737 n = n->rb_right;
761 } else { 738 } else {
762 r = n; 739 r = n;
@@ -866,7 +843,7 @@ next:
866 if (!cl->warned) { 843 if (!cl->warned) {
867 printk(KERN_WARNING 844 printk(KERN_WARNING
868 "htb: class %X isn't work conserving ?!\n", 845 "htb: class %X isn't work conserving ?!\n",
869 cl->classid); 846 cl->common.classid);
870 cl->warned = 1; 847 cl->warned = 1;
871 } 848 }
872 q->nwc_hit++; 849 q->nwc_hit++;
@@ -977,13 +954,12 @@ static unsigned int htb_drop(struct Qdisc *sch)
977static void htb_reset(struct Qdisc *sch) 954static void htb_reset(struct Qdisc *sch)
978{ 955{
979 struct htb_sched *q = qdisc_priv(sch); 956 struct htb_sched *q = qdisc_priv(sch);
980 int i; 957 struct htb_class *cl;
981 958 struct hlist_node *n;
982 for (i = 0; i < HTB_HSIZE; i++) { 959 unsigned int i;
983 struct hlist_node *p;
984 struct htb_class *cl;
985 960
986 hlist_for_each_entry(cl, p, q->hash + i, hlist) { 961 for (i = 0; i < q->clhash.hashsize; i++) {
962 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
987 if (cl->level) 963 if (cl->level)
988 memset(&cl->un.inner, 0, sizeof(cl->un.inner)); 964 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
989 else { 965 else {
@@ -1041,16 +1017,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1041 return -EINVAL; 1017 return -EINVAL;
1042 } 1018 }
1043 1019
1044 INIT_LIST_HEAD(&q->root); 1020 err = qdisc_class_hash_init(&q->clhash);
1045 for (i = 0; i < HTB_HSIZE; i++) 1021 if (err < 0)
1046 INIT_HLIST_HEAD(q->hash + i); 1022 return err;
1047 for (i = 0; i < TC_HTB_NUMPRIO; i++) 1023 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1048 INIT_LIST_HEAD(q->drops + i); 1024 INIT_LIST_HEAD(q->drops + i);
1049 1025
1050 qdisc_watchdog_init(&q->watchdog, sch); 1026 qdisc_watchdog_init(&q->watchdog, sch);
1051 skb_queue_head_init(&q->direct_queue); 1027 skb_queue_head_init(&q->direct_queue);
1052 1028
1053 q->direct_qlen = sch->dev->tx_queue_len; 1029 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1054 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ 1030 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1055 q->direct_qlen = 2; 1031 q->direct_qlen = 2;
1056 1032
@@ -1063,11 +1039,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1063 1039
1064static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1065{ 1041{
1042 spinlock_t *root_lock = qdisc_root_lock(sch);
1066 struct htb_sched *q = qdisc_priv(sch); 1043 struct htb_sched *q = qdisc_priv(sch);
1067 struct nlattr *nest; 1044 struct nlattr *nest;
1068 struct tc_htb_glob gopt; 1045 struct tc_htb_glob gopt;
1069 1046
1070 spin_lock_bh(&sch->dev->queue_lock); 1047 spin_lock_bh(root_lock);
1071 1048
1072 gopt.direct_pkts = q->direct_pkts; 1049 gopt.direct_pkts = q->direct_pkts;
1073 gopt.version = HTB_VER; 1050 gopt.version = HTB_VER;
@@ -1081,11 +1058,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1081 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1058 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1082 nla_nest_end(skb, nest); 1059 nla_nest_end(skb, nest);
1083 1060
1084 spin_unlock_bh(&sch->dev->queue_lock); 1061 spin_unlock_bh(root_lock);
1085 return skb->len; 1062 return skb->len;
1086 1063
1087nla_put_failure: 1064nla_put_failure:
1088 spin_unlock_bh(&sch->dev->queue_lock); 1065 spin_unlock_bh(root_lock);
1089 nla_nest_cancel(skb, nest); 1066 nla_nest_cancel(skb, nest);
1090 return -1; 1067 return -1;
1091} 1068}
@@ -1094,12 +1071,13 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1094 struct sk_buff *skb, struct tcmsg *tcm) 1071 struct sk_buff *skb, struct tcmsg *tcm)
1095{ 1072{
1096 struct htb_class *cl = (struct htb_class *)arg; 1073 struct htb_class *cl = (struct htb_class *)arg;
1074 spinlock_t *root_lock = qdisc_root_lock(sch);
1097 struct nlattr *nest; 1075 struct nlattr *nest;
1098 struct tc_htb_opt opt; 1076 struct tc_htb_opt opt;
1099 1077
1100 spin_lock_bh(&sch->dev->queue_lock); 1078 spin_lock_bh(root_lock);
1101 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT; 1079 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1102 tcm->tcm_handle = cl->classid; 1080 tcm->tcm_handle = cl->common.classid;
1103 if (!cl->level && cl->un.leaf.q) 1081 if (!cl->level && cl->un.leaf.q)
1104 tcm->tcm_info = cl->un.leaf.q->handle; 1082 tcm->tcm_info = cl->un.leaf.q->handle;
1105 1083
@@ -1119,11 +1097,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1119 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1097 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1120 1098
1121 nla_nest_end(skb, nest); 1099 nla_nest_end(skb, nest);
1122 spin_unlock_bh(&sch->dev->queue_lock); 1100 spin_unlock_bh(root_lock);
1123 return skb->len; 1101 return skb->len;
1124 1102
1125nla_put_failure: 1103nla_put_failure:
1126 spin_unlock_bh(&sch->dev->queue_lock); 1104 spin_unlock_bh(root_lock);
1127 nla_nest_cancel(skb, nest); 1105 nla_nest_cancel(skb, nest);
1128 return -1; 1106 return -1;
1129} 1107}
@@ -1153,8 +1131,9 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1153 1131
1154 if (cl && !cl->level) { 1132 if (cl && !cl->level) {
1155 if (new == NULL && 1133 if (new == NULL &&
1156 (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1134 (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1157 cl->classid)) 1135 &pfifo_qdisc_ops,
1136 cl->common.classid))
1158 == NULL) 1137 == NULL)
1159 return -ENOBUFS; 1138 return -ENOBUFS;
1160 sch_tree_lock(sch); 1139 sch_tree_lock(sch);
@@ -1195,12 +1174,9 @@ static inline int htb_parent_last_child(struct htb_class *cl)
1195 if (!cl->parent) 1174 if (!cl->parent)
1196 /* the root class */ 1175 /* the root class */
1197 return 0; 1176 return 0;
1198 1177 if (cl->parent->children > 1)
1199 if (!(cl->parent->children.next == &cl->sibling &&
1200 cl->parent->children.prev == &cl->sibling))
1201 /* not the last child */ 1178 /* not the last child */
1202 return 0; 1179 return 0;
1203
1204 return 1; 1180 return 1;
1205} 1181}
1206 1182
@@ -1228,8 +1204,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1228 1204
1229static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) 1205static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1230{ 1206{
1231 struct htb_sched *q = qdisc_priv(sch);
1232
1233 if (!cl->level) { 1207 if (!cl->level) {
1234 BUG_TRAP(cl->un.leaf.q); 1208 BUG_TRAP(cl->un.leaf.q);
1235 qdisc_destroy(cl->un.leaf.q); 1209 qdisc_destroy(cl->un.leaf.q);
@@ -1239,21 +1213,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1239 qdisc_put_rtab(cl->ceil); 1213 qdisc_put_rtab(cl->ceil);
1240 1214
1241 tcf_destroy_chain(&cl->filter_list); 1215 tcf_destroy_chain(&cl->filter_list);
1242
1243 while (!list_empty(&cl->children))
1244 htb_destroy_class(sch, list_entry(cl->children.next,
1245 struct htb_class, sibling));
1246
1247 /* note: this delete may happen twice (see htb_delete) */
1248 hlist_del_init(&cl->hlist);
1249 list_del(&cl->sibling);
1250
1251 if (cl->prio_activity)
1252 htb_deactivate(q, cl);
1253
1254 if (cl->cmode != HTB_CAN_SEND)
1255 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1256
1257 kfree(cl); 1216 kfree(cl);
1258} 1217}
1259 1218
@@ -1261,6 +1220,9 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1261static void htb_destroy(struct Qdisc *sch) 1220static void htb_destroy(struct Qdisc *sch)
1262{ 1221{
1263 struct htb_sched *q = qdisc_priv(sch); 1222 struct htb_sched *q = qdisc_priv(sch);
1223 struct hlist_node *n, *next;
1224 struct htb_class *cl;
1225 unsigned int i;
1264 1226
1265 qdisc_watchdog_cancel(&q->watchdog); 1227 qdisc_watchdog_cancel(&q->watchdog);
1266 /* This line used to be after htb_destroy_class call below 1228 /* This line used to be after htb_destroy_class call below
@@ -1269,10 +1231,16 @@ static void htb_destroy(struct Qdisc *sch)
1269 unbind_filter on it (without Oops). */ 1231 unbind_filter on it (without Oops). */
1270 tcf_destroy_chain(&q->filter_list); 1232 tcf_destroy_chain(&q->filter_list);
1271 1233
1272 while (!list_empty(&q->root)) 1234 for (i = 0; i < q->clhash.hashsize; i++) {
1273 htb_destroy_class(sch, list_entry(q->root.next, 1235 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1274 struct htb_class, sibling)); 1236 tcf_destroy_chain(&cl->filter_list);
1275 1237 }
1238 for (i = 0; i < q->clhash.hashsize; i++) {
1239 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1240 common.hnode)
1241 htb_destroy_class(sch, cl);
1242 }
1243 qdisc_class_hash_destroy(&q->clhash);
1276 __skb_queue_purge(&q->direct_queue); 1244 __skb_queue_purge(&q->direct_queue);
1277} 1245}
1278 1246
@@ -1287,12 +1255,13 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
1287 // TODO: why don't allow to delete subtree ? references ? does 1255 // TODO: why don't allow to delete subtree ? references ? does
1288 // tc subsys quarantee us that in htb_destroy it holds no class 1256 // tc subsys quarantee us that in htb_destroy it holds no class
1289 // refs so that we can remove children safely there ? 1257 // refs so that we can remove children safely there ?
1290 if (!list_empty(&cl->children) || cl->filter_cnt) 1258 if (cl->children || cl->filter_cnt)
1291 return -EBUSY; 1259 return -EBUSY;
1292 1260
1293 if (!cl->level && htb_parent_last_child(cl)) { 1261 if (!cl->level && htb_parent_last_child(cl)) {
1294 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1262 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1295 cl->parent->classid); 1263 &pfifo_qdisc_ops,
1264 cl->parent->common.classid);
1296 last_child = 1; 1265 last_child = 1;
1297 } 1266 }
1298 1267
@@ -1305,11 +1274,15 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
1305 } 1274 }
1306 1275
1307 /* delete from hash and active; remainder in destroy_class */ 1276 /* delete from hash and active; remainder in destroy_class */
1308 hlist_del_init(&cl->hlist); 1277 qdisc_class_hash_remove(&q->clhash, &cl->common);
1278 cl->parent->children--;
1309 1279
1310 if (cl->prio_activity) 1280 if (cl->prio_activity)
1311 htb_deactivate(q, cl); 1281 htb_deactivate(q, cl);
1312 1282
1283 if (cl->cmode != HTB_CAN_SEND)
1284 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1285
1313 if (last_child) 1286 if (last_child)
1314 htb_parent_to_leaf(q, cl, new_q); 1287 htb_parent_to_leaf(q, cl, new_q);
1315 1288
@@ -1394,12 +1367,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1394 goto failure; 1367 goto failure;
1395 1368
1396 gen_new_estimator(&cl->bstats, &cl->rate_est, 1369 gen_new_estimator(&cl->bstats, &cl->rate_est,
1397 &sch->dev->queue_lock, 1370 qdisc_root_lock(sch),
1398 tca[TCA_RATE] ? : &est.nla); 1371 tca[TCA_RATE] ? : &est.nla);
1399 cl->refcnt = 1; 1372 cl->refcnt = 1;
1400 INIT_LIST_HEAD(&cl->sibling); 1373 cl->children = 0;
1401 INIT_HLIST_NODE(&cl->hlist);
1402 INIT_LIST_HEAD(&cl->children);
1403 INIT_LIST_HEAD(&cl->un.leaf.drop_list); 1374 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1404 RB_CLEAR_NODE(&cl->pq_node); 1375 RB_CLEAR_NODE(&cl->pq_node);
1405 1376
@@ -1409,7 +1380,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1409 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) 1380 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1410		   so it can't be used inside of sch_tree_lock 1381
1411 -- thanks to Karlis Peisenieks */ 1382 -- thanks to Karlis Peisenieks */
1412 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 1383 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1384 &pfifo_qdisc_ops, classid);
1413 sch_tree_lock(sch); 1385 sch_tree_lock(sch);
1414 if (parent && !parent->level) { 1386 if (parent && !parent->level) {
1415 unsigned int qlen = parent->un.leaf.q->q.qlen; 1387 unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1433,7 +1405,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1433 /* leaf (we) needs elementary qdisc */ 1405 /* leaf (we) needs elementary qdisc */
1434 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; 1406 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1435 1407
1436 cl->classid = classid; 1408 cl->common.classid = classid;
1437 cl->parent = parent; 1409 cl->parent = parent;
1438 1410
1439 /* set class to be in HTB_CAN_SEND state */ 1411 /* set class to be in HTB_CAN_SEND state */
@@ -1444,13 +1416,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1444 cl->cmode = HTB_CAN_SEND; 1416 cl->cmode = HTB_CAN_SEND;
1445 1417
1446 /* attach to the hash list and parent's family */ 1418 /* attach to the hash list and parent's family */
1447 hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); 1419 qdisc_class_hash_insert(&q->clhash, &cl->common);
1448 list_add_tail(&cl->sibling, 1420 if (parent)
1449 parent ? &parent->children : &q->root); 1421 parent->children++;
1450 } else { 1422 } else {
1451 if (tca[TCA_RATE]) 1423 if (tca[TCA_RATE])
1452 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1424 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1453 &sch->dev->queue_lock, 1425 qdisc_root_lock(sch),
1454 tca[TCA_RATE]); 1426 tca[TCA_RATE]);
1455 sch_tree_lock(sch); 1427 sch_tree_lock(sch);
1456 } 1428 }
@@ -1462,13 +1434,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1462 if (!hopt->quantum && cl->un.leaf.quantum < 1000) { 1434 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1463 printk(KERN_WARNING 1435 printk(KERN_WARNING
1464 "HTB: quantum of class %X is small. Consider r2q change.\n", 1436 "HTB: quantum of class %X is small. Consider r2q change.\n",
1465 cl->classid); 1437 cl->common.classid);
1466 cl->un.leaf.quantum = 1000; 1438 cl->un.leaf.quantum = 1000;
1467 } 1439 }
1468 if (!hopt->quantum && cl->un.leaf.quantum > 200000) { 1440 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1469 printk(KERN_WARNING 1441 printk(KERN_WARNING
1470 "HTB: quantum of class %X is big. Consider r2q change.\n", 1442 "HTB: quantum of class %X is big. Consider r2q change.\n",
1471 cl->classid); 1443 cl->common.classid);
1472 cl->un.leaf.quantum = 200000; 1444 cl->un.leaf.quantum = 200000;
1473 } 1445 }
1474 if (hopt->quantum) 1446 if (hopt->quantum)
@@ -1491,6 +1463,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1491 cl->ceil = ctab; 1463 cl->ceil = ctab;
1492 sch_tree_unlock(sch); 1464 sch_tree_unlock(sch);
1493 1465
1466 qdisc_class_hash_grow(sch, &q->clhash);
1467
1494 *arg = (unsigned long)cl; 1468 *arg = (unsigned long)cl;
1495 return 0; 1469 return 0;
1496 1470
@@ -1514,7 +1488,6 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1514static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, 1488static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1515 u32 classid) 1489 u32 classid)
1516{ 1490{
1517 struct htb_sched *q = qdisc_priv(sch);
1518 struct htb_class *cl = htb_find(classid, sch); 1491 struct htb_class *cl = htb_find(classid, sch);
1519 1492
1520 /*if (cl && !cl->level) return 0; 1493 /*if (cl && !cl->level) return 0;
@@ -1528,35 +1501,29 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1528 */ 1501 */
1529 if (cl) 1502 if (cl)
1530 cl->filter_cnt++; 1503 cl->filter_cnt++;
1531 else
1532 q->filter_cnt++;
1533 return (unsigned long)cl; 1504 return (unsigned long)cl;
1534} 1505}
1535 1506
1536static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) 1507static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1537{ 1508{
1538 struct htb_sched *q = qdisc_priv(sch);
1539 struct htb_class *cl = (struct htb_class *)arg; 1509 struct htb_class *cl = (struct htb_class *)arg;
1540 1510
1541 if (cl) 1511 if (cl)
1542 cl->filter_cnt--; 1512 cl->filter_cnt--;
1543 else
1544 q->filter_cnt--;
1545} 1513}
1546 1514
1547static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1515static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1548{ 1516{
1549 struct htb_sched *q = qdisc_priv(sch); 1517 struct htb_sched *q = qdisc_priv(sch);
1550 int i; 1518 struct htb_class *cl;
1519 struct hlist_node *n;
1520 unsigned int i;
1551 1521
1552 if (arg->stop) 1522 if (arg->stop)
1553 return; 1523 return;
1554 1524
1555 for (i = 0; i < HTB_HSIZE; i++) { 1525 for (i = 0; i < q->clhash.hashsize; i++) {
1556 struct hlist_node *p; 1526 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1557 struct htb_class *cl;
1558
1559 hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1560 if (arg->count < arg->skip) { 1527 if (arg->count < arg->skip) {
1561 arg->count++; 1528 arg->count++;
1562 continue; 1529 continue;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c9c649b26eaa..c5ea40c9eb21 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
180 * skb will be queued. 180 * skb will be queued.
181 */ 181 */
182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { 182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
183 struct Qdisc *rootq = sch->dev->qdisc; 183 struct Qdisc *rootq = qdisc_root(sch);
184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
185 q->duplicate = 0; 185 q->duplicate = 0;
186 186
@@ -310,28 +310,6 @@ static void netem_reset(struct Qdisc *sch)
310 qdisc_watchdog_cancel(&q->watchdog); 310 qdisc_watchdog_cancel(&q->watchdog);
311} 311}
312 312
313/* Pass size change message down to embedded FIFO */
314static int set_fifo_limit(struct Qdisc *q, int limit)
315{
316 struct nlattr *nla;
317 int ret = -ENOMEM;
318
319 /* Hack to avoid sending change message to non-FIFO */
320 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
321 return 0;
322
323 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
324 if (nla) {
325 nla->nla_type = RTM_NEWQDISC;
326 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
327 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
328
329 ret = q->ops->change(q, nla);
330 kfree(nla);
331 }
332 return ret;
333}
334
335/* 313/*
336 * Distribution data is a variable size payload containing 314 * Distribution data is a variable size payload containing
337 * signed 16 bit values. 315 * signed 16 bit values.
@@ -341,6 +319,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
341 struct netem_sched_data *q = qdisc_priv(sch); 319 struct netem_sched_data *q = qdisc_priv(sch);
342 unsigned long n = nla_len(attr)/sizeof(__s16); 320 unsigned long n = nla_len(attr)/sizeof(__s16);
343 const __s16 *data = nla_data(attr); 321 const __s16 *data = nla_data(attr);
322 spinlock_t *root_lock;
344 struct disttable *d; 323 struct disttable *d;
345 int i; 324 int i;
346 325
@@ -355,9 +334,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
355 for (i = 0; i < n; i++) 334 for (i = 0; i < n; i++)
356 d->table[i] = data[i]; 335 d->table[i] = data[i];
357 336
358 spin_lock_bh(&sch->dev->queue_lock); 337 root_lock = qdisc_root_lock(sch);
338
339 spin_lock_bh(root_lock);
359 d = xchg(&q->delay_dist, d); 340 d = xchg(&q->delay_dist, d);
360 spin_unlock_bh(&sch->dev->queue_lock); 341 spin_unlock_bh(root_lock);
361 342
362 kfree(d); 343 kfree(d);
363 return 0; 344 return 0;
@@ -416,7 +397,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
416 if (ret < 0) 397 if (ret < 0)
417 return ret; 398 return ret;
418 399
419 ret = set_fifo_limit(q->qdisc, qopt->limit); 400 ret = fifo_set_limit(q->qdisc, qopt->limit);
420 if (ret) { 401 if (ret) {
421 pr_debug("netem: can't set fifo limit\n"); 402 pr_debug("netem: can't set fifo limit\n");
422 return ret; 403 return ret;
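set_fifo_limit() is hoisted out of netem (and its twins out of sch_red and sch_tbf below) into a shared fifo_set_limit() helper. Judging by the deleted body, the helper presumably still builds a fake netlink change request carrying a tc_fifo_qopt and feeds it to the child's ->change(); the sketch below reconstructs that idea in userspace terms rather than quoting sch_fifo.c:

#include <stdio.h>

struct fifo_qopt { unsigned int limit; };

struct nla_sketch {			/* stand-in for a netlink attribute */
	unsigned short nla_type;
	unsigned short nla_len;
	struct fifo_qopt payload;
};

static int fifo_change(unsigned int *limit, const struct nla_sketch *nla)
{
	if (nla->nla_len < sizeof(*nla))
		return -1;
	*limit = nla->payload.limit;
	return 0;
}

static int fifo_set_limit_sketch(unsigned int *limit, unsigned int newlim)
{
	/* build the same change message userspace tc would have sent */
	struct nla_sketch nla = {
		.nla_type = 1,		/* stand-in for RTM_NEWQDISC */
		.nla_len = sizeof(nla),
		.payload = { .limit = newlim },
	};

	return fifo_change(limit, &nla);
}

int main(void)
{
	unsigned int limit = 0;

	fifo_set_limit_sketch(&limit, 1000);
	printf("limit = %u\n", limit);
	return 0;
}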
@@ -517,7 +498,7 @@ static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
517 498
518 q->limit = ctl->limit; 499 q->limit = ctl->limit;
519 } else 500 } else
520 q->limit = max_t(u32, sch->dev->tx_queue_len, 1); 501 q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
521 502
522 q->oldest = PSCHED_PASTPERFECT; 503 q->oldest = PSCHED_PASTPERFECT;
523 return 0; 504 return 0;
@@ -558,7 +539,8 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
558 539
559 qdisc_watchdog_init(&q->watchdog, sch); 540 qdisc_watchdog_init(&q->watchdog, sch);
560 541
561 q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops, 542 q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
543 &tfifo_qdisc_ops,
562 TC_H_MAKE(sch->handle, 1)); 544 TC_H_MAKE(sch->handle, 1));
563 if (!q->qdisc) { 545 if (!q->qdisc) {
564 pr_debug("netem: qdisc create failed\n"); 546 pr_debug("netem: qdisc create failed\n");
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5532f1031ab5..536ca474dc69 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,11 +24,9 @@
24struct prio_sched_data 24struct prio_sched_data
25{ 25{
26 int bands; 26 int bands;
27 int curband; /* for round-robin */
28 struct tcf_proto *filter_list; 27 struct tcf_proto *filter_list;
29 u8 prio2band[TC_PRIO_MAX+1]; 28 u8 prio2band[TC_PRIO_MAX+1];
30 struct Qdisc *queues[TCQ_PRIO_BANDS]; 29 struct Qdisc *queues[TCQ_PRIO_BANDS];
31 int mq;
32}; 30};
33 31
34 32
@@ -55,17 +53,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
55 if (!q->filter_list || err < 0) { 53 if (!q->filter_list || err < 0) {
56 if (TC_H_MAJ(band)) 54 if (TC_H_MAJ(band))
57 band = 0; 55 band = 0;
58 band = q->prio2band[band&TC_PRIO_MAX]; 56 return q->queues[q->prio2band[band&TC_PRIO_MAX]];
59 goto out;
60 } 57 }
61 band = res.classid; 58 band = res.classid;
62 } 59 }
63 band = TC_H_MIN(band) - 1; 60 band = TC_H_MIN(band) - 1;
64 if (band >= q->bands) 61 if (band >= q->bands)
65 band = q->prio2band[0]; 62 return q->queues[q->prio2band[0]];
66out: 63
67 if (q->mq)
68 skb_set_queue_mapping(skb, band);
69 return q->queues[band]; 64 return q->queues[band];
70} 65}
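With the multiqueue special cases removed, prio_classify() reduces to a pure table lookup: the skb priority (or a classifier verdict) indexes prio2band, and the band indexes the child qdisc array. The mapping is easy to demonstrate with the familiar default 3-band priomap:

#include <stdio.h>

#define TC_PRIO_MAX 15

/* the default priomap for a 3-band prio qdisc */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

static unsigned int classify(unsigned int skb_priority)
{
	return prio2band[skb_priority & TC_PRIO_MAX];
}

int main(void)
{
	printf("prio 0 -> band %u\n", classify(0));	/* best effort -> 1 */
	printf("prio 6 -> band %u\n", classify(6));	/* interactive -> 0 */
	return 0;
}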
71 66
@@ -123,67 +118,23 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
123} 118}
124 119
125 120
126static struct sk_buff * 121static struct sk_buff *prio_dequeue(struct Qdisc* sch)
127prio_dequeue(struct Qdisc* sch)
128{ 122{
129 struct sk_buff *skb;
130 struct prio_sched_data *q = qdisc_priv(sch); 123 struct prio_sched_data *q = qdisc_priv(sch);
131 int prio; 124 int prio;
132 struct Qdisc *qdisc;
133 125
134 for (prio = 0; prio < q->bands; prio++) { 126 for (prio = 0; prio < q->bands; prio++) {
135 /* Check if the target subqueue is available before 127 struct Qdisc *qdisc = q->queues[prio];
136 * pulling an skb. This way we avoid excessive requeues 128 struct sk_buff *skb = qdisc->dequeue(qdisc);
137 * for slower queues. 129 if (skb) {
138 */ 130 sch->q.qlen--;
139 if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { 131 return skb;
140 qdisc = q->queues[prio];
141 skb = qdisc->dequeue(qdisc);
142 if (skb) {
143 sch->q.qlen--;
144 return skb;
145 }
146 } 132 }
147 } 133 }
148 return NULL; 134 return NULL;
149 135
150} 136}
151 137
152static struct sk_buff *rr_dequeue(struct Qdisc* sch)
153{
154 struct sk_buff *skb;
155 struct prio_sched_data *q = qdisc_priv(sch);
156 struct Qdisc *qdisc;
157 int bandcount;
158
159 /* Only take one pass through the queues. If nothing is available,
160 * return nothing.
161 */
162 for (bandcount = 0; bandcount < q->bands; bandcount++) {
163 /* Check if the target subqueue is available before
164 * pulling an skb. This way we avoid excessive requeues
165 * for slower queues. If the queue is stopped, try the
166 * next queue.
167 */
168 if (!__netif_subqueue_stopped(sch->dev,
169 (q->mq ? q->curband : 0))) {
170 qdisc = q->queues[q->curband];
171 skb = qdisc->dequeue(qdisc);
172 if (skb) {
173 sch->q.qlen--;
174 q->curband++;
175 if (q->curband >= q->bands)
176 q->curband = 0;
177 return skb;
178 }
179 }
180 q->curband++;
181 if (q->curband >= q->bands)
182 q->curband = 0;
183 }
184 return NULL;
185}
186
187static unsigned int prio_drop(struct Qdisc* sch) 138static unsigned int prio_drop(struct Qdisc* sch)
188{ 139{
189 struct prio_sched_data *q = qdisc_priv(sch); 140 struct prio_sched_data *q = qdisc_priv(sch);
@@ -228,45 +179,22 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
228{ 179{
229 struct prio_sched_data *q = qdisc_priv(sch); 180 struct prio_sched_data *q = qdisc_priv(sch);
230 struct tc_prio_qopt *qopt; 181 struct tc_prio_qopt *qopt;
231 struct nlattr *tb[TCA_PRIO_MAX + 1];
232 int err;
233 int i; 182 int i;
234 183
235 err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt, 184 if (nla_len(opt) < sizeof(*qopt))
236 sizeof(*qopt)); 185 return -EINVAL;
237 if (err < 0) 186 qopt = nla_data(opt);
238 return err;
239
240 q->bands = qopt->bands;
241 /* If we're multiqueue, make sure the number of incoming bands
242 * matches the number of queues on the device we're associating with.
243 * If the number of bands requested is zero, then set q->bands to
244 * dev->egress_subqueue_count. Also, the root qdisc must be the
245 * only one that is enabled for multiqueue, since it's the only one
246 * that interacts with the underlying device.
247 */
248 q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
249 if (q->mq) {
250 if (sch->parent != TC_H_ROOT)
251 return -EINVAL;
252 if (netif_is_multiqueue(sch->dev)) {
253 if (q->bands == 0)
254 q->bands = sch->dev->egress_subqueue_count;
255 else if (q->bands != sch->dev->egress_subqueue_count)
256 return -EINVAL;
257 } else
258 return -EOPNOTSUPP;
259 }
260 187
261 if (q->bands > TCQ_PRIO_BANDS || q->bands < 2) 188 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
262 return -EINVAL; 189 return -EINVAL;
263 190
264 for (i=0; i<=TC_PRIO_MAX; i++) { 191 for (i=0; i<=TC_PRIO_MAX; i++) {
265 if (qopt->priomap[i] >= q->bands) 192 if (qopt->priomap[i] >= qopt->bands)
266 return -EINVAL; 193 return -EINVAL;
267 } 194 }
268 195
269 sch_tree_lock(sch); 196 sch_tree_lock(sch);
197 q->bands = qopt->bands;
270 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); 198 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
271 199
272 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { 200 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
@@ -281,7 +209,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
281 for (i=0; i<q->bands; i++) { 209 for (i=0; i<q->bands; i++) {
282 if (q->queues[i] == &noop_qdisc) { 210 if (q->queues[i] == &noop_qdisc) {
283 struct Qdisc *child; 211 struct Qdisc *child;
284 child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 212 child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
213 &pfifo_qdisc_ops,
285 TC_H_MAKE(sch->handle, i + 1)); 214 TC_H_MAKE(sch->handle, i + 1));
286 if (child) { 215 if (child) {
287 sch_tree_lock(sch); 216 sch_tree_lock(sch);
@@ -331,10 +260,6 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
331 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 260 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
332 if (nest == NULL) 261 if (nest == NULL)
333 goto nla_put_failure; 262 goto nla_put_failure;
334 if (q->mq) {
335 if (nla_put_flag(skb, TCA_PRIO_MQ) < 0)
336 goto nla_put_failure;
337 }
338 nla_nest_compat_end(skb, nest); 263 nla_nest_compat_end(skb, nest);
339 264
340 return skb->len; 265 return skb->len;
@@ -507,44 +432,17 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
507 .owner = THIS_MODULE, 432 .owner = THIS_MODULE,
508}; 433};
509 434
510static struct Qdisc_ops rr_qdisc_ops __read_mostly = {
511 .next = NULL,
512 .cl_ops = &prio_class_ops,
513 .id = "rr",
514 .priv_size = sizeof(struct prio_sched_data),
515 .enqueue = prio_enqueue,
516 .dequeue = rr_dequeue,
517 .requeue = prio_requeue,
518 .drop = prio_drop,
519 .init = prio_init,
520 .reset = prio_reset,
521 .destroy = prio_destroy,
522 .change = prio_tune,
523 .dump = prio_dump,
524 .owner = THIS_MODULE,
525};
526
527static int __init prio_module_init(void) 435static int __init prio_module_init(void)
528{ 436{
529 int err; 437 return register_qdisc(&prio_qdisc_ops);
530
531 err = register_qdisc(&prio_qdisc_ops);
532 if (err < 0)
533 return err;
534 err = register_qdisc(&rr_qdisc_ops);
535 if (err < 0)
536 unregister_qdisc(&prio_qdisc_ops);
537 return err;
538} 438}
539 439
540static void __exit prio_module_exit(void) 440static void __exit prio_module_exit(void)
541{ 441{
542 unregister_qdisc(&prio_qdisc_ops); 442 unregister_qdisc(&prio_qdisc_ops);
543 unregister_qdisc(&rr_qdisc_ops);
544} 443}
545 444
546module_init(prio_module_init) 445module_init(prio_module_init)
547module_exit(prio_module_exit) 446module_exit(prio_module_exit)
548 447
549MODULE_LICENSE("GPL"); 448MODULE_LICENSE("GPL");
550MODULE_ALIAS("sch_rr");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 5c569853b9c0..77098acf0adc 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -174,33 +174,6 @@ static void red_destroy(struct Qdisc *sch)
174 qdisc_destroy(q->qdisc); 174 qdisc_destroy(q->qdisc);
175} 175}
176 176
177static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
178{
179 struct Qdisc *q;
180 struct nlattr *nla;
181 int ret;
182
183 q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
184 TC_H_MAKE(sch->handle, 1));
185 if (q) {
186 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
187 GFP_KERNEL);
188 if (nla) {
189 nla->nla_type = RTM_NEWQDISC;
190 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
191 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
192
193 ret = q->ops->change(q, nla);
194 kfree(nla);
195
196 if (ret == 0)
197 return q;
198 }
199 qdisc_destroy(q);
200 }
201 return NULL;
202}
203
204static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { 177static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
205 [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, 178 [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
206 [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, 179 [TCA_RED_STAB] = { .len = RED_STAB_SIZE },
@@ -228,9 +201,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
228 ctl = nla_data(tb[TCA_RED_PARMS]); 201 ctl = nla_data(tb[TCA_RED_PARMS]);
229 202
230 if (ctl->limit > 0) { 203 if (ctl->limit > 0) {
231 child = red_create_dflt(sch, ctl->limit); 204 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
232 if (child == NULL) 205 if (IS_ERR(child))
233 return -ENOMEM; 206 return PTR_ERR(child);
234 } 207 }
235 208
236 sch_tree_lock(sch); 209 sch_tree_lock(sch);
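red_change() now delegates child creation to a shared fifo_create_dflt() helper instead of the open-coded red_create_dflt() removed above. Judging from the call sites, the helper presumably wraps qdisc_create_dflt() plus a limit change and reports failure as an ERR_PTR rather than NULL; a sketch under that assumption (fifo_set_limit() is assumed to exist alongside it):

	struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
				       unsigned int limit)
	{
		struct Qdisc *q;
		int err = -ENOMEM;

		q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, ops,
				      TC_H_MAKE(sch->handle, 1));
		if (q) {
			err = fifo_set_limit(q, limit);	/* apply the queue limit */
			if (err < 0) {
				qdisc_destroy(q);
				q = NULL;
			}
		}

		return q ? : ERR_PTR(err);
	}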
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a97afbfb952..8458f630fac4 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
461 return -EINVAL; 461 return -EINVAL;
462 462
463 sch_tree_lock(sch); 463 sch_tree_lock(sch);
464 q->quantum = ctl->quantum ? : psched_mtu(sch->dev); 464 q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
465 q->perturb_period = ctl->perturb_period * HZ; 465 q->perturb_period = ctl->perturb_period * HZ;
466 if (ctl->limit) 466 if (ctl->limit)
467 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1); 467 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
502 q->max_depth = 0; 502 q->max_depth = 0;
503 q->tail = SFQ_DEPTH; 503 q->tail = SFQ_DEPTH;
504 if (opt == NULL) { 504 if (opt == NULL) {
505 q->quantum = psched_mtu(sch->dev); 505 q->quantum = psched_mtu(qdisc_dev(sch));
506 q->perturb_period = 0; 506 q->perturb_period = 0;
507 q->perturbation = net_random(); 507 q->perturbation = net_random();
508 } else { 508 } else {
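The recurring sch->dev to qdisc_dev(sch) substitution throughout this series reflects qdiscs now being attached to a struct netdev_queue rather than directly to the device. The accessor is presumably a trivial inline along these lines (a sketch of the assumed helper, not quoted from the tree):

	static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
	{
		return qdisc->dev_queue->dev;	/* device owning the attached queue */
	}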
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0b7d78f59d8c..444c227fcb6b 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -242,34 +242,6 @@ static void tbf_reset(struct Qdisc* sch)
242 qdisc_watchdog_cancel(&q->watchdog); 242 qdisc_watchdog_cancel(&q->watchdog);
243} 243}
244 244
245static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
246{
247 struct Qdisc *q;
248 struct nlattr *nla;
249 int ret;
250
251 q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
252 TC_H_MAKE(sch->handle, 1));
253 if (q) {
254 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
255 GFP_KERNEL);
256 if (nla) {
257 nla->nla_type = RTM_NEWQDISC;
258 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
259 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
260
261 ret = q->ops->change(q, nla);
262 kfree(nla);
263
264 if (ret == 0)
265 return q;
266 }
267 qdisc_destroy(q);
268 }
269
270 return NULL;
271}
272
273static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { 245static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
274 [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) }, 246 [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) },
275 [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, 247 [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
@@ -322,8 +294,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
322 goto done; 294 goto done;
323 295
324 if (qopt->limit > 0) { 296 if (qopt->limit > 0) {
325 if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL) 297 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
298 if (IS_ERR(child)) {
299 err = PTR_ERR(child);
326 goto done; 300 goto done;
301 }
327 } 302 }
328 303
329 sch_tree_lock(sch); 304 sch_tree_lock(sch);
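tbf_change() adopts the same error-handling shape as red_change() above: the helper never returns NULL, so failure is decoded with the ERR_PTR machinery (tbf_change() routes the errno through its done: label rather than returning directly). The pattern, in brief:

	child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
	if (IS_ERR(child))
		return PTR_ERR(child);	/* propagate the encoded errno */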
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 0444fd0f0d22..8b0ff345f9da 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,7 +78,7 @@ struct teql_sched_data
78static int 78static int
79teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) 79teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
80{ 80{
81 struct net_device *dev = sch->dev; 81 struct net_device *dev = qdisc_dev(sch);
82 struct teql_sched_data *q = qdisc_priv(sch); 82 struct teql_sched_data *q = qdisc_priv(sch);
83 83
84 if (q->q.qlen < dev->tx_queue_len) { 84 if (q->q.qlen < dev->tx_queue_len) {
@@ -107,17 +107,19 @@ static struct sk_buff *
107teql_dequeue(struct Qdisc* sch) 107teql_dequeue(struct Qdisc* sch)
108{ 108{
109 struct teql_sched_data *dat = qdisc_priv(sch); 109 struct teql_sched_data *dat = qdisc_priv(sch);
110 struct netdev_queue *dat_queue;
110 struct sk_buff *skb; 111 struct sk_buff *skb;
111 112
112 skb = __skb_dequeue(&dat->q); 113 skb = __skb_dequeue(&dat->q);
114 dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
113 if (skb == NULL) { 115 if (skb == NULL) {
114 struct net_device *m = dat->m->dev->qdisc->dev; 116 struct net_device *m = qdisc_dev(dat_queue->qdisc);
115 if (m) { 117 if (m) {
116 dat->m->slaves = sch; 118 dat->m->slaves = sch;
117 netif_wake_queue(m); 119 netif_wake_queue(m);
118 } 120 }
119 } 121 }
120 sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen; 122 sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
121 return skb; 123 return skb;
122} 124}
123 125
@@ -153,10 +155,16 @@ teql_destroy(struct Qdisc* sch)
153 if (q == master->slaves) { 155 if (q == master->slaves) {
154 master->slaves = NEXT_SLAVE(q); 156 master->slaves = NEXT_SLAVE(q);
155 if (q == master->slaves) { 157 if (q == master->slaves) {
158 struct netdev_queue *txq;
159 spinlock_t *root_lock;
160
161 txq = netdev_get_tx_queue(master->dev, 0);
156 master->slaves = NULL; 162 master->slaves = NULL;
157 spin_lock_bh(&master->dev->queue_lock); 163
158 qdisc_reset(master->dev->qdisc); 164 root_lock = qdisc_root_lock(txq->qdisc);
159 spin_unlock_bh(&master->dev->queue_lock); 165 spin_lock_bh(root_lock);
166 qdisc_reset(txq->qdisc);
167 spin_unlock_bh(root_lock);
160 } 168 }
161 } 169 }
162 skb_queue_purge(&dat->q); 170 skb_queue_purge(&dat->q);
@@ -170,7 +178,7 @@ teql_destroy(struct Qdisc* sch)
170 178
171static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) 179static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
172{ 180{
173 struct net_device *dev = sch->dev; 181 struct net_device *dev = qdisc_dev(sch);
174 struct teql_master *m = (struct teql_master*)sch->ops; 182 struct teql_master *m = (struct teql_master*)sch->ops;
175 struct teql_sched_data *q = qdisc_priv(sch); 183 struct teql_sched_data *q = qdisc_priv(sch);
176 184
@@ -216,7 +224,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
216static int 224static int
217__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) 225__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
218{ 226{
219 struct teql_sched_data *q = qdisc_priv(dev->qdisc); 227 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
228 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
220 struct neighbour *mn = skb->dst->neighbour; 229 struct neighbour *mn = skb->dst->neighbour;
221 struct neighbour *n = q->ncache; 230 struct neighbour *n = q->ncache;
222 231
@@ -252,7 +261,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
252static inline int teql_resolve(struct sk_buff *skb, 261static inline int teql_resolve(struct sk_buff *skb,
253 struct sk_buff *skb_res, struct net_device *dev) 262 struct sk_buff *skb_res, struct net_device *dev)
254{ 263{
255 if (dev->qdisc == &noop_qdisc) 264 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
265 if (txq->qdisc == &noop_qdisc)
256 return -ENODEV; 266 return -ENODEV;
257 267
258 if (dev->header_ops == NULL || 268 if (dev->header_ops == NULL ||
@@ -282,12 +292,13 @@ restart:
282 goto drop; 292 goto drop;
283 293
284 do { 294 do {
285 struct net_device *slave = q->dev; 295 struct net_device *slave = qdisc_dev(q);
296 struct netdev_queue *slave_txq;
286 297
287 if (slave->qdisc_sleeping != q) 298 slave_txq = netdev_get_tx_queue(slave, 0);
299 if (slave_txq->qdisc_sleeping != q)
288 continue; 300 continue;
289 if (netif_queue_stopped(slave) || 301 if (__netif_subqueue_stopped(slave, subq) ||
290 __netif_subqueue_stopped(slave, subq) ||
291 !netif_running(slave)) { 302 !netif_running(slave)) {
292 busy = 1; 303 busy = 1;
293 continue; 304 continue;
@@ -296,8 +307,7 @@ restart:
296 switch (teql_resolve(skb, skb_res, slave)) { 307 switch (teql_resolve(skb, skb_res, slave)) {
297 case 0: 308 case 0:
298 if (netif_tx_trylock(slave)) { 309 if (netif_tx_trylock(slave)) {
299 if (!netif_queue_stopped(slave) && 310 if (!__netif_subqueue_stopped(slave, subq) &&
300 !__netif_subqueue_stopped(slave, subq) &&
301 slave->hard_start_xmit(skb, slave) == 0) { 311 slave->hard_start_xmit(skb, slave) == 0) {
302 netif_tx_unlock(slave); 312 netif_tx_unlock(slave);
303 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
@@ -352,7 +362,7 @@ static int teql_master_open(struct net_device *dev)
352 362
353 q = m->slaves; 363 q = m->slaves;
354 do { 364 do {
355 struct net_device *slave = q->dev; 365 struct net_device *slave = qdisc_dev(q);
356 366
357 if (slave == NULL) 367 if (slave == NULL)
358 return -EUNATCH; 368 return -EUNATCH;
@@ -403,7 +413,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
403 q = m->slaves; 413 q = m->slaves;
404 if (q) { 414 if (q) {
405 do { 415 do {
406 if (new_mtu > q->dev->mtu) 416 if (new_mtu > qdisc_dev(q)->mtu)
407 return -EINVAL; 417 return -EINVAL;
408 } while ((q=NEXT_SLAVE(q)) != m->slaves); 418 } while ((q=NEXT_SLAVE(q)) != m->slaves);
409 } 419 }
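teql consistently indexes transmit queue 0, since it is a single-queue virtual device. netdev_get_tx_queue() is presumably the trivial array accessor below (a sketch, assuming the per-device _tx queue array introduced by the multiqueue rework):

	static inline struct netdev_queue *netdev_get_tx_queue(struct net_device *dev,
							       unsigned int index)
	{
		return &dev->_tx[index];	/* per-device array of TX queues */
	}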
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 024c3ebd9661..35b6a023a6d0 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -136,6 +136,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
136 136
137 /* Set association default SACK delay */ 137 /* Set association default SACK delay */
138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); 138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
139 asoc->sackfreq = sp->sackfreq;
139 140
140 /* Set the association default flags controlling 141 /* Set the association default flags controlling
141 * Heartbeat, SACK delay, and Path MTU Discovery. 142 * Heartbeat, SACK delay, and Path MTU Discovery.
@@ -261,6 +262,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
261 * already received one packet.] 262 * already received one packet.]
262 */ 263 */
263 asoc->peer.sack_needed = 1; 264 asoc->peer.sack_needed = 1;
265 asoc->peer.sack_cnt = 0;
264 266
265 /* Assume that the peer will tell us if he recognizes ASCONF 267 /* Assume that the peer will tell us if he recognizes ASCONF
266 * as part of INIT exchange. 268 * as part of INIT exchange.
@@ -624,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
624 * association configured value. 626 * association configured value.
625 */ 627 */
626 peer->sackdelay = asoc->sackdelay; 628 peer->sackdelay = asoc->sackdelay;
629 peer->sackfreq = asoc->sackfreq;
627 630
628 /* Enable/disable heartbeat, SACK delay, and path MTU discovery 631 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
629 * based on association setting. 632 * based on association setting.
diff --git a/net/sctp/input.c b/net/sctp/input.c
index ca6b022b1df2..5ed93c05c23e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -61,6 +61,7 @@
61#include <net/sctp/sctp.h> 61#include <net/sctp/sctp.h>
62#include <net/sctp/sm.h> 62#include <net/sctp/sm.h>
63#include <net/sctp/checksum.h> 63#include <net/sctp/checksum.h>
64#include <net/net_namespace.h>
64 65
65/* Forward declarations for internal helpers. */ 66/* Forward declarations for internal helpers. */
66static int sctp_rcv_ootb(struct sk_buff *); 67static int sctp_rcv_ootb(struct sk_buff *);
@@ -430,6 +431,9 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
430 struct sock *sk = NULL; 431 struct sock *sk = NULL;
431 struct sctp_association *asoc; 432 struct sctp_association *asoc;
432 struct sctp_transport *transport = NULL; 433 struct sctp_transport *transport = NULL;
434 struct sctp_init_chunk *chunkhdr;
435 __u32 vtag = ntohl(sctphdr->vtag);
436 int len = skb->len - ((void *)sctphdr - (void *)skb->data);
433 437
434 *app = NULL; *tpp = NULL; 438 *app = NULL; *tpp = NULL;
435 439
@@ -451,8 +455,28 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
451 455
452 sk = asoc->base.sk; 456 sk = asoc->base.sk;
453 457
454 if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) { 458 /* RFC 4960, Appendix C. ICMP Handling
455 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 459 *
460 * ICMP6) An implementation MUST validate that the Verification Tag
461 * contained in the ICMP message matches the Verification Tag of
462 * the peer. If the Verification Tag is not 0 and does NOT
463 * match, discard the ICMP message. If it is 0 and the ICMP
464 * message contains enough bytes to verify that the chunk type is
465 * an INIT chunk and that the Initiate Tag matches the tag of the
466 * peer, continue with ICMP7. If the ICMP message is too short
467 * or the chunk type or the Initiate Tag does not match, silently
468 * discard the packet.
469 */
470 if (vtag == 0) {
471 chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr
472 + sizeof(struct sctphdr));
473 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
474 + sizeof(__be32) ||
475 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
476 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
477 goto out;
478 }
479 } else if (vtag != asoc->c.peer_vtag) {
456 goto out; 480 goto out;
457 } 481 }
458 482
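The length bound in the zero-vtag branch is just the sum of the wire-format pieces that must be present before the Initiate Tag can be read. A standalone arithmetic check, using the RFC 4960 wire sizes (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sctphdr  = 12;	/* source port, dest port, vtag, checksum */
		unsigned int chunkhdr = 4;	/* chunk type, flags, length */
		unsigned int init_tag = 4;	/* first field of the INIT chunk body */

		/* minimum ICMP payload needed to validate an INIT's Initiate Tag */
		printf("%u bytes\n", sctphdr + chunkhdr + init_tag);	/* prints 20 */
		return 0;
	}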
@@ -462,7 +486,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
462 * servers this needs to be solved differently. 486 * servers this needs to be solved differently.
463 */ 487 */
464 if (sock_owned_by_user(sk)) 488 if (sock_owned_by_user(sk))
465 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); 489 NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
466 490
467 *app = asoc; 491 *app = asoc;
468 *tpp = transport; 492 *tpp = transport;
@@ -511,7 +535,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
511 int err; 535 int err;
512 536
513 if (skb->len < ihlen + 8) { 537 if (skb->len < ihlen + 8) {
514 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 538 ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
515 return; 539 return;
516 } 540 }
517 541
@@ -525,7 +549,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
525 skb->network_header = saveip; 549 skb->network_header = saveip;
526 skb->transport_header = savesctp; 550 skb->transport_header = savesctp;
527 if (!sk) { 551 if (!sk) {
528 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 552 ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
529 return; 553 return;
530 } 554 }
531 /* Warning: The sock lock is held. Remember to call 555 /* Warning: The sock lock is held. Remember to call
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6d45bae93b46..9a63a3fb9011 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -50,6 +50,7 @@
50#include <linux/init.h> 50#include <linux/init.h>
51#include <net/inet_ecn.h> 51#include <net/inet_ecn.h>
52#include <net/icmp.h> 52#include <net/icmp.h>
53#include <net/net_namespace.h>
53 54
54#ifndef TEST_FRAME 55#ifndef TEST_FRAME
55#include <net/tcp.h> 56#include <net/tcp.h>
@@ -157,7 +158,8 @@ void sctp_packet_free(struct sctp_packet *packet)
157 * packet can be sent only after receiving the COOKIE_ACK. 158 * packet can be sent only after receiving the COOKIE_ACK.
158 */ 159 */
159sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, 160sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
160 struct sctp_chunk *chunk) 161 struct sctp_chunk *chunk,
162 int one_packet)
161{ 163{
162 sctp_xmit_t retval; 164 sctp_xmit_t retval;
163 int error = 0; 165 int error = 0;
@@ -175,7 +177,9 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
175 /* If we have an empty packet, then we can NOT ever 177 /* If we have an empty packet, then we can NOT ever
176 * return PMTU_FULL. 178 * return PMTU_FULL.
177 */ 179 */
178 retval = sctp_packet_append_chunk(packet, chunk); 180 if (!one_packet)
181 retval = sctp_packet_append_chunk(packet,
182 chunk);
179 } 183 }
180 break; 184 break;
181 185
@@ -592,7 +596,7 @@ out:
592 return err; 596 return err;
593no_route: 597no_route:
594 kfree_skb(nskb); 598 kfree_skb(nskb);
595 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); 599 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
596 600
597 /* FIXME: Returning the 'err' will effect all the associations 601 /* FIXME: Returning the 'err' will effect all the associations
598 * associated with a socket, although only one of the paths of the 602 * associated with a socket, although only one of the paths of the
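The new one_packet flag tells sctp_packet_transmit_chunk() that a chunk which no longer fits must not be started on a fresh packet. Callers that set it are expected to re-queue the chunk, as sctp_outq_flush() does in the next file; the shape of such a call site:

	status = sctp_packet_transmit_chunk(packet, chunk, 1 /* one_packet */);
	if (status != SCTP_XMIT_OK)
		list_add(&chunk->list, &q->control_chunk_list);	/* put the chunk back */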
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ace6770e9048..70ead8dc3485 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -702,6 +702,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
702 return error; 702 return error;
703} 703}
704 704
705
705/* 706/*
706 * Try to flush an outqueue. 707 * Try to flush an outqueue.
707 * 708 *
@@ -725,6 +726,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
725 sctp_xmit_t status; 726 sctp_xmit_t status;
726 int error = 0; 727 int error = 0;
727 int start_timer = 0; 728 int start_timer = 0;
729 int one_packet = 0;
728 730
729 /* These transports have chunks to send. */ 731 /* These transports have chunks to send. */
730 struct list_head transport_list; 732 struct list_head transport_list;
@@ -830,20 +832,33 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
830 if (sctp_test_T_bit(chunk)) { 832 if (sctp_test_T_bit(chunk)) {
831 packet->vtag = asoc->c.my_vtag; 833 packet->vtag = asoc->c.my_vtag;
832 } 834 }
833 case SCTP_CID_SACK: 835 /* The following chunks are "response" chunks, i.e.
834 case SCTP_CID_HEARTBEAT: 836 * they are generated in response to something we
837 * received. If we are sending these, then we can
838 * send only 1 packet containing these chunks.
839 */
835 case SCTP_CID_HEARTBEAT_ACK: 840 case SCTP_CID_HEARTBEAT_ACK:
836 case SCTP_CID_SHUTDOWN:
837 case SCTP_CID_SHUTDOWN_ACK: 841 case SCTP_CID_SHUTDOWN_ACK:
838 case SCTP_CID_ERROR:
839 case SCTP_CID_COOKIE_ECHO:
840 case SCTP_CID_COOKIE_ACK: 842 case SCTP_CID_COOKIE_ACK:
841 case SCTP_CID_ECN_ECNE: 843 case SCTP_CID_COOKIE_ECHO:
844 case SCTP_CID_ERROR:
842 case SCTP_CID_ECN_CWR: 845 case SCTP_CID_ECN_CWR:
843 case SCTP_CID_ASCONF:
844 case SCTP_CID_ASCONF_ACK: 846 case SCTP_CID_ASCONF_ACK:
847 one_packet = 1;
 848 /* Fall through */
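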
849
850 case SCTP_CID_SACK:
851 case SCTP_CID_HEARTBEAT:
852 case SCTP_CID_SHUTDOWN:
853 case SCTP_CID_ECN_ECNE:
854 case SCTP_CID_ASCONF:
845 case SCTP_CID_FWD_TSN: 855 case SCTP_CID_FWD_TSN:
846 sctp_packet_transmit_chunk(packet, chunk); 856 status = sctp_packet_transmit_chunk(packet, chunk,
857 one_packet);
858 if (status != SCTP_XMIT_OK) {
859 /* put the chunk back */
860 list_add(&chunk->list, &q->control_chunk_list);
861 }
847 break; 862 break;
848 863
849 default: 864 default:
@@ -974,7 +989,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
974 atomic_read(&chunk->skb->users) : -1); 989 atomic_read(&chunk->skb->users) : -1);
975 990
976 /* Add the chunk to the packet. */ 991 /* Add the chunk to the packet. */
977 status = sctp_packet_transmit_chunk(packet, chunk); 992 status = sctp_packet_transmit_chunk(packet, chunk, 0);
978 993
979 switch (status) { 994 switch (status) {
980 case SCTP_XMIT_PMTU_FULL: 995 case SCTP_XMIT_PMTU_FULL:
@@ -1239,7 +1254,6 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1239 * Make sure the empty queue handler will get run later. 1254 * Make sure the empty queue handler will get run later.
1240 */ 1255 */
1241 q->empty = (list_empty(&q->out_chunk_list) && 1256 q->empty = (list_empty(&q->out_chunk_list) &&
1242 list_empty(&q->control_chunk_list) &&
1243 list_empty(&q->retransmit)); 1257 list_empty(&q->retransmit));
1244 if (!q->empty) 1258 if (!q->empty)
1245 goto finish; 1259 goto finish;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0aba759cb9b7..5dd89831eceb 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -383,3 +383,144 @@ void sctp_assocs_proc_exit(void)
383{ 383{
384 remove_proc_entry("assocs", proc_net_sctp); 384 remove_proc_entry("assocs", proc_net_sctp);
385} 385}
386
387static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
388{
389 if (*pos >= sctp_assoc_hashsize)
390 return NULL;
391
392 if (*pos < 0)
393 *pos = 0;
394
395 if (*pos == 0)
396 seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
397 "REM_ADDR_RTX START\n");
398
399 return (void *)pos;
400}
401
402static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
403{
404 if (++*pos >= sctp_assoc_hashsize)
405 return NULL;
406
407 return pos;
408}
409
410static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
411{
412 return;
413}
414
415static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
416{
417 struct sctp_hashbucket *head;
418 struct sctp_ep_common *epb;
419 struct sctp_association *assoc;
420 struct hlist_node *node;
421 struct sctp_transport *tsp;
422 int hash = *(loff_t *)v;
423
424 if (hash >= sctp_assoc_hashsize)
425 return -ENOMEM;
426
427 head = &sctp_assoc_hashtable[hash];
428 sctp_local_bh_disable();
429 read_lock(&head->lock);
430 sctp_for_each_hentry(epb, node, &head->chain) {
431 assoc = sctp_assoc(epb);
432 list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
433 transports) {
434 /*
435 * The remote address (ADDR)
436 */
437 tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
438 seq_printf(seq, " ");
439
440 /*
441 * The association ID (ASSOC_ID)
442 */
443 seq_printf(seq, "%d ", tsp->asoc->assoc_id);
444
445 /*
446 * If the Heartbeat is active (HB_ACT)
447 * Note: 1 = Active, 0 = Inactive
448 */
449 seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));
450
451 /*
452 * Retransmit time out (RTO)
453 */
454 seq_printf(seq, "%lu ", tsp->rto);
455
456 /*
457 * Maximum path retransmit count (PATH_MAX_RTX)
458 */
459 seq_printf(seq, "%d ", tsp->pathmaxrxt);
460
461 /*
462 * remote address retransmit count (REM_ADDR_RTX)
463 * Note: We don't have a way to tally this at the moment
 464 * so let's just leave it as zero for now
465 */
466 seq_printf(seq, "0 ");
467
468 /*
469 * remote address start time (START). This is also not
470 * currently implemented, but we can record it with a
471 * jiffies marker in a subsequent patch
472 */
473 seq_printf(seq, "0");
474
475 seq_printf(seq, "\n");
476 }
477 }
478
479 read_unlock(&head->lock);
480 sctp_local_bh_enable();
481
482 return 0;
483
484}
485
486static const struct seq_operations sctp_remaddr_ops = {
487 .start = sctp_remaddr_seq_start,
488 .next = sctp_remaddr_seq_next,
489 .stop = sctp_remaddr_seq_stop,
490 .show = sctp_remaddr_seq_show,
491};
492
493/* Cleanup the proc fs entry for 'remaddr' object. */
494void sctp_remaddr_proc_exit(void)
495{
496 remove_proc_entry("remaddr", proc_net_sctp);
497}
498
499static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
500{
501 return seq_open(file, &sctp_remaddr_ops);
502}
503
504static const struct file_operations sctp_remaddr_seq_fops = {
505 .open = sctp_remaddr_seq_open,
506 .read = seq_read,
507 .llseek = seq_lseek,
508 .release = seq_release,
509};
510
511int __init sctp_remaddr_proc_init(void)
512{
513 struct proc_dir_entry *p;
514
515 p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp);
516 if (!p)
517 return -ENOMEM;
518 p->proc_fops = &sctp_remaddr_seq_fops;
519
520 return 0;
521}
522
523void sctp_assoc_proc_exit(void)
524{
525 remove_proc_entry("remaddr", proc_net_sctp);
526}
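Once registered, the new file can be read like any other procfs entry; the header line is the one emitted by sctp_remaddr_seq_start() above, followed by one row per peer transport. A small userspace reader (illustrative):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/net/sctp/remaddr", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* header, then one line per transport */
		fclose(f);
		return 0;
	}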
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 9258dfe784ae..98c6a882016a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -113,9 +113,13 @@ static __init int sctp_proc_init(void)
113 goto out_eps_proc_init; 113 goto out_eps_proc_init;
114 if (sctp_assocs_proc_init()) 114 if (sctp_assocs_proc_init())
115 goto out_assocs_proc_init; 115 goto out_assocs_proc_init;
116 if (sctp_remaddr_proc_init())
117 goto out_remaddr_proc_init;
116 118
117 return 0; 119 return 0;
118 120
121out_remaddr_proc_init:
122 sctp_assocs_proc_exit();
119out_assocs_proc_init: 123out_assocs_proc_init:
120 sctp_eps_proc_exit(); 124 sctp_eps_proc_exit();
121out_eps_proc_init: 125out_eps_proc_init:
@@ -138,6 +142,7 @@ static void sctp_proc_exit(void)
138 sctp_snmp_proc_exit(); 142 sctp_snmp_proc_exit();
139 sctp_eps_proc_exit(); 143 sctp_eps_proc_exit();
140 sctp_assocs_proc_exit(); 144 sctp_assocs_proc_exit();
145 sctp_remaddr_proc_exit();
141 146
142 if (proc_net_sctp) { 147 if (proc_net_sctp) {
143 proc_net_sctp = NULL; 148 proc_net_sctp = NULL;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 23a9f1a95b7d..9732c797e8ed 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -190,20 +190,28 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
190 * unacknowledged DATA chunk. ... 190 * unacknowledged DATA chunk. ...
191 */ 191 */
192 if (!asoc->peer.sack_needed) { 192 if (!asoc->peer.sack_needed) {
193 /* We will need a SACK for the next packet. */ 193 asoc->peer.sack_cnt++;
194 asoc->peer.sack_needed = 1;
195 194
196 /* Set the SACK delay timeout based on the 195 /* Set the SACK delay timeout based on the
197 * SACK delay for the last transport 196 * SACK delay for the last transport
198 * data was received from, or the default 197 * data was received from, or the default
199 * for the association. 198 * for the association.
200 */ 199 */
201 if (trans) 200 if (trans) {
201 /* We will need a SACK for the next packet. */
202 if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
203 asoc->peer.sack_needed = 1;
204
202 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
203 trans->sackdelay; 206 trans->sackdelay;
204 else 207 } else {
208 /* We will need a SACK for the next packet. */
209 if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
210 asoc->peer.sack_needed = 1;
211
205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 212 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
206 asoc->sackdelay; 213 asoc->sackdelay;
214 }
207 215
208 /* Restart the SACK timer. */ 216 /* Restart the SACK timer. */
209 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 217 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
@@ -216,6 +224,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
216 goto nomem; 224 goto nomem;
217 225
218 asoc->peer.sack_needed = 0; 226 asoc->peer.sack_needed = 0;
227 asoc->peer.sack_cnt = 0;
219 228
220 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); 229 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
221 230
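The two branches above implement a single rule with two possible sources for the frequency (the transport's sackfreq or the association default). Condensed, with sack_cnt counting packets delayed since the last SACK (a restatement, not kernel code):

	/* sackfreq == 2 (the default) SACKs every second packet;
	 * sackfreq == 1 effectively disables delayed SACKs. */
	static int need_sack_for_next_packet(unsigned int sack_cnt,
					     unsigned int sackfreq)
	{
		return sack_cnt >= sackfreq - 1;
	}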
@@ -655,7 +664,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
655 struct sctp_association *asoc, 664 struct sctp_association *asoc,
656 struct sctp_sackhdr *sackh) 665 struct sctp_sackhdr *sackh)
657{ 666{
658 int err; 667 int err = 0;
659 668
660 if (sctp_outq_sack(&asoc->outqueue, sackh)) { 669 if (sctp_outq_sack(&asoc->outqueue, sackh)) {
661 /* There are no more TSNs awaiting SACK. */ 670 /* There are no more TSNs awaiting SACK. */
@@ -663,11 +672,6 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
663 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), 672 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
664 asoc->state, asoc->ep, asoc, NULL, 673 asoc->state, asoc->ep, asoc, NULL,
665 GFP_ATOMIC); 674 GFP_ATOMIC);
666 } else {
667 /* Windows may have opened, so we need
668 * to check if we have DATA to transmit
669 */
670 err = sctp_outq_flush(&asoc->outqueue, 0);
671 } 675 }
672 676
673 return err; 677 return err;
@@ -1472,8 +1476,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1472 break; 1476 break;
1473 1477
1474 case SCTP_CMD_DISCARD_PACKET: 1478 case SCTP_CMD_DISCARD_PACKET:
1475 /* We need to discard the whole packet. */ 1479 /* We need to discard the whole packet.
1480 * Uncork the queue since there might be
1481 * responses pending
1482 */
1476 chunk->pdiscard = 1; 1483 chunk->pdiscard = 1;
1484 if (asoc) {
1485 sctp_outq_uncork(&asoc->outqueue);
1486 local_cork = 0;
1487 }
1477 break; 1488 break;
1478 1489
1479 case SCTP_CMD_RTO_PENDING: 1490 case SCTP_CMD_RTO_PENDING:
@@ -1544,8 +1555,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1544 } 1555 }
1545 1556
1546out: 1557out:
1547 if (local_cork) 1558 /* If this is in response to a received chunk, wait until
1548 sctp_outq_uncork(&asoc->outqueue); 1559 * we are done with the packet to open the queue so that we don't
1560 * send multiple packets in response to a single request.
1561 */
1562 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1563 if (chunk->end_of_packet || chunk->singleton)
1564 sctp_outq_uncork(&asoc->outqueue);
1565 } else if (local_cork)
1566 sctp_outq_uncork(&asoc->outqueue);
1549 return error; 1567 return error;
1550nomem: 1568nomem:
1551 error = -ENOMEM; 1569 error = -ENOMEM;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index fcdb45d1071b..8848d329aa2c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -795,8 +795,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
795 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 795 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
796 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 796 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
797 797
798 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
799
800 /* This will send the COOKIE ACK */ 798 /* This will send the COOKIE ACK */
801 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 799 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
802 800
@@ -883,7 +881,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
883 if (asoc->autoclose) 881 if (asoc->autoclose)
884 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
885 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
886 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
887 884
888 /* It may also notify its ULP about the successful 885 /* It may also notify its ULP about the successful
889 * establishment of the association with a Communication Up 886 * establishment of the association with a Communication Up
@@ -1781,7 +1778,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1781 goto nomem; 1778 goto nomem;
1782 1779
1783 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1780 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1784 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1785 1781
1786 /* RFC 2960 5.1 Normal Establishment of an Association 1782 /* RFC 2960 5.1 Normal Establishment of an Association
1787 * 1783 *
@@ -1898,12 +1894,13 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1898 1894
1899 } 1895 }
1900 } 1896 }
1901 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1902 1897
1903 repl = sctp_make_cookie_ack(new_asoc, chunk); 1898 repl = sctp_make_cookie_ack(new_asoc, chunk);
1904 if (!repl) 1899 if (!repl)
1905 goto nomem; 1900 goto nomem;
1906 1901
1902 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1903
1907 if (ev) 1904 if (ev)
1908 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 1905 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
1909 SCTP_ULPEVENT(ev)); 1906 SCTP_ULPEVENT(ev));
@@ -1911,9 +1908,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1911 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 1908 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
1912 SCTP_ULPEVENT(ai_ev)); 1909 SCTP_ULPEVENT(ai_ev));
1913 1910
1914 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1915 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1916
1917 return SCTP_DISPOSITION_CONSUME; 1911 return SCTP_DISPOSITION_CONSUME;
1918 1912
1919nomem: 1913nomem:
@@ -3970,9 +3964,6 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
3970 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3964 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3971 break; 3965 break;
3972 case SCTP_CID_ACTION_DISCARD_ERR: 3966 case SCTP_CID_ACTION_DISCARD_ERR:
3973 /* Discard the packet. */
3974 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3975
3976 /* Generate an ERROR chunk as response. */ 3967 /* Generate an ERROR chunk as response. */
3977 hdr = unk_chunk->chunk_hdr; 3968 hdr = unk_chunk->chunk_hdr;
3978 err_chunk = sctp_make_op_error(asoc, unk_chunk, 3969 err_chunk = sctp_make_op_error(asoc, unk_chunk,
@@ -3982,6 +3973,9 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
3982 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 3973 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
3983 SCTP_CHUNK(err_chunk)); 3974 SCTP_CHUNK(err_chunk));
3984 } 3975 }
3976
3977 /* Discard the packet. */
3978 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3985 return SCTP_DISPOSITION_CONSUME; 3979 return SCTP_DISPOSITION_CONSUME;
3986 break; 3980 break;
3987 case SCTP_CID_ACTION_SKIP: 3981 case SCTP_CID_ACTION_SKIP:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0dbcde6758ea..6aba01b0ce4e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -116,7 +116,7 @@ static int sctp_memory_pressure;
116static atomic_t sctp_memory_allocated; 116static atomic_t sctp_memory_allocated;
117static atomic_t sctp_sockets_allocated; 117static atomic_t sctp_sockets_allocated;
118 118
119static void sctp_enter_memory_pressure(void) 119static void sctp_enter_memory_pressure(struct sock *sk)
120{ 120{
121 sctp_memory_pressure = 1; 121 sctp_memory_pressure = 1;
122} 122}
@@ -956,7 +956,8 @@ out:
956 */ 956 */
957static int __sctp_connect(struct sock* sk, 957static int __sctp_connect(struct sock* sk,
958 struct sockaddr *kaddrs, 958 struct sockaddr *kaddrs,
959 int addrs_size) 959 int addrs_size,
960 sctp_assoc_t *assoc_id)
960{ 961{
961 struct sctp_sock *sp; 962 struct sctp_sock *sp;
962 struct sctp_endpoint *ep; 963 struct sctp_endpoint *ep;
@@ -1111,6 +1112,8 @@ static int __sctp_connect(struct sock* sk,
1111 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1112 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1112 1113
1113 err = sctp_wait_for_connect(asoc, &timeo); 1114 err = sctp_wait_for_connect(asoc, &timeo);
1115 if (!err && assoc_id)
1116 *assoc_id = asoc->assoc_id;
1114 1117
1115 /* Don't free association on exit. */ 1118 /* Don't free association on exit. */
1116 asoc = NULL; 1119 asoc = NULL;
@@ -1128,7 +1131,8 @@ out_free:
1128/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1131/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1129 * 1132 *
1130 * API 8.9 1133 * API 8.9
1131 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt); 1134 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1135 * sctp_assoc_t *asoc);
1132 * 1136 *
1133 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1137 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1134 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1138 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
@@ -1144,8 +1148,10 @@ out_free:
1144 * representation is termed a "packed array" of addresses). The caller 1148 * representation is termed a "packed array" of addresses). The caller
1145 * specifies the number of addresses in the array with addrcnt. 1149 * specifies the number of addresses in the array with addrcnt.
1146 * 1150 *
1147 * On success, sctp_connectx() returns 0. On failure, sctp_connectx() returns 1151 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1148 * -1, and sets errno to the appropriate error code. 1152 * the association id of the new association. On failure, sctp_connectx()
1153 * returns -1, and sets errno to the appropriate error code. The assoc_id
1154 * is not touched by the kernel.
1149 * 1155 *
1150 * For SCTP, the port given in each socket address must be the same, or 1156 * For SCTP, the port given in each socket address must be the same, or
1151 * sctp_connectx() will fail, setting errno to EINVAL. 1157 * sctp_connectx() will fail, setting errno to EINVAL.
@@ -1182,11 +1188,12 @@ out_free:
1182 * addrs The pointer to the addresses in user land 1188 * addrs The pointer to the addresses in user land
1183 * addrssize Size of the addrs buffer 1189 * addrssize Size of the addrs buffer
1184 * 1190 *
1185 * Returns 0 if ok, <0 errno code on error. 1191 * Returns >=0 if ok, <0 errno code on error.
1186 */ 1192 */
1187SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, 1193SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
1188 struct sockaddr __user *addrs, 1194 struct sockaddr __user *addrs,
1189 int addrs_size) 1195 int addrs_size,
1196 sctp_assoc_t *assoc_id)
1190{ 1197{
1191 int err = 0; 1198 int err = 0;
1192 struct sockaddr *kaddrs; 1199 struct sockaddr *kaddrs;
@@ -1209,13 +1216,46 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1209 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 1216 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1210 err = -EFAULT; 1217 err = -EFAULT;
1211 } else { 1218 } else {
1212 err = __sctp_connect(sk, kaddrs, addrs_size); 1219 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1213 } 1220 }
1214 1221
1215 kfree(kaddrs); 1222 kfree(kaddrs);
1223
1216 return err; 1224 return err;
1217} 1225}
1218 1226
1227/*
1228 * This is an older interface. It's kept for backward compatibility
1229 * to the option that doesn't provide association id.
1230 */
1231SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
1232 struct sockaddr __user *addrs,
1233 int addrs_size)
1234{
1235 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1236}
1237
1238/*
 1239 * New interface for the API. Since the API is implemented through a
 1240 * socket option, we keep it simple by feeding the association id back
 1241 * as the return value of the call. Errors are always negative and
 1242 * association ids are always positive.
1243 */
1244SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1245 struct sockaddr __user *addrs,
1246 int addrs_size)
1247{
1248 sctp_assoc_t assoc_id = 0;
1249 int err = 0;
1250
1251 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1252
1253 if (err)
1254 return err;
1255 else
1256 return assoc_id;
1257}
1258
1219/* API 3.1.4 close() - UDP Style Syntax 1259/* API 3.1.4 close() - UDP Style Syntax
1220 * Applications use close() to perform graceful shutdown (as described in 1260 * Applications use close() to perform graceful shutdown (as described in
1221 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1261 * Section 10.1 of [SCTP]) on ALL the associations currently represented
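From userspace, the new convention means the setsockopt() call itself carries the association id on success. A hedged sketch (the packed-address buffer and its length are placeholders, and SCTP_SOCKOPT_CONNECTX is assumed to be exposed by the userspace headers):

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static int connectx_get_assoc_id(int sd, void *packed_addrs,
					 socklen_t addrs_len)
	{
		int ret = setsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX,
				     packed_addrs, addrs_len);
		if (ret < 0) {
			perror("SCTP_SOCKOPT_CONNECTX");
			return -1;
		}
		return ret;	/* positive return is the new association's id */
	}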
@@ -2305,74 +2345,98 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2305 return 0; 2345 return 0;
2306} 2346}
2307 2347
2308/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 2348/*
2309 * 2349 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2310 * This options will get or set the delayed ack timer. The time is set 2350 *
2311 * in milliseconds. If the assoc_id is 0, then this sets or gets the 2351 * This option will effect the way delayed acks are performed. This
2312 * endpoints default delayed ack timer value. If the assoc_id field is 2352 * option allows you to get or set the delayed ack time, in
2313 * non-zero, then the set or get effects the specified association. 2353 * milliseconds. It also allows changing the delayed ack frequency.
2314 * 2354 * Changing the frequency to 1 disables the delayed sack algorithm. If
2315 * struct sctp_assoc_value { 2355 * the assoc_id is 0, then this sets or gets the endpoints default
2316 * sctp_assoc_t assoc_id; 2356 * values. If the assoc_id field is non-zero, then the set or get
2317 * uint32_t assoc_value; 2357 * effects the specified association for the one to many model (the
2318 * }; 2358 * assoc_id field is ignored by the one to one model). Note that if
2359 * sack_delay or sack_freq are 0 when setting this option, then the
2360 * current values will remain unchanged.
2361 *
2362 * struct sctp_sack_info {
2363 * sctp_assoc_t sack_assoc_id;
2364 * uint32_t sack_delay;
2365 * uint32_t sack_freq;
2366 * };
2319 * 2367 *
2320 * assoc_id - This parameter, indicates which association the 2368 * sack_assoc_id - This parameter, indicates which association the user
2321 * user is preforming an action upon. Note that if 2369 * is performing an action upon. Note that if this field's value is
2322 * this field's value is zero then the endpoints 2370 * zero then the endpoints default value is changed (affecting future
2323 * default value is changed (effecting future 2371 * associations only).
2324 * associations only).
2325 * 2372 *
2326 * assoc_value - This parameter contains the number of milliseconds 2373 * sack_delay - This parameter contains the number of milliseconds that
2327 * that the user is requesting the delayed ACK timer 2374 * the user is requesting the delayed ACK timer be set to. Note that
2328 * be set to. Note that this value is defined in 2375 * this value is defined in the standard to be between 200 and 500
2329 * the standard to be between 200 and 500 milliseconds. 2376 * milliseconds.
2330 * 2377 *
2331 * Note: a value of zero will leave the value alone, 2378 * sack_freq - This parameter contains the number of packets that must
2332 * but disable SACK delay. A non-zero value will also 2379 * be received before a sack is sent without waiting for the delay
2333 * enable SACK delay. 2380 * timer to expire. The default value for this is 2, setting this
2381 * value to 1 will disable the delayed sack algorithm.
2334 */ 2382 */
2335 2383
2336static int sctp_setsockopt_delayed_ack_time(struct sock *sk, 2384static int sctp_setsockopt_delayed_ack(struct sock *sk,
2337 char __user *optval, int optlen) 2385 char __user *optval, int optlen)
2338{ 2386{
2339 struct sctp_assoc_value params; 2387 struct sctp_sack_info params;
2340 struct sctp_transport *trans = NULL; 2388 struct sctp_transport *trans = NULL;
2341 struct sctp_association *asoc = NULL; 2389 struct sctp_association *asoc = NULL;
2342 struct sctp_sock *sp = sctp_sk(sk); 2390 struct sctp_sock *sp = sctp_sk(sk);
2343 2391
2344 if (optlen != sizeof(struct sctp_assoc_value)) 2392 if (optlen == sizeof(struct sctp_sack_info)) {
2345 return - EINVAL; 2393 if (copy_from_user(&params, optval, optlen))
2394 return -EFAULT;
2346 2395
2347 if (copy_from_user(&params, optval, optlen)) 2396 if (params.sack_delay == 0 && params.sack_freq == 0)
2348 return -EFAULT; 2397 return 0;
2398 } else if (optlen == sizeof(struct sctp_assoc_value)) {
 2399 printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
 2400 "in delayed_ack socket option deprecated\n");
 2401 printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
2402 if (copy_from_user(&params, optval, optlen))
2403 return -EFAULT;
2404
2405 if (params.sack_delay == 0)
2406 params.sack_freq = 1;
2407 else
2408 params.sack_freq = 0;
2409 } else
 2410 return -EINVAL;
2349 2411
2350 /* Validate value parameter. */ 2412 /* Validate value parameter. */
2351 if (params.assoc_value > 500) 2413 if (params.sack_delay > 500)
2352 return -EINVAL; 2414 return -EINVAL;
2353 2415
2354 /* Get association, if assoc_id != 0 and the socket is a one 2416 /* Get association, if sack_assoc_id != 0 and the socket is a one
2355 * to many style socket, and an association was not found, then 2417 * to many style socket, and an association was not found, then
2356 * the id was invalid. 2418 * the id was invalid.
2357 */ 2419 */
2358 asoc = sctp_id2assoc(sk, params.assoc_id); 2420 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2359 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 2421 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2360 return -EINVAL; 2422 return -EINVAL;
2361 2423
2362 if (params.assoc_value) { 2424 if (params.sack_delay) {
2363 if (asoc) { 2425 if (asoc) {
2364 asoc->sackdelay = 2426 asoc->sackdelay =
2365 msecs_to_jiffies(params.assoc_value); 2427 msecs_to_jiffies(params.sack_delay);
2366 asoc->param_flags = 2428 asoc->param_flags =
2367 (asoc->param_flags & ~SPP_SACKDELAY) | 2429 (asoc->param_flags & ~SPP_SACKDELAY) |
2368 SPP_SACKDELAY_ENABLE; 2430 SPP_SACKDELAY_ENABLE;
2369 } else { 2431 } else {
2370 sp->sackdelay = params.assoc_value; 2432 sp->sackdelay = params.sack_delay;
2371 sp->param_flags = 2433 sp->param_flags =
2372 (sp->param_flags & ~SPP_SACKDELAY) | 2434 (sp->param_flags & ~SPP_SACKDELAY) |
2373 SPP_SACKDELAY_ENABLE; 2435 SPP_SACKDELAY_ENABLE;
2374 } 2436 }
2375 } else { 2437 }
2438
2439 if (params.sack_freq == 1) {
2376 if (asoc) { 2440 if (asoc) {
2377 asoc->param_flags = 2441 asoc->param_flags =
2378 (asoc->param_flags & ~SPP_SACKDELAY) | 2442 (asoc->param_flags & ~SPP_SACKDELAY) |
@@ -2382,22 +2446,40 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2382 (sp->param_flags & ~SPP_SACKDELAY) | 2446 (sp->param_flags & ~SPP_SACKDELAY) |
2383 SPP_SACKDELAY_DISABLE; 2447 SPP_SACKDELAY_DISABLE;
2384 } 2448 }
2449 } else if (params.sack_freq > 1) {
2450 if (asoc) {
2451 asoc->sackfreq = params.sack_freq;
2452 asoc->param_flags =
2453 (asoc->param_flags & ~SPP_SACKDELAY) |
2454 SPP_SACKDELAY_ENABLE;
2455 } else {
2456 sp->sackfreq = params.sack_freq;
2457 sp->param_flags =
2458 (sp->param_flags & ~SPP_SACKDELAY) |
2459 SPP_SACKDELAY_ENABLE;
2460 }
2385 } 2461 }
2386 2462
2387 /* If change is for association, also apply to each transport. */ 2463 /* If change is for association, also apply to each transport. */
2388 if (asoc) { 2464 if (asoc) {
2389 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2465 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2390 transports) { 2466 transports) {
2391 if (params.assoc_value) { 2467 if (params.sack_delay) {
2392 trans->sackdelay = 2468 trans->sackdelay =
2393 msecs_to_jiffies(params.assoc_value); 2469 msecs_to_jiffies(params.sack_delay);
2394 trans->param_flags = 2470 trans->param_flags =
2395 (trans->param_flags & ~SPP_SACKDELAY) | 2471 (trans->param_flags & ~SPP_SACKDELAY) |
2396 SPP_SACKDELAY_ENABLE; 2472 SPP_SACKDELAY_ENABLE;
2397 } else { 2473 }
2474 if (params.sack_freq == 1) {
2398 trans->param_flags = 2475 trans->param_flags =
2399 (trans->param_flags & ~SPP_SACKDELAY) | 2476 (trans->param_flags & ~SPP_SACKDELAY) |
2400 SPP_SACKDELAY_DISABLE; 2477 SPP_SACKDELAY_DISABLE;
2478 } else if (params.sack_freq > 1) {
2479 trans->sackfreq = params.sack_freq;
2480 trans->param_flags =
2481 (trans->param_flags & ~SPP_SACKDELAY) |
2482 SPP_SACKDELAY_ENABLE;
2401 } 2483 }
2402 } 2484 }
2403 } 2485 }
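From an application's point of view, the renamed option is driven with the new structure. A sketch that sets a 200 ms delay and forces a SACK at least every fourth packet on the endpoint defaults (assumes a userspace sctp.h that carries struct sctp_sack_info and SCTP_DELAYED_ACK):

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <string.h>
	#include <sys/socket.h>

	static int set_delayed_sack(int sd)
	{
		struct sctp_sack_info params;

		memset(&params, 0, sizeof(params));
		params.sack_assoc_id = 0;	/* 0: change the endpoint defaults */
		params.sack_delay = 200;	/* milliseconds, must be <= 500 */
		params.sack_freq = 4;		/* 1 would disable delayed SACKs */

		return setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_ACK,
				  &params, sizeof(params));
	}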
@@ -3164,10 +3246,18 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3164 optlen, SCTP_BINDX_REM_ADDR); 3246 optlen, SCTP_BINDX_REM_ADDR);
3165 break; 3247 break;
3166 3248
3249 case SCTP_SOCKOPT_CONNECTX_OLD:
3250 /* 'optlen' is the size of the addresses buffer. */
3251 retval = sctp_setsockopt_connectx_old(sk,
3252 (struct sockaddr __user *)optval,
3253 optlen);
3254 break;
3255
3167 case SCTP_SOCKOPT_CONNECTX: 3256 case SCTP_SOCKOPT_CONNECTX:
3168 /* 'optlen' is the size of the addresses buffer. */ 3257 /* 'optlen' is the size of the addresses buffer. */
3169 retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, 3258 retval = sctp_setsockopt_connectx(sk,
3170 optlen); 3259 (struct sockaddr __user *)optval,
3260 optlen);
3171 break; 3261 break;
3172 3262
3173 case SCTP_DISABLE_FRAGMENTS: 3263 case SCTP_DISABLE_FRAGMENTS:
@@ -3186,8 +3276,8 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3186 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3276 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3187 break; 3277 break;
3188 3278
3189 case SCTP_DELAYED_ACK_TIME: 3279 case SCTP_DELAYED_ACK:
3190 retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); 3280 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3191 break; 3281 break;
3192 case SCTP_PARTIAL_DELIVERY_POINT: 3282 case SCTP_PARTIAL_DELIVERY_POINT:
3193 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3283 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
@@ -3294,7 +3384,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
3294 /* Pass correct addr len to common routine (so it knows there 3384 /* Pass correct addr len to common routine (so it knows there
3295 * is only one address being passed. 3385 * is only one address being passed.
3296 */ 3386 */
3297 err = __sctp_connect(sk, addr, af->sockaddr_len); 3387 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3298 } 3388 }
3299 3389
3300 sctp_release_sock(sk); 3390 sctp_release_sock(sk);
@@ -3446,6 +3536,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3446 sp->pathmaxrxt = sctp_max_retrans_path; 3536 sp->pathmaxrxt = sctp_max_retrans_path;
3447 sp->pathmtu = 0; // allow default discovery 3537 sp->pathmtu = 0; // allow default discovery
3448 sp->sackdelay = sctp_sack_timeout; 3538 sp->sackdelay = sctp_sack_timeout;
3539 sp->sackfreq = 2;
3449 sp->param_flags = SPP_HB_ENABLE | 3540 sp->param_flags = SPP_HB_ENABLE |
3450 SPP_PMTUD_ENABLE | 3541 SPP_PMTUD_ENABLE |
3451 SPP_SACKDELAY_ENABLE; 3542 SPP_SACKDELAY_ENABLE;
@@ -3497,7 +3588,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3497} 3588}
3498 3589
3499/* Cleanup any SCTP per socket resources. */ 3590/* Cleanup any SCTP per socket resources. */
3500SCTP_STATIC int sctp_destroy_sock(struct sock *sk) 3591SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3501{ 3592{
3502 struct sctp_endpoint *ep; 3593 struct sctp_endpoint *ep;
3503 3594
@@ -3507,7 +3598,6 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
3507 ep = sctp_sk(sk)->ep; 3598 ep = sctp_sk(sk)->ep;
3508 sctp_endpoint_free(ep); 3599 sctp_endpoint_free(ep);
3509 atomic_dec(&sctp_sockets_allocated); 3600 atomic_dec(&sctp_sockets_allocated);
3510 return 0;
3511} 3601}
3512 3602
3513/* API 4.1.7 shutdown() - TCP Style Syntax 3603/* API 4.1.7 shutdown() - TCP Style Syntax
@@ -3999,70 +4089,91 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
3999 return 0; 4089 return 0;
4000} 4090}
4001 4091
4002/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 4092/*
4003 * 4093 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4004 * This options will get or set the delayed ack timer. The time is set 4094 *
4005 * in milliseconds. If the assoc_id is 0, then this sets or gets the 4095 * This option will effect the way delayed acks are performed. This
4006 * endpoints default delayed ack timer value. If the assoc_id field is 4096 * option allows you to get or set the delayed ack time, in
4007 * non-zero, then the set or get effects the specified association. 4097 * milliseconds. It also allows changing the delayed ack frequency.
4008 * 4098 * Changing the frequency to 1 disables the delayed sack algorithm. If
4009 * struct sctp_assoc_value { 4099 * the assoc_id is 0, then this sets or gets the endpoints default
4010 * sctp_assoc_t assoc_id; 4100 * values. If the assoc_id field is non-zero, then the set or get
4011 * uint32_t assoc_value; 4101 * affects the specified association for the one to many model (the
4012 * }; 4102 * assoc_id field is ignored by the one to one model). Note that if
4103 * sack_delay or sack_freq are 0 when setting this option, then the
4104 * current values will remain unchanged.
4105 *
4106 * struct sctp_sack_info {
4107 * sctp_assoc_t sack_assoc_id;
4108 * uint32_t sack_delay;
4109 * uint32_t sack_freq;
4110 * };
4013 * 4111 *
4014 * assoc_id - This parameter, indicates which association the 4112 * sack_assoc_id - This parameter, indicates which association the user
4015 * user is preforming an action upon. Note that if 4113 * is performing an action upon. Note that if this field's value is
4016 * this field's value is zero then the endpoints 4114 * zero then the endpoints default value is changed (affecting future
4017 * default value is changed (effecting future 4115 * associations only).
4018 * associations only).
4019 * 4116 *
4020 * assoc_value - This parameter contains the number of milliseconds 4117 * sack_delay - This parameter contains the number of milliseconds that
4021 * that the user is requesting the delayed ACK timer 4118 * the user is requesting the delayed ACK timer be set to. Note that
4022 * be set to. Note that this value is defined in 4119 * this value is defined in the standard to be between 200 and 500
4023 * the standard to be between 200 and 500 milliseconds. 4120 * milliseconds.
4024 * 4121 *
4025 * Note: a value of zero will leave the value alone, 4122 * sack_freq - This parameter contains the number of packets that must
4026 * but disable SACK delay. A non-zero value will also 4123 * be received before a sack is sent without waiting for the delay
4027 * enable SACK delay. 4124 * timer to expire. The default value for this is 2; setting this
4125 * value to 1 will disable the delayed sack algorithm.
4028 */ 4126 */
4029static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, 4127static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4030 char __user *optval, 4128 char __user *optval,
4031 int __user *optlen) 4129 int __user *optlen)
4032{ 4130{
4033 struct sctp_assoc_value params; 4131 struct sctp_sack_info params;
4034 struct sctp_association *asoc = NULL; 4132 struct sctp_association *asoc = NULL;
4035 struct sctp_sock *sp = sctp_sk(sk); 4133 struct sctp_sock *sp = sctp_sk(sk);
4036 4134
4037 if (len < sizeof(struct sctp_assoc_value)) 4135 if (len >= sizeof(struct sctp_sack_info)) {
4038 return - EINVAL; 4136 len = sizeof(struct sctp_sack_info);
4039
4040 len = sizeof(struct sctp_assoc_value);
4041 4137
4042 if (copy_from_user(&params, optval, len)) 4138 if (copy_from_user(&params, optval, len))
4043 return -EFAULT; 4139 return -EFAULT;
4140 } else if (len == sizeof(struct sctp_assoc_value)) {
 4141 printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
 4142 "in delayed_ack socket option deprecated\n");
 4143 printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
4144 if (copy_from_user(&params, optval, len))
4145 return -EFAULT;
4146 } else
 4147 return -EINVAL;
4044 4148
4045 /* Get association, if assoc_id != 0 and the socket is a one 4149 /* Get association, if sack_assoc_id != 0 and the socket is a one
4046 * to many style socket, and an association was not found, then 4150 * to many style socket, and an association was not found, then
4047 * the id was invalid. 4151 * the id was invalid.
4048 */ 4152 */
4049 asoc = sctp_id2assoc(sk, params.assoc_id); 4153 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4050 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 4154 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4051 return -EINVAL; 4155 return -EINVAL;
4052 4156
4053 if (asoc) { 4157 if (asoc) {
4054 /* Fetch association values. */ 4158 /* Fetch association values. */
4055 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) 4159 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4056 params.assoc_value = jiffies_to_msecs( 4160 params.sack_delay = jiffies_to_msecs(
4057 asoc->sackdelay); 4161 asoc->sackdelay);
4058 else 4162 params.sack_freq = asoc->sackfreq;
4059 params.assoc_value = 0; 4163
4164 } else {
4165 params.sack_delay = 0;
4166 params.sack_freq = 1;
4167 }
4060 } else { 4168 } else {
4061 /* Fetch socket values. */ 4169 /* Fetch socket values. */
4062 if (sp->param_flags & SPP_SACKDELAY_ENABLE) 4170 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4063 params.assoc_value = sp->sackdelay; 4171 params.sack_delay = sp->sackdelay;
4064 else 4172 params.sack_freq = sp->sackfreq;
4065 params.assoc_value = 0; 4173 } else {
4174 params.sack_delay = 0;
4175 params.sack_freq = 1;
4176 }
4066 } 4177 }
4067 4178
4068 if (copy_to_user(optval, &params, len)) 4179 if (copy_to_user(optval, &params, len))
@@ -4112,6 +4223,8 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
4112 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 4223 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4113 return -EFAULT; 4224 return -EFAULT;
4114 4225
4226 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD "
4227 "socket option deprecated\n");
4115 /* For UDP-style sockets, id specifies the association to query. */ 4228 /* For UDP-style sockets, id specifies the association to query. */
4116 asoc = sctp_id2assoc(sk, id); 4229 asoc = sctp_id2assoc(sk, id);
4117 if (!asoc) 4230 if (!asoc)
@@ -4151,6 +4264,9 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4151 4264
4152 if (getaddrs.addr_num <= 0) return -EINVAL; 4265 if (getaddrs.addr_num <= 0) return -EINVAL;
4153 4266
4267 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD "
4268 "socket option deprecated\n");
4269
4154 /* For UDP-style sockets, id specifies the association to query. */ 4270 /* For UDP-style sockets, id specifies the association to query. */
4155 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4271 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4156 if (!asoc) 4272 if (!asoc)
@@ -4244,6 +4360,9 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4244 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 4360 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4245 return -EFAULT; 4361 return -EFAULT;
4246 4362
4363 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD "
4364 "socket option deprecated\n");
4365
4247 /* 4366 /*
4248 * For UDP-style sockets, id specifies the association to query. 4367 * For UDP-style sockets, id specifies the association to query.
4249 * If the id field is set to the value '0' then the locally bound 4368 * If the id field is set to the value '0' then the locally bound
@@ -4404,6 +4523,10 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4404 if (getaddrs.addr_num <= 0 || 4523 if (getaddrs.addr_num <= 0 ||
4405 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) 4524 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
4406 return -EINVAL; 4525 return -EINVAL;
4526
4527 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD "
4528 "socket option deprecated\n");
4529
4407 /* 4530 /*
4408 * For UDP-style sockets, id specifies the association to query. 4531 * For UDP-style sockets, id specifies the association to query.
4409 * If the id field is set to the value '0' then the locally bound 4532 * If the id field is set to the value '0' then the locally bound
@@ -5220,8 +5343,8 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5220 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5343 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5221 optlen); 5344 optlen);
5222 break; 5345 break;
5223 case SCTP_DELAYED_ACK_TIME: 5346 case SCTP_DELAYED_ACK:
5224 retval = sctp_getsockopt_delayed_ack_time(sk, len, optval, 5347 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
5225 optlen); 5348 optlen);
5226 break; 5349 break;
5227 case SCTP_INITMSG: 5350 case SCTP_INITMSG:
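
Usage note: after this change, SCTP_DELAYED_ACK is queried with struct sctp_sack_info rather than struct sctp_assoc_value. The following userspace sketch is not part of the patch; it assumes lksctp-style headers that define the new structure and an already-created SCTP socket sd:

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Print the delayed-SACK settings of SCTP socket 'sd'. */
	static void show_delayed_sack(int sd)
	{
		struct sctp_sack_info info;
		socklen_t len = sizeof(info);

		memset(&info, 0, sizeof(info));
		info.sack_assoc_id = 0;	/* socket-level values */

		if (getsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_ACK,
			       &info, &len) < 0) {
			perror("getsockopt(SCTP_DELAYED_ACK)");
			return;
		}
		printf("sack_delay=%u ms, sack_freq=%u\n",
		       info.sack_delay, info.sack_freq);
	}

A sack_freq of 1 disables delayed SACKs entirely, which matches the semantics documented in the comment block above.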
diff --git a/net/socket.c b/net/socket.c
index 66c4a8cf6db9..81fe82513046 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -90,6 +90,7 @@
 #include <asm/unistd.h>
 
 #include <net/compat.h>
+#include <net/wext.h>
 
 #include <net/sock.h>
 #include <linux/netfilter.h>
@@ -2210,10 +2211,19 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd,
 {
 	struct socket *sock = file->private_data;
 	int ret = -ENOIOCTLCMD;
+	struct sock *sk;
+	struct net *net;
+
+	sk = sock->sk;
+	net = sock_net(sk);
 
 	if (sock->ops->compat_ioctl)
 		ret = sock->ops->compat_ioctl(sock, cmd, arg);
 
+	if (ret == -ENOIOCTLCMD &&
+	    (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
+		ret = compat_wext_handle_ioctl(net, cmd, arg);
+
 	return ret;
 }
 #endif
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 834a83199bdf..853a4142cea1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -33,8 +33,6 @@
  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $Id$
  */
 
 
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index b4f0525f91af..007c1a6708ee 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -4,7 +4,6 @@
  * Begun April 1, 1996, Mike Shaver.
  * Added /proc/sys/net directories for each protocol family. [MS]
  *
- * $Log: sysctl_net.c,v $
  * Revision 1.2  1996/05/08  20:24:40  shaver
  * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
  * NET_IPV4_IP_FORWARD.
@@ -40,6 +39,27 @@ static struct ctl_table_root net_sysctl_root = {
 	.lookup = net_ctl_header_lookup,
 };
 
+static LIST_HEAD(net_sysctl_ro_tables);
+static struct list_head *net_ctl_ro_header_lookup(struct ctl_table_root *root,
+		struct nsproxy *namespaces)
+{
+	return &net_sysctl_ro_tables;
+}
+
+static int net_ctl_ro_header_perms(struct ctl_table_root *root,
+		struct nsproxy *namespaces, struct ctl_table *table)
+{
+	if (namespaces->net_ns == &init_net)
+		return table->mode;
+	else
+		return table->mode & ~0222;
+}
+
+static struct ctl_table_root net_sysctl_ro_root = {
+	.lookup = net_ctl_ro_header_lookup,
+	.permissions = net_ctl_ro_header_perms,
+};
+
 static int sysctl_net_init(struct net *net)
 {
 	INIT_LIST_HEAD(&net->sysctl_table_headers);
@@ -64,6 +84,7 @@ static __init int sysctl_init(void)
 	if (ret)
 		goto out;
 	register_sysctl_root(&net_sysctl_root);
+	register_sysctl_root(&net_sysctl_ro_root);
 out:
 	return ret;
 }
@@ -80,6 +101,14 @@ struct ctl_table_header *register_net_sysctl_table(struct net *net,
 }
 EXPORT_SYMBOL_GPL(register_net_sysctl_table);
 
+struct ctl_table_header *register_net_sysctl_rotable(const
+		struct ctl_path *path, struct ctl_table *table)
+{
+	return __register_sysctl_paths(&net_sysctl_ro_root,
+			&init_nsproxy, path, table);
+}
+EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
+
 void unregister_net_sysctl_table(struct ctl_table_header *header)
 {
 	unregister_sysctl_table(header);
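
Usage note: register_net_sysctl_rotable() hangs a table off the new read-only root, so every network namespace sees the entries but the permissions hook strips the write bits outside init_net. A hypothetical caller might look like the sketch below; the table, path, and variable names are invented for illustration:

	#include <linux/sysctl.h>
	#include <net/net_namespace.h>

	static int example_value;	/* hypothetical tunable */

	/* Readable everywhere; writable only from init_net, because the
	 * ro root masks out the 0222 bits for other namespaces. */
	static struct ctl_table example_table[] = {
		{
			.ctl_name	= CTL_UNNUMBERED,
			.procname	= "example_value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{ }
	};

	static struct ctl_path example_path[] = {
		{ .procname = "net", .ctl_name = CTL_NET, },
		{ .procname = "example", .ctl_name = CTL_UNNUMBERED, },
		{ }
	};

	static struct ctl_table_header *example_hdr;

	static int __init example_init(void)
	{
		example_hdr = register_net_sysctl_rotable(example_path,
							  example_table);
		return example_hdr ? 0 : -ENOMEM;
	}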
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e7880172ef19..b1ff16aa4bdb 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -276,7 +276,7 @@ static void bclink_send_nack(struct node *n_ptr)
 	if (buf) {
 		msg = buf_msg(buf);
 		msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
-			 TIPC_OK, INT_H_SIZE, n_ptr->addr);
+			 INT_H_SIZE, n_ptr->addr);
 		msg_set_mc_netid(msg, tipc_net_id);
 		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
 		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -571,7 +571,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		assert(tipc_cltr_bcast_nodes.count != 0);
 		bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
 		msg = buf_msg(buf);
-		msg_set_non_seq(msg);
+		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
 	}
 
@@ -611,7 +611,7 @@ swap:
 		bcbearer->bpairs[bp_index].secondary = p;
 update:
 	if (bcbearer->remains_new.count == 0)
-		return TIPC_OK;
+		return 0;
 
 	bcbearer->remains = bcbearer->remains_new;
 	}
@@ -620,7 +620,7 @@ update:
 
 	bcbearer->bearer.publ.blocked = 1;
 	bcl->stats.bearer_congs++;
-	return ~TIPC_OK;
+	return 1;
 }
 
 /**
@@ -756,7 +756,7 @@ int tipc_bclink_reset_stats(void)
 	spin_lock_bh(&bc_lock);
 	memset(&bcl->stats, 0, sizeof(bcl->stats));
 	spin_unlock_bh(&bc_lock);
-	return TIPC_OK;
+	return 0;
 }
 
 int tipc_bclink_set_queue_limits(u32 limit)
@@ -769,7 +769,7 @@ int tipc_bclink_set_queue_limits(u32 limit)
 	spin_lock_bh(&bc_lock);
 	tipc_link_set_queue_limits(bcl, limit);
 	spin_unlock_bh(&bc_lock);
-	return TIPC_OK;
+	return 0;
 }
 
 int tipc_bclink_init(void)
@@ -810,7 +810,7 @@ int tipc_bclink_init(void)
 		tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
 	}
 
-	return TIPC_OK;
+	return 0;
 }
 
 void tipc_bclink_stop(void)
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 271a375b49b7..6a9aba3edd08 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -370,7 +370,7 @@ void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
  */
 static int bearer_push(struct bearer *b_ptr)
 {
-	u32 res = TIPC_OK;
+	u32 res = 0;
 	struct link *ln, *tln;
 
 	if (b_ptr->publ.blocked)
@@ -607,7 +607,7 @@ int tipc_block_bearer(const char *name)
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
 	read_unlock_bh(&tipc_net_lock);
-	return TIPC_OK;
+	return 0;
 }
 
 /**
@@ -645,7 +645,7 @@ static int bearer_disable(const char *name)
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
 	memset(b_ptr, 0, sizeof(struct bearer));
-	return TIPC_OK;
+	return 0;
 }
 
 int tipc_disable_bearer(const char *name)
@@ -668,7 +668,7 @@ int tipc_bearer_init(void)
 	tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
 	media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
 	if (tipc_bearers && media_list) {
-		res = TIPC_OK;
+		res = 0;
 	} else {
 		kfree(tipc_bearers);
 		kfree(media_list);
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 4bb3404f610b..46ee6c58532d 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
 	if (buf) {
 		msg = buf_msg(buf);
 		memset((char *)msg, 0, size);
-		msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
+		msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
 	}
 	return buf;
 }
@@ -571,6 +571,6 @@ exit:
 int tipc_cltr_init(void)
 {
 	tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
-	return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+	return tipc_cltr_create(tipc_own_addr) ? 0 : -ENOMEM;
 }
 
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c71337a22d33..ca3544d030c7 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2006, Wind River Systems
+ * Copyright (c) 2004-2007, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -293,7 +293,6 @@ static struct sk_buff *cfg_set_own_addr(void)
 	if (tipc_mode == TIPC_NET_MODE)
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 			" (cannot change node address once assigned)");
-	tipc_own_addr = addr;
 
 	/*
 	 * Must release all spinlocks before calling start_net() because
@@ -306,7 +305,7 @@ static struct sk_buff *cfg_set_own_addr(void)
 	 */
 
 	spin_unlock_bh(&config_lock);
-	tipc_core_start_net();
+	tipc_core_start_net(addr);
 	spin_lock_bh(&config_lock);
 	return tipc_cfg_reply_none();
 }
@@ -529,7 +528,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 		break;
 #endif
 	case TIPC_CMD_SET_LOG_SIZE:
-		rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space);
+		rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
 		break;
 	case TIPC_CMD_DUMP_LOG:
 		rep_tlv_buf = tipc_log_dump();
@@ -602,6 +601,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 	case TIPC_CMD_GET_NETID:
 		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
 		break;
+	case TIPC_CMD_NOT_NET_ADMIN:
+		rep_tlv_buf =
+			tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
+		break;
 	default:
 		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 			" (unknown command)");
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 740aac5cdfb6..3256bd7d398f 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
 #include "config.h"
 
 
-#define TIPC_MOD_VER "1.6.3"
+#define TIPC_MOD_VER "1.6.4"
 
 #ifndef CONFIG_TIPC_ZONES
 #define CONFIG_TIPC_ZONES 3
@@ -117,11 +117,11 @@ void tipc_core_stop_net(void)
  * start_net - start TIPC networking sub-systems
  */
 
-int tipc_core_start_net(void)
+int tipc_core_start_net(unsigned long addr)
 {
 	int res;
 
-	if ((res = tipc_net_start()) ||
+	if ((res = tipc_net_start(addr)) ||
 	    (res = tipc_eth_media_start())) {
 		tipc_core_stop_net();
 	}
@@ -164,8 +164,7 @@ int tipc_core_start(void)
 	tipc_mode = TIPC_NODE_MODE;
 
 	if ((res = tipc_handler_start()) ||
-	    (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
-				       tipc_random)) ||
+	    (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) ||
 	    (res = tipc_reg_start()) ||
 	    (res = tipc_nametbl_init()) ||
 	    (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
@@ -182,7 +181,7 @@ static int __init tipc_init(void)
 {
 	int res;
 
-	tipc_log_reinit(CONFIG_TIPC_LOG);
+	tipc_log_resize(CONFIG_TIPC_LOG);
 	info("Activated (version " TIPC_MOD_VER
 	     " compiled " __DATE__ " " __TIME__ ")\n");
 
@@ -209,7 +208,7 @@ static void __exit tipc_exit(void)
 	tipc_core_stop_net();
 	tipc_core_stop();
 	info("Deactivated\n");
-	tipc_log_stop();
+	tipc_log_resize(0);
 }
 
 module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5a0e4878d3b7..a881f92a8537 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
 * net/tipc/core.h: Include file for TIPC global declarations
 *
 * Copyright (c) 2005-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -59,84 +59,108 @@
 #include <linux/vmalloc.h>
 
 /*
- * TIPC debugging code
+ * TIPC sanity test macros
 */
 
 #define assert(i)  BUG_ON(!(i))
 
-struct tipc_msg;
-extern struct print_buf *TIPC_NULL, *TIPC_CONS, *TIPC_LOG;
-extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
-void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
-void tipc_printf(struct print_buf *, const char *fmt, ...);
-void tipc_dump(struct print_buf*,const char *fmt, ...);
-
-#ifdef CONFIG_TIPC_DEBUG
-
 /*
- * TIPC debug support included:
- * - system messages are printed to TIPC_OUTPUT print buffer
- * - debug messages are printed to DBG_OUTPUT print buffer
+ * TIPC system monitoring code
 */
 
-#define err(fmt, arg...)  tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
-#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
-#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
+/*
+ * TIPC's print buffer subsystem supports the following print buffers:
+ *
+ * TIPC_NULL : null buffer (i.e. print nowhere)
+ * TIPC_CONS : system console
+ * TIPC_LOG  : TIPC log buffer
+ * &buf      : user-defined buffer (struct print_buf *)
+ *
+ * Note: TIPC_LOG is configured to echo its output to the system console;
+ * user-defined buffers can be configured to do the same thing.
+ */
 
-#define dbg(fmt, arg...)  do {if (DBG_OUTPUT != TIPC_NULL) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
-#define msg_dbg(msg, txt) do {if (DBG_OUTPUT != TIPC_NULL) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
-#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
+extern struct print_buf *const TIPC_NULL;
+extern struct print_buf *const TIPC_CONS;
+extern struct print_buf *const TIPC_LOG;
 
+void tipc_printf(struct print_buf *, const char *fmt, ...);
 
 /*
- * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
- * while DBG_OUTPUT is the null print buffer.  These defaults can be changed
- * here, or on a per .c file basis, by redefining these symbols.  The following
- * print buffer options are available:
- *
- * TIPC_NULL               : null buffer (i.e. print nowhere)
- * TIPC_CONS               : system console
- * TIPC_LOG                : TIPC log buffer
- * &buf                    : user-defined buffer (struct print_buf *)
- * TIPC_TEE(&buf_a,&buf_b) : list of buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG))
+ * TIPC_OUTPUT is the destination print buffer for system messages.
 */
 
 #ifndef TIPC_OUTPUT
-#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG)
-#endif
-
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT TIPC_NULL
+#define TIPC_OUTPUT TIPC_LOG
 #endif
 
-#else
-
 /*
- * TIPC debug support not included:
- * - system messages are printed to system console
- * - debug messages are not printed
+ * TIPC can be configured to send system messages to TIPC_OUTPUT
+ * or to the system console only.
 */
 
+#ifdef CONFIG_TIPC_DEBUG
+
+#define err(fmt, arg...)  tipc_printf(TIPC_OUTPUT, \
+				      KERN_ERR "TIPC: " fmt, ## arg)
+#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
+				      KERN_WARNING "TIPC: " fmt, ## arg)
+#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
+				      KERN_NOTICE "TIPC: " fmt, ## arg)
+
+#else
+
 #define err(fmt, arg...)  printk(KERN_ERR "TIPC: " fmt , ## arg)
 #define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
 #define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
 
-#define dbg(fmt, arg...) do {} while (0)
-#define msg_dbg(msg,txt) do {} while (0)
-#define dump(fmt,arg...) do {} while (0)
+#endif
 
+/*
+ * DBG_OUTPUT is the destination print buffer for debug messages.
+ * It defaults to the the null print buffer, but can be redefined
+ * (typically in the individual .c files being debugged) to allow
+ * selected debug messages to be generated where needed.
+ */
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT TIPC_NULL
+#endif
 
 /*
- * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
- * the null print buffer.  Thes ensures that any system or debug messages
- * that are generated without using the above macros are handled correctly.
+ * TIPC can be configured to send debug messages to the specified print buffer
+ * (typically DBG_OUTPUT) or to suppress them entirely.
 */
 
-#undef  TIPC_OUTPUT
-#define TIPC_OUTPUT TIPC_CONS
+#ifdef CONFIG_TIPC_DEBUG
 
-#undef  DBG_OUTPUT
-#define DBG_OUTPUT TIPC_NULL
+#define dbg(fmt, arg...) \
+	do { \
+		if (DBG_OUTPUT != TIPC_NULL) \
+			tipc_printf(DBG_OUTPUT, fmt, ## arg); \
+	} while (0)
+#define msg_dbg(msg, txt) \
+	do { \
+		if (DBG_OUTPUT != TIPC_NULL) \
+			tipc_msg_dbg(DBG_OUTPUT, msg, txt); \
+	} while (0)
+#define dump(fmt, arg...) \
+	do { \
+		if (DBG_OUTPUT != TIPC_NULL) \
+			tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \
+	} while (0)
+
+void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
+void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
+
+#else
+
+#define dbg(fmt, arg...)	do {} while (0)
+#define msg_dbg(msg, txt)	do {} while (0)
+#define dump(fmt, arg...)	do {} while (0)
+
+#define tipc_msg_dbg(...)	do {} while (0)
+#define tipc_dump_dbg(...)	do {} while (0)
 
 #endif
 
@@ -178,7 +202,7 @@ extern atomic_t tipc_user_count;
 
 extern int  tipc_core_start(void);
 extern void tipc_core_stop(void);
-extern int  tipc_core_start_net(void);
+extern int  tipc_core_start_net(unsigned long addr);
 extern void tipc_core_stop_net(void);
 extern int  tipc_handler_start(void);
 extern void tipc_handler_stop(void);
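
Usage note: because DBG_OUTPUT is now guarded by #ifndef, a source file being debugged can redirect its own dbg()/msg_dbg()/dump() output before pulling in core.h, exactly as the new header comment describes. A sketch, assuming CONFIG_TIPC_DEBUG is enabled (file and values are illustrative):

	/* At the top of a TIPC .c file under investigation: the #define
	 * must precede the include, so the #ifndef guard in core.h
	 * leaves it alone. */
	#define DBG_OUTPUT TIPC_CONS	/* send this file's debug output to console */
	#include "core.h"

	static void example_probe(void)
	{
		/* Emitted only when CONFIG_TIPC_DEBUG is set; otherwise
		 * dbg() compiles away to an empty statement. */
		dbg("example state: %u\n", 42);
	}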
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index e809d2a2ce06..29ecae851668 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -2,7 +2,7 @@
 * net/tipc/dbg.c: TIPC print buffer routines for debugging
 *
 * Copyright (c) 1996-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -38,17 +38,43 @@
 #include "config.h"
 #include "dbg.h"
 
-static char print_string[TIPC_PB_MAX_STR];
-static DEFINE_SPINLOCK(print_lock);
+/*
+ * TIPC pre-defines the following print buffers:
+ *
+ * TIPC_NULL : null buffer (i.e. print nowhere)
+ * TIPC_CONS : system console
+ * TIPC_LOG  : TIPC log buffer
+ *
+ * Additional user-defined print buffers are also permitted.
+ */
 
-static struct print_buf null_buf = { NULL, 0, NULL, NULL };
-struct print_buf *TIPC_NULL = &null_buf;
+static struct print_buf null_buf = { NULL, 0, NULL, 0 };
+struct print_buf *const TIPC_NULL = &null_buf;
 
-static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
-struct print_buf *TIPC_CONS = &cons_buf;
+static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
+struct print_buf *const TIPC_CONS = &cons_buf;
 
-static struct print_buf log_buf = { NULL, 0, NULL, NULL };
-struct print_buf *TIPC_LOG = &log_buf;
+static struct print_buf log_buf = { NULL, 0, NULL, 1 };
+struct print_buf *const TIPC_LOG = &log_buf;
+
+/*
+ * Locking policy when using print buffers.
+ *
+ * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
+ * 'print_string' when writing to a print buffer. This also protects against
+ * concurrent writes to the print buffer being written to.
+ *
+ * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned
+ * use of 'print_lock' to protect against all types of concurrent operations
+ * on their associated print buffer (not just write operations).
+ *
+ * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
+ * on the caller to prevent simultaneous use of the print buffer(s) being
+ * manipulated.
+ */
+
+static char print_string[TIPC_PB_MAX_STR];
+static DEFINE_SPINLOCK(print_lock);
 
 
 #define FORMAT(PTR,LEN,FMT) \
@@ -60,27 +86,14 @@ struct print_buf *TIPC_LOG = &log_buf;
 	*(PTR + LEN) = '\0';\
 }
 
-/*
- * Locking policy when using print buffers.
- *
- * The following routines use 'print_lock' for protection:
- * 1) tipc_printf()  - to protect its print buffer(s) and 'print_string'
- * 2) TIPC_TEE()     - to protect its print buffer(s)
- * 3) tipc_dump()    - to protect its print buffer(s) and 'print_string'
- * 4) tipc_log_XXX() - to protect TIPC_LOG
- *
- * All routines of the form tipc_printbuf_XXX() rely on the caller to prevent
- * simultaneous use of the print buffer(s) being manipulated.
- */
-
 /**
 * tipc_printbuf_init - initialize print buffer to empty
 * @pb: pointer to print buffer structure
 * @raw: pointer to character array used by print buffer
 * @size: size of character array
 *
- * Makes the print buffer a null device that discards anything written to it
- * if the character array is too small (or absent).
+ * Note: If the character array is too small (or absent), the print buffer
+ * becomes a null device that discards anything written to it.
 */
 
 void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
@@ -88,13 +101,13 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
 	pb->buf = raw;
 	pb->crs = raw;
 	pb->size = size;
-	pb->next = NULL;
+	pb->echo = 0;
 
 	if (size < TIPC_PB_MIN_SIZE) {
 		pb->buf = NULL;
 	} else if (raw) {
 		pb->buf[0] = 0;
-		pb->buf[size-1] = ~0;
+		pb->buf[size - 1] = ~0;
 	}
 }
 
@@ -105,7 +118,11 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
 
 void tipc_printbuf_reset(struct print_buf *pb)
 {
-	tipc_printbuf_init(pb, pb->buf, pb->size);
+	if (pb->buf) {
+		pb->crs = pb->buf;
+		pb->buf[0] = 0;
+		pb->buf[pb->size - 1] = ~0;
+	}
 }
 
 /**
@@ -141,7 +158,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
 
 	if (pb->buf[pb->size - 1] == 0) {
 		cp_buf = kmalloc(pb->size, GFP_ATOMIC);
-		if (cp_buf != NULL){
+		if (cp_buf) {
 			tipc_printbuf_init(&cb, cp_buf, pb->size);
 			tipc_printbuf_move(&cb, pb);
 			tipc_printbuf_move(pb, &cb);
@@ -179,15 +196,16 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
 	}
 
 	if (pb_to->size < pb_from->size) {
-		tipc_printbuf_reset(pb_to);
-		tipc_printf(pb_to, "*** PRINT BUFFER MOVE ERROR ***");
+		strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
+		pb_to->buf[pb_to->size - 1] = ~0;
+		pb_to->crs = strchr(pb_to->buf, 0);
 		return;
 	}
 
 	/* Copy data from char after cursor to end (if used) */
 
 	len = pb_from->buf + pb_from->size - pb_from->crs - 2;
-	if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
+	if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
 		strcpy(pb_to->buf, pb_from->crs + 1);
 		pb_to->crs = pb_to->buf + len;
 	} else
@@ -203,8 +221,8 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
 }
 
 /**
- * tipc_printf - append formatted output to print buffer chain
- * @pb: pointer to chain of print buffers (may be NULL)
+ * tipc_printf - append formatted output to print buffer
+ * @pb: pointer to print buffer
 * @fmt: formatted info to be printed
 */
 
@@ -213,68 +231,40 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
 	int chars_to_add;
 	int chars_left;
 	char save_char;
-	struct print_buf *pb_next;
 
 	spin_lock_bh(&print_lock);
+
 	FORMAT(print_string, chars_to_add, fmt);
 	if (chars_to_add >= TIPC_PB_MAX_STR)
 		strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
 
-	while (pb) {
-		if (pb == TIPC_CONS)
-			printk(print_string);
-		else if (pb->buf) {
-			chars_left = pb->buf + pb->size - pb->crs - 1;
-			if (chars_to_add <= chars_left) {
-				strcpy(pb->crs, print_string);
-				pb->crs += chars_to_add;
-			} else if (chars_to_add >= (pb->size - 1)) {
-				strcpy(pb->buf, print_string + chars_to_add + 1
-				       - pb->size);
-				pb->crs = pb->buf + pb->size - 1;
-			} else {
-				strcpy(pb->buf, print_string + chars_left);
-				save_char = print_string[chars_left];
-				print_string[chars_left] = 0;
-				strcpy(pb->crs, print_string);
-				print_string[chars_left] = save_char;
-				pb->crs = pb->buf + chars_to_add - chars_left;
-			}
+	if (pb->buf) {
+		chars_left = pb->buf + pb->size - pb->crs - 1;
+		if (chars_to_add <= chars_left) {
+			strcpy(pb->crs, print_string);
+			pb->crs += chars_to_add;
+		} else if (chars_to_add >= (pb->size - 1)) {
+			strcpy(pb->buf, print_string + chars_to_add + 1
+			       - pb->size);
+			pb->crs = pb->buf + pb->size - 1;
+		} else {
+			strcpy(pb->buf, print_string + chars_left);
+			save_char = print_string[chars_left];
+			print_string[chars_left] = 0;
+			strcpy(pb->crs, print_string);
+			print_string[chars_left] = save_char;
+			pb->crs = pb->buf + chars_to_add - chars_left;
 		}
-		pb_next = pb->next;
-		pb->next = NULL;
-		pb = pb_next;
 	}
-	spin_unlock_bh(&print_lock);
-}
 
-/**
- * TIPC_TEE - perform next output operation on both print buffers
- * @b0: pointer to chain of print buffers (may be NULL)
- * @b1: pointer to print buffer to add to chain
- *
- * Returns pointer to print buffer chain.
- */
+	if (pb->echo)
+		printk(print_string);
 
-struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
-{
-	struct print_buf *pb = b0;
-
-	if (!b0 || (b0 == b1))
-		return b1;
-
-	spin_lock_bh(&print_lock);
-	while (pb->next) {
-		if ((pb->next == b1) || (pb->next == b0))
-			pb->next = pb->next->next;
-		else
-			pb = pb->next;
-	}
-	pb->next = b1;
 	spin_unlock_bh(&print_lock);
-	return b0;
 }
 
+#ifdef CONFIG_TIPC_DEBUG
+
 /**
 * print_to_console - write string of bytes to console in multiple chunks
 */
@@ -321,72 +311,66 @@ static void printbuf_dump(struct print_buf *pb)
 }
 
 /**
- * tipc_dump - dump non-console print buffer(s) to console
- * @pb: pointer to chain of print buffers
+ * tipc_dump_dbg - dump (non-console) print buffer to console
+ * @pb: pointer to print buffer
 */
 
-void tipc_dump(struct print_buf *pb, const char *fmt, ...)
+void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...)
 {
-	struct print_buf *pb_next;
 	int len;
 
+	if (pb == TIPC_CONS)
+		return;
+
 	spin_lock_bh(&print_lock);
+
 	FORMAT(print_string, len, fmt);
 	printk(print_string);
 
-	for (; pb; pb = pb->next) {
-		if (pb != TIPC_CONS) {
-			printk("\n---- Start of %s log dump ----\n\n",
-			       (pb == TIPC_LOG) ? "global" : "local");
-			printbuf_dump(pb);
-			tipc_printbuf_reset(pb);
-			printk("\n---- End of dump ----\n");
-		}
-		pb_next = pb->next;
-		pb->next = NULL;
-		pb = pb_next;
-	}
+	printk("\n---- Start of %s log dump ----\n\n",
+	       (pb == TIPC_LOG) ? "global" : "local");
+	printbuf_dump(pb);
+	tipc_printbuf_reset(pb);
+	printk("\n---- End of dump ----\n");
+
 	spin_unlock_bh(&print_lock);
 }
 
+#endif
+
 /**
- * tipc_log_stop - free up TIPC log print buffer
+ * tipc_log_resize - change the size of the TIPC log buffer
+ * @log_size: print buffer size to use
 */
 
-void tipc_log_stop(void)
+int tipc_log_resize(int log_size)
 {
+	int res = 0;
+
 	spin_lock_bh(&print_lock);
 	if (TIPC_LOG->buf) {
 		kfree(TIPC_LOG->buf);
 		TIPC_LOG->buf = NULL;
 	}
-	spin_unlock_bh(&print_lock);
-}
-
-/**
- * tipc_log_reinit - (re)initialize TIPC log print buffer
- * @log_size: print buffer size to use
- */
-
-void tipc_log_reinit(int log_size)
-{
-	tipc_log_stop();
-
 	if (log_size) {
 		if (log_size < TIPC_PB_MIN_SIZE)
 			log_size = TIPC_PB_MIN_SIZE;
-		spin_lock_bh(&print_lock);
+		res = TIPC_LOG->echo;
 		tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
 				   log_size);
-		spin_unlock_bh(&print_lock);
+		TIPC_LOG->echo = res;
+		res = !TIPC_LOG->buf;
 	}
+	spin_unlock_bh(&print_lock);
+
+	return res;
 }
 
 /**
- * tipc_log_resize - reconfigure size of TIPC log buffer
+ * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
 */
 
-struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
 {
 	u32 value;
 
@@ -397,7 +381,9 @@ struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
 	if (value != delimit(value, 0, 32768))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 			" (log size must be 0-32768)");
-	tipc_log_reinit(value);
+	if (tipc_log_resize(value))
+		return tipc_cfg_reply_error_string(
+			"unable to create specified log (log size is now 0)");
 	return tipc_cfg_reply_none();
 }
 
@@ -410,27 +396,32 @@ struct sk_buff *tipc_log_dump(void)
 	struct sk_buff *reply;
 
 	spin_lock_bh(&print_lock);
-	if (!TIPC_LOG->buf)
+	if (!TIPC_LOG->buf) {
+		spin_unlock_bh(&print_lock);
 		reply = tipc_cfg_reply_ultra_string("log not activated\n");
-	else if (tipc_printbuf_empty(TIPC_LOG))
+	} else if (tipc_printbuf_empty(TIPC_LOG)) {
+		spin_unlock_bh(&print_lock);
 		reply = tipc_cfg_reply_ultra_string("log is empty\n");
+	}
 	else {
 		struct tlv_desc *rep_tlv;
 		struct print_buf pb;
 		int str_len;
 
 		str_len = min(TIPC_LOG->size, 32768u);
+		spin_unlock_bh(&print_lock);
 		reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
 		if (reply) {
 			rep_tlv = (struct tlv_desc *)reply->data;
 			tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+			spin_lock_bh(&print_lock);
 			tipc_printbuf_move(&pb, TIPC_LOG);
+			spin_unlock_bh(&print_lock);
 			str_len = strlen(TLV_DATA(rep_tlv)) + 1;
 			skb_put(reply, TLV_SPACE(str_len));
 			TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
 		}
 	}
-	spin_unlock_bh(&print_lock);
 	return reply;
 }
 
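
Design note on the tipc_log_dump() change above: print_lock is now dropped before the reply buffer is allocated and re-taken only around the buffer move, so the lock is never held across the allocation (whose failure path could plausibly log through tipc_printf and contend for print_lock again). The resulting pattern, in outline (illustrative, simplified from the code above, not additional patch content):

	/* Sample state under the lock, allocate with the lock dropped,
	 * then re-take the lock only for the buffer move. */
	spin_lock_bh(&print_lock);
	str_len = min(TIPC_LOG->size, 32768u);
	spin_unlock_bh(&print_lock);

	reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));	/* lock not held */

	if (reply) {
		spin_lock_bh(&print_lock);
		tipc_printbuf_move(&pb, TIPC_LOG);
		spin_unlock_bh(&print_lock);
	}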
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index c01b085000e0..5ef1bc8f64ef 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -2,7 +2,7 @@
 * net/tipc/dbg.h: Include file for TIPC print buffer routines
 *
 * Copyright (c) 1997-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
+ * Copyright (c) 2005-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -42,14 +42,14 @@
 * @buf: pointer to character array containing print buffer contents
 * @size: size of character array
 * @crs: pointer to first unused space in character array (i.e. final NUL)
- * @next: used to link print buffers when printing to more than one at a time
+ * @echo: echo output to system console if non-zero
 */
 
 struct print_buf {
 	char *buf;
 	u32 size;
 	char *crs;
-	struct print_buf *next;
+	int echo;
 };
 
 #define TIPC_PB_MIN_SIZE 64	/* minimum size for a print buffer's array */
@@ -61,10 +61,10 @@ int  tipc_printbuf_empty(struct print_buf *pb);
 int  tipc_printbuf_validate(struct print_buf *pb);
 void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
 
-void tipc_log_reinit(int log_size);
-void tipc_log_stop(void);
+int tipc_log_resize(int log_size);
 
-struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
+				    int req_tlv_space);
struct sk_buff *tipc_log_dump(void);
 
 #endif
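
Usage note: the echo flag replaces the old next-pointer chaining, so a buffer that previously would have been TEE'd with TIPC_CONS now simply sets echo after initialization (tipc_printbuf_init() clears it). A sketch of a user-defined print buffer; the names are invented for illustration:

	#include "dbg.h"

	/* 1 KB of private storage, echoed to the console like TIPC_LOG. */
	static char example_storage[1024];
	static struct print_buf example_pb;

	static void example_log_setup(void)
	{
		tipc_printbuf_init(&example_pb, example_storage,
				   sizeof(example_storage));
		example_pb.echo = 1;	/* init clears echo; opt back in */
		tipc_printf(&example_pb, "buffer of %u bytes ready\n",
			    (unsigned int)sizeof(example_storage));
	}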
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5d643e5721eb..1657f0e795ff 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -120,9 +120,8 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
 
 	if (buf) {
 		msg = buf_msg(buf);
-		msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
-			 dest_domain);
-		msg_set_non_seq(msg);
+		msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
+		msg_set_non_seq(msg, 1);
 		msg_set_req_links(msg, req_links);
 		msg_set_dest_domain(msg, dest_domain);
 		msg_set_bc_netid(msg, tipc_net_id);
@@ -156,11 +155,11 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
 /**
 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
 * @buf: buffer containing message
+ * @b_ptr: bearer that message arrived on
 */
 
-void tipc_disc_recv_msg(struct sk_buff *buf)
+void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
 {
-	struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
 	struct link *link;
 	struct tipc_media_addr media_addr;
 	struct tipc_msg *msg = buf_msg(buf);
@@ -200,9 +199,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
 		dbg(" in own cluster\n");
 		if (n_ptr == NULL) {
 			n_ptr = tipc_node_create(orig);
-		}
-		if (n_ptr == NULL) {
-			return;
+			if (!n_ptr)
+				return;
 		}
 		spin_lock_bh(&n_ptr->lock);
 		link = n_ptr->links[b_ptr->identity];
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 9fd7587b143a..c36eaeb7d5d0 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -48,7 +48,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
 void tipc_disc_update_link_req(struct link_req *req);
 void tipc_disc_stop_link_req(struct link_req *req);
 
-void tipc_disc_recv_msg(struct sk_buff *buf);
+void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
 
 void tipc_disc_link_event(u32 addr, char *name, int up);
 #if 0
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 9cd35eec3e7f..bc72fbc4f8b8 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -82,7 +82,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 			dev->dev_addr, clone->len);
 		dev_queue_xmit(clone);
 	}
-	return TIPC_OK;
+	return 0;
 }
 
 /**
@@ -113,12 +113,12 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 		if (likely(buf->len == size)) {
 			buf->next = NULL;
 			tipc_recv_msg(buf, eb_ptr->bearer);
-			return TIPC_OK;
+			return 0;
 		}
 	}
 	kfree_skb(buf);
-	return TIPC_OK;
+	return 0;
 }
 
 /**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2a26a16e269f..d60113ba4b1b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -51,6 +51,12 @@
 
 
 /*
+ * Out-of-range value for link session numbers
+ */
+
+#define INVALID_SESSION 0x10000
+
+/*
 * Limit for deferred reception queue:
 */
 
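
Note on INVALID_SESSION: link session numbers travel in a 16-bit header field, which is why the hunks below mask new session values with 0xffff; 0x10000 lies just outside that range and therefore can never collide with a genuine peer session. An illustrative snippet (not patch content):

	static void session_range_example(void)
	{
		u32 session = (tipc_random & 0xffff);	/* 0x0000 .. 0xffff */
		u32 next = ((session + 1) & 0xffff);	/* wraps within 16 bits */

		assert(session != INVALID_SESSION);	/* 0x10000 is unreachable */
		assert(next != INVALID_SESSION);
	}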
@@ -147,9 +153,21 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
 
 #define LINK_LOG_BUF_SIZE 0
 
-#define dbg_link(fmt, arg...)  do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
-#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
-#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
+#define dbg_link(fmt, arg...) \
+	do { \
+		if (LINK_LOG_BUF_SIZE) \
+			tipc_printf(&l_ptr->print_buf, fmt, ## arg); \
+	} while (0)
+#define dbg_link_msg(msg, txt) \
+	do { \
+		if (LINK_LOG_BUF_SIZE) \
+			tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \
+	} while (0)
+#define dbg_link_state(txt) \
+	do { \
+		if (LINK_LOG_BUF_SIZE) \
+			link_print(l_ptr, &l_ptr->print_buf, txt); \
+	} while (0)
 #define dbg_link_dump() do { \
 	if (LINK_LOG_BUF_SIZE) { \
 		tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
@@ -450,9 +468,9 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
 
 	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
 	msg = l_ptr->pmsg;
-	msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+	msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
 	msg_set_size(msg, sizeof(l_ptr->proto_msg));
-	msg_set_session(msg, tipc_random);
+	msg_set_session(msg, (tipc_random & 0xffff));
 	msg_set_bearer_id(msg, b_ptr->identity);
 	strcpy((char *)msg_data(msg), if_name);
 
@@ -693,10 +711,10 @@ void tipc_link_reset(struct link *l_ptr)
 	u32 checkpoint = l_ptr->next_in_no;
 	int was_active_link = tipc_link_is_active(l_ptr);
 
-	msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
+	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 
-	/* Link is down, accept any session: */
-	l_ptr->peer_session = 0;
+	/* Link is down, accept any session */
+	l_ptr->peer_session = INVALID_SESSION;
 
 	/* Prepare for max packet size negotiation */
 	link_init_max_pkt(l_ptr);
@@ -1110,7 +1128,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 
 			if (bundler) {
 				msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
-					 TIPC_OK, INT_H_SIZE, l_ptr->addr);
+					 INT_H_SIZE, l_ptr->addr);
 				skb_copy_to_linear_data(bundler, &bundler_hdr,
 							INT_H_SIZE);
 				skb_trim(bundler, INT_H_SIZE);
@@ -1374,7 +1392,7 @@ again:
 
 	msg_dbg(hdr, ">FRAGMENTING>");
 	msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-		 TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
+		 INT_H_SIZE, msg_destnode(hdr));
 	msg_set_link_selector(&fragm_hdr, sender->publ.ref);
 	msg_set_size(&fragm_hdr, max_pkt);
 	msg_set_fragm_no(&fragm_hdr, 1);
@@ -1543,7 +1561,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
 			l_ptr->retransm_queue_head = mod(++r_q_head);
 			l_ptr->retransm_queue_size = --r_q_size;
 			l_ptr->stats.retransmitted++;
-			return TIPC_OK;
+			return 0;
 		} else {
 			l_ptr->stats.bearer_congs++;
 			msg_dbg(buf_msg(buf), "|>DEF-RETR>");
@@ -1562,7 +1580,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
 			l_ptr->unacked_window = 0;
 			buf_discard(buf);
 			l_ptr->proto_msg_queue = NULL;
-			return TIPC_OK;
+			return 0;
 		} else {
 			msg_dbg(buf_msg(buf), "|>DEF-PROT>");
 			l_ptr->stats.bearer_congs++;
@@ -1586,7 +1604,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
 				msg_set_type(msg, CLOSED_MSG);
 			msg_dbg(msg, ">PUSH-DATA>");
 			l_ptr->next_out = buf->next;
-			return TIPC_OK;
+			return 0;
 		} else {
 			msg_dbg(msg, "|PUSH-DATA|");
 			l_ptr->stats.bearer_congs++;
@@ -1610,8 +1628,8 @@ void tipc_link_push_queue(struct link *l_ptr)
 
 	do {
 		res = tipc_link_push_packet(l_ptr);
-	}
-	while (res == TIPC_OK);
+	} while (!res);
+
 	if (res == PUSH_FAILED)
 		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 }
@@ -1651,7 +1669,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
 	struct tipc_msg *msg = buf_msg(buf);
 
 	warn("Retransmission failure on link <%s>\n", l_ptr->name);
-	tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>");
+	tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>");
 
 	if (l_ptr->addr) {
 
@@ -1748,21 +1766,6 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
 	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
 }
 
-/*
- * link_recv_non_seq: Receive packets which are outside
- *                    the link sequence flow
- */
-
-static void link_recv_non_seq(struct sk_buff *buf)
-{
-	struct tipc_msg *msg = buf_msg(buf);
-
-	if (msg_user(msg) == LINK_CONFIG)
-		tipc_disc_recv_msg(buf);
-	else
-		tipc_bclink_recv_pkt(buf);
-}
-
 /**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
@@ -1839,7 +1842,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 {
 	read_lock_bh(&tipc_net_lock);
 	while (head) {
-		struct bearer *b_ptr;
+		struct bearer *b_ptr = (struct bearer *)tb_ptr;
 		struct node *n_ptr;
 		struct link *l_ptr;
 		struct sk_buff *crs;
@@ -1850,9 +1853,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 		u32 released = 0;
 		int type;
 
-		b_ptr = (struct bearer *)tb_ptr;
-		TIPC_SKB_CB(buf)->handle = b_ptr;
-
 		head = head->next;
 
 		/* Ensure message is well-formed */
@@ -1871,7 +1871,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 		msg = buf_msg(buf);
 
 		if (unlikely(msg_non_seq(msg))) {
-			link_recv_non_seq(buf);
+			if (msg_user(msg) == LINK_CONFIG)
+				tipc_disc_recv_msg(buf, b_ptr);
+			else
+				tipc_bclink_recv_pkt(buf);
 			continue;
 		}
 
@@ -1978,8 +1981,6 @@ deliver:
 				if (link_recv_changeover_msg(&l_ptr, &buf)) {
 					msg = buf_msg(buf);
 					seq_no = msg_seqno(msg);
-					TIPC_SKB_CB(buf)->handle
-						= b_ptr;
 					if (type == ORIGINAL_MSG)
 						goto deliver;
 					goto protocol_check;
@@ -2263,7 +2264,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
 	switch (msg_type(msg)) {
 
 	case RESET_MSG:
-		if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
+		if (!link_working_unknown(l_ptr) &&
+		    (l_ptr->peer_session != INVALID_SESSION)) {
 			if (msg_session(msg) == l_ptr->peer_session) {
 				dbg("Duplicate RESET: %u<->%u\n",
 				    msg_session(msg), l_ptr->peer_session);
@@ -2424,7 +2426,7 @@ void tipc_link_changeover(struct link *l_ptr)
 	}
 
 	msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-		 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
 	msg_set_msgcnt(&tunnel_hdr, msgcount);
 	dbg("Link changeover requires %u tunnel messages\n", msgcount);
@@ -2479,7 +2481,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
 	struct tipc_msg tunnel_hdr;
 
 	msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
-		 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
 	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
 	iter = l_ptr->first_out;
@@ -2672,10 +2674,12 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
 	u32 pack_sz = link_max_pkt(l_ptr);
 	u32 fragm_sz = pack_sz - INT_H_SIZE;
 	u32 fragm_no = 1;
-	u32 destaddr = msg_destnode(inmsg);
+	u32 destaddr;
 
 	if (msg_short(inmsg))
 		destaddr = l_ptr->addr;
+	else
+		destaddr = msg_destnode(inmsg);
 
 	if (msg_routed(inmsg))
 		msg_set_prevnode(inmsg, tipc_own_addr);
@@ -2683,7 +2687,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
 	/* Prepare reusable fragment header: */
 
 	msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-		 TIPC_OK, INT_H_SIZE, destaddr);
+		 INT_H_SIZE, destaddr);
 	msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
 	msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
 	msg_set_fragm_no(&fragm_hdr, fragm_no);
@@ -2994,7 +2998,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
 			link_set_supervision_props(l_ptr, new_value);
 			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 						 0, 0, new_value, 0, 0);
-			res = TIPC_OK;
+			res = 0;
 		}
 		break;
 	case TIPC_CMD_SET_LINK_PRI:
@@ -3003,14 +3007,14 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
 			l_ptr->priority = new_value;
 			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
 						 0, 0, 0, new_value, 0);
-			res = TIPC_OK;
+			res = 0;
 		}
 		break;
 	case TIPC_CMD_SET_LINK_WINDOW:
 		if ((new_value >= TIPC_MIN_LINK_WIN) &&
 		    (new_value <= TIPC_MAX_LINK_WIN)) {
 			tipc_link_set_queue_limits(l_ptr, new_value);
-			res = TIPC_OK;
+			res = 0;
 		}
 		break;
 	}
@@ -3226,7 +3230,7 @@ int link_control(const char *name, u32 op, u32 val)
 	if (op == TIPC_CMD_UNBLOCK_LINK) {
3227 l_ptr->blocked = 0; 3231 l_ptr->blocked = 0;
3228 } 3232 }
3229 res = TIPC_OK; 3233 res = 0;
3230 } 3234 }
3231 tipc_node_unlock(node); 3235 tipc_node_unlock(node);
3232 } 3236 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 696a8633df75..73dcd00d674e 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,7 +41,9 @@
41#include "bearer.h" 41#include "bearer.h"
42 42
43 43
44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str) 44#ifdef CONFIG_TIPC_DEBUG
45
46void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{ 47{
46 u32 usr = msg_user(msg); 48 u32 usr = msg_user(msg);
47 tipc_printf(buf, str); 49 tipc_printf(buf, str);
@@ -228,13 +230,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
228 230
229 switch (usr) { 231 switch (usr) {
230 case CONN_MANAGER: 232 case CONN_MANAGER:
231 case NAME_DISTRIBUTOR:
232 case TIPC_LOW_IMPORTANCE: 233 case TIPC_LOW_IMPORTANCE:
233 case TIPC_MEDIUM_IMPORTANCE: 234 case TIPC_MEDIUM_IMPORTANCE:
234 case TIPC_HIGH_IMPORTANCE: 235 case TIPC_HIGH_IMPORTANCE:
235 case TIPC_CRITICAL_IMPORTANCE: 236 case TIPC_CRITICAL_IMPORTANCE:
236 if (msg_short(msg))
237 break; /* No error */
238 switch (msg_errcode(msg)) { 237 switch (msg_errcode(msg)) {
239 case TIPC_OK: 238 case TIPC_OK:
240 break; 239 break;
@@ -315,9 +314,11 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
315 } 314 }
316 tipc_printf(buf, "\n"); 315 tipc_printf(buf, "\n");
317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { 316 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
318 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 317 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
319 } 318 }
320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { 319 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
321 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 320 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
322 } 321 }
323} 322}
323
324#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index ad487e8abcc2..7ee6ae238147 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -2,7 +2,7 @@
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,14 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
75 m->hdr[w] |= htonl(val); 75 m->hdr[w] |= htonl(val);
76} 76}
77 77
78static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
79{
80 u32 temp = msg->hdr[a];
81
82 msg->hdr[a] = msg->hdr[b];
83 msg->hdr[b] = temp;
84}
85
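The new msg_swap_words() helper exchanges two 32-bit header words in place. A hypothetical use (the word indices below are illustrative, not taken from this patch) is reversing a message so it can be bounced back to its sender:

        /* hypothetical caller, illustrative word indices only */
        static void sketch_reverse_msg(struct tipc_msg *m)
        {
                msg_swap_words(m, 4, 5);        /* origin/destination ports */
                msg_swap_words(m, 6, 7);        /* origin/destination nodes */
        }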
78/* 86/*
79 * Word 0 87 * Word 0
80 */ 88 */
@@ -119,9 +127,9 @@ static inline int msg_non_seq(struct tipc_msg *m)
119 return msg_bits(m, 0, 20, 1); 127 return msg_bits(m, 0, 20, 1);
120} 128}
121 129
122static inline void msg_set_non_seq(struct tipc_msg *m) 130static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
123{ 131{
124 msg_set_bits(m, 0, 20, 1, 1); 132 msg_set_bits(m, 0, 20, 1, n);
125} 133}
126 134
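msg_set_non_seq() now takes the bit value as an argument, so callers can clear the flag as well as set it. Existing call sites (an assumption about code outside this hunk) become:

        msg_set_non_seq(msg, 1);        /* was: msg_set_non_seq(msg) */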
127static inline int msg_dest_droppable(struct tipc_msg *m) 135static inline int msg_dest_droppable(struct tipc_msg *m)
@@ -224,6 +232,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
224 msg_set_bits(m, 2, 0, 0xffff, n); 232 msg_set_bits(m, 2, 0, 0xffff, n);
225} 233}
226 234
235/*
236 * TIPC may utilize the "link ack #" and "link seq #" fields of a short
237 * message header to hold the destination node for the message, since the
238 * normal "dest node" field isn't present. This cache is only referenced
239 * when required, so populating the cache of a longer message header is
240 * harmless (as long as the header has the two link sequence fields present).
241 *
242 * Note: Host byte order is OK here, since the info never goes off-card.
243 */
244
245static inline u32 msg_destnode_cache(struct tipc_msg *m)
246{
247 return m->hdr[2];
248}
249
250static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
251{
252 m->hdr[2] = dnode;
253}
227 254
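A hypothetical helper showing the pattern the comment describes — stash the destination while the header is being built, recover it later without caring whether the header turned out short or long:

        /* illustrative only; not part of this patch */
        static u32 sketch_destnode_roundtrip(struct tipc_msg *m, u32 dnode)
        {
                msg_set_destnode_cache(m, dnode);  /* word 2, host order */
                /* ... header may be grown or copied here ... */
                return msg_destnode_cache(m);      /* valid until word 2 is
                                                    * rewritten with ack/seq */
        }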
228/* 255/*
229 * Words 3-10 256 * Words 3-10
@@ -325,7 +352,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
325 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
326 w0:|vers |msg usr|hdr sz |n|resrv| packet size | 353 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
327 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 354 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
328 w1:|m typ|rsv=0| sequence gap | broadcast ack no | 355 w1:|m typ| sequence gap | broadcast ack no |
329 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
330 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to | 357 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
331 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -388,12 +415,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
388 415
389static inline u32 msg_seq_gap(struct tipc_msg *m) 416static inline u32 msg_seq_gap(struct tipc_msg *m)
390{ 417{
391 return msg_bits(m, 1, 16, 0xff); 418 return msg_bits(m, 1, 16, 0x1fff);
392} 419}
393 420
394static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) 421static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
395{ 422{
396 msg_set_bits(m, 1, 16, 0xff, n); 423 msg_set_bits(m, 1, 16, 0x1fff, n);
397} 424}
398 425
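Widening the mask from 0xff to 0x1fff grows the encodable sequence gap from 255 to 8191, which is what the updated word-1 diagram reflects: the former reserved bits are folded into the gap field. Illustration:

        #define OLD_GAP_MASK 0xffu      /*  8 bits -> max gap  255 */
        #define NEW_GAP_MASK 0x1fffu    /* 13 bits -> max gap 8191 */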
399static inline u32 msg_req_links(struct tipc_msg *m) 426static inline u32 msg_req_links(struct tipc_msg *m)
@@ -696,7 +723,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m)
696 723
697 724
698static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 725static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
699 u32 err, u32 hsize, u32 destnode) 726 u32 hsize, u32 destnode)
700{ 727{
701 memset(m, 0, hsize); 728 memset(m, 0, hsize);
702 msg_set_version(m); 729 msg_set_version(m);
@@ -705,7 +732,6 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
705 msg_set_size(m, hsize); 732 msg_set_size(m, hsize);
706 msg_set_prevnode(m, tipc_own_addr); 733 msg_set_prevnode(m, tipc_own_addr);
707 msg_set_type(m, type); 734 msg_set_type(m, type);
708 msg_set_errcode(m, err);
709 if (!msg_short(m)) { 735 if (!msg_short(m)) {
710 msg_set_orignode(m, tipc_own_addr); 736 msg_set_orignode(m, tipc_own_addr);
711 msg_set_destnode(m, destnode); 737 msg_set_destnode(m, destnode);
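Dropping the err parameter works because msg_init() starts with memset(), which leaves the error code at zero (TIPC_OK); the minority of callers that need a real error code now set it explicitly, as the port.c hunks further down show:

        msg_init(msg, usr, type, LONG_H_SIZE, destnode);
        msg_set_errcode(msg, err);      /* only on error paths */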
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 39fd1619febf..10a69894e2fd 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -41,9 +41,6 @@
41#include "msg.h" 41#include "msg.h"
42#include "name_distr.h" 42#include "name_distr.h"
43 43
44#undef DBG_OUTPUT
45#define DBG_OUTPUT NULL
46
47#define ITEM_SIZE sizeof(struct distr_item) 44#define ITEM_SIZE sizeof(struct distr_item)
48 45
49/** 46/**
@@ -106,8 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
106 103
107 if (buf != NULL) { 104 if (buf != NULL) {
108 msg = buf_msg(buf); 105 msg = buf_msg(buf);
109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 106 msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
110 LONG_H_SIZE, dest);
111 msg_set_size(msg, LONG_H_SIZE + size); 107 msg_set_size(msg, LONG_H_SIZE + size);
112 } 108 }
113 return buf; 109 return buf;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index ac7dfdda7973..cd72e22b132b 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -2,7 +2,7 @@
2 * net/tipc/name_table.c: TIPC name table code 2 * net/tipc/name_table.c: TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -52,9 +52,16 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
52 * struct sub_seq - container for all published instances of a name sequence 52 * struct sub_seq - container for all published instances of a name sequence
53 * @lower: name sequence lower bound 53 * @lower: name sequence lower bound
54 * @upper: name sequence upper bound 54 * @upper: name sequence upper bound
55 * @node_list: circular list of matching publications with >= node scope 55 * @node_list: circular list of publications made by own node
56 * @cluster_list: circular list of matching publications with >= cluster scope 56 * @cluster_list: circular list of publications made by own cluster
57 * @zone_list: circular list of matching publications with >= zone scope 57 * @zone_list: circular list of publications made by own zone
58 * @node_list_size: number of entries in "node_list"
59 * @cluster_list_size: number of entries in "cluster_list"
60 * @zone_list_size: number of entries in "zone_list"
61 *
62 * Note: The zone list always contains at least one entry, since all
63 * publications of the associated name sequence belong to it.
64 * (The cluster and node lists may be empty.)
58 */ 65 */
59 66
60struct sub_seq { 67struct sub_seq {
@@ -63,6 +70,9 @@ struct sub_seq {
63 struct publication *node_list; 70 struct publication *node_list;
64 struct publication *cluster_list; 71 struct publication *cluster_list;
65 struct publication *zone_list; 72 struct publication *zone_list;
73 u32 node_list_size;
74 u32 cluster_list_size;
75 u32 zone_list_size;
66}; 76};
67 77
68/** 78/**
@@ -74,7 +84,7 @@ struct sub_seq {
74 * @first_free: array index of first unused sub-sequence entry 84 * @first_free: array index of first unused sub-sequence entry
75 * @ns_list: links to adjacent name sequences in hash chain 85 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type' 86 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure 87 * @lock: spinlock controlling access to publication lists of all sub-sequences
78 */ 88 */
79 89
80struct name_seq { 90struct name_seq {
@@ -317,6 +327,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
317 dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n", 327 dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
318 publ, node, publ->node, publ->subscr.node); 328 publ, node, publ->node, publ->subscr.node);
319 329
330 sseq->zone_list_size++;
320 if (!sseq->zone_list) 331 if (!sseq->zone_list)
321 sseq->zone_list = publ->zone_list_next = publ; 332 sseq->zone_list = publ->zone_list_next = publ;
322 else { 333 else {
@@ -325,6 +336,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
325 } 336 }
326 337
327 if (in_own_cluster(node)) { 338 if (in_own_cluster(node)) {
339 sseq->cluster_list_size++;
328 if (!sseq->cluster_list) 340 if (!sseq->cluster_list)
329 sseq->cluster_list = publ->cluster_list_next = publ; 341 sseq->cluster_list = publ->cluster_list_next = publ;
330 else { 342 else {
@@ -335,6 +347,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
335 } 347 }
336 348
337 if (node == tipc_own_addr) { 349 if (node == tipc_own_addr) {
350 sseq->node_list_size++;
338 if (!sseq->node_list) 351 if (!sseq->node_list)
339 sseq->node_list = publ->node_list_next = publ; 352 sseq->node_list = publ->node_list_next = publ;
340 else { 353 else {
@@ -411,6 +424,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
411 } else { 424 } else {
412 sseq->zone_list = NULL; 425 sseq->zone_list = NULL;
413 } 426 }
427 sseq->zone_list_size--;
414 428
415 /* Remove publication from cluster scope list, if present */ 429 /* Remove publication from cluster scope list, if present */
416 430
@@ -439,6 +453,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
439 } else { 453 } else {
440 sseq->cluster_list = NULL; 454 sseq->cluster_list = NULL;
441 } 455 }
456 sseq->cluster_list_size--;
442 } 457 }
443end_cluster: 458end_cluster:
444 459
@@ -469,6 +484,7 @@ end_cluster:
469 } else { 484 } else {
470 sseq->node_list = NULL; 485 sseq->node_list = NULL;
471 } 486 }
487 sseq->node_list_size--;
472 } 488 }
473end_node: 489end_node:
474 490
@@ -709,15 +725,18 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
709 725
710 if (sseq->lower > upper) 726 if (sseq->lower > upper)
711 break; 727 break;
712 publ = sseq->cluster_list; 728
713 if (publ && (publ->scope <= limit)) 729 publ = sseq->node_list;
730 if (publ) {
714 do { 731 do {
715 if (publ->node == tipc_own_addr) 732 if (publ->scope <= limit)
716 tipc_port_list_add(dports, publ->ref); 733 tipc_port_list_add(dports, publ->ref);
717 else 734 publ = publ->node_list_next;
718 res = 1; 735 } while (publ != sseq->node_list);
719 publ = publ->cluster_list_next; 736 }
720 } while (publ != sseq->cluster_list); 737
738 if (sseq->cluster_list_size != sseq->node_list_size)
739 res = 1;
721 } 740 }
722 741
723 spin_unlock_bh(&seq->lock); 742 spin_unlock_bh(&seq->lock);
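The multicast lookup now walks only the node-scope list and decides whether off-node forwarding is needed by bookkeeping rather than by scanning each cluster publication: the node list is a subset of the cluster list, so a size mismatch proves at least one matching publication lives on another node. Restated:

        if (sseq->cluster_list_size != sseq->node_list_size)
                res = 1;        /* some matching port is off-node; forward */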
@@ -905,6 +924,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
905 struct sub_seq *sseq; 924 struct sub_seq *sseq;
906 char typearea[11]; 925 char typearea[11];
907 926
927 if (seq->first_free == 0)
928 return;
929
908 sprintf(typearea, "%-10u", seq->type); 930 sprintf(typearea, "%-10u", seq->type);
909 931
910 if (depth == 1) { 932 if (depth == 1) {
@@ -915,7 +937,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
915 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { 937 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
916 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { 938 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
917 tipc_printf(buf, "%s ", typearea); 939 tipc_printf(buf, "%s ", typearea);
940 spin_lock_bh(&seq->lock);
918 subseq_list(sseq, buf, depth, index); 941 subseq_list(sseq, buf, depth, index);
942 spin_unlock_bh(&seq->lock);
919 sprintf(typearea, "%10s", " "); 943 sprintf(typearea, "%10s", " ");
920 } 944 }
921 } 945 }
@@ -1050,15 +1074,12 @@ void tipc_nametbl_dump(void)
1050 1074
1051int tipc_nametbl_init(void) 1075int tipc_nametbl_init(void)
1052{ 1076{
1053 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1077 table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
1054 1078 GFP_ATOMIC);
1055 table.types = kzalloc(array_size, GFP_ATOMIC);
1056 if (!table.types) 1079 if (!table.types)
1057 return -ENOMEM; 1080 return -ENOMEM;
1058 1081
1059 write_lock_bh(&tipc_nametbl_lock);
1060 table.local_publ_count = 0; 1082 table.local_publ_count = 0;
1061 write_unlock_bh(&tipc_nametbl_lock);
1062 return 0; 1083 return 0;
1063} 1084}
1064 1085
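kcalloc() both zeroes the array and checks the element-count multiplication for overflow, so the hand-computed array_size goes away; the lock/unlock pair around a plain store to local_publ_count did no useful work at init time. The overflow check kcalloc adds is roughly this (a sketch of the generic helper, not TIPC code):

        if (size != 0 && n > ULONG_MAX / size)
                return NULL;            /* n * size would overflow */
        return kzalloc(n * size, flags);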
diff --git a/net/tipc/net.c b/net/tipc/net.c
index c39c76201e8e..ec7b04fbdc43 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -165,7 +165,7 @@ static int net_init(void)
165 if (!tipc_net.zones) { 165 if (!tipc_net.zones) {
166 return -ENOMEM; 166 return -ENOMEM;
167 } 167 }
168 return TIPC_OK; 168 return 0;
169} 169}
170 170
171static void net_stop(void) 171static void net_stop(void)
@@ -266,7 +266,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
266 tipc_link_send(buf, dnode, msg_link_selector(msg)); 266 tipc_link_send(buf, dnode, msg_link_selector(msg));
267} 267}
268 268
269int tipc_net_start(void) 269int tipc_net_start(u32 addr)
270{ 270{
271 char addr_string[16]; 271 char addr_string[16];
272 int res; 272 int res;
@@ -274,6 +274,10 @@ int tipc_net_start(void)
274 if (tipc_mode != TIPC_NODE_MODE) 274 if (tipc_mode != TIPC_NODE_MODE)
275 return -ENOPROTOOPT; 275 return -ENOPROTOOPT;
276 276
277 tipc_subscr_stop();
278 tipc_cfg_stop();
279
280 tipc_own_addr = addr;
277 tipc_mode = TIPC_NET_MODE; 281 tipc_mode = TIPC_NET_MODE;
278 tipc_named_reinit(); 282 tipc_named_reinit();
279 tipc_port_reinit(); 283 tipc_port_reinit();
@@ -284,14 +288,14 @@ int tipc_net_start(void)
284 (res = tipc_bclink_init())) { 288 (res = tipc_bclink_init())) {
285 return res; 289 return res;
286 } 290 }
287 tipc_subscr_stop(); 291
288 tipc_cfg_stop();
289 tipc_k_signal((Handler)tipc_subscr_start, 0); 292 tipc_k_signal((Handler)tipc_subscr_start, 0);
290 tipc_k_signal((Handler)tipc_cfg_init, 0); 293 tipc_k_signal((Handler)tipc_cfg_init, 0);
294
291 info("Started in network mode\n"); 295 info("Started in network mode\n");
292 info("Own node address %s, network identity %u\n", 296 info("Own node address %s, network identity %u\n",
293 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 297 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
294 return TIPC_OK; 298 return 0;
295} 299}
296 300
297void tipc_net_stop(void) 301void tipc_net_stop(void)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index a6a0e9976ac9..d154ac2bda9a 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -58,7 +58,7 @@ void tipc_net_route_msg(struct sk_buff *buf);
58struct node *tipc_net_select_remote_node(u32 addr, u32 ref); 58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59u32 tipc_net_select_router(u32 addr, u32 ref); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60 60
61int tipc_net_start(void); 61int tipc_net_start(u32 addr);
62void tipc_net_stop(void); 62void tipc_net_stop(void);
63 63
64#endif 64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6a7f7b4c2595..c387217bb230 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -2,7 +2,7 @@
2 * net/tipc/netlink.c: TIPC configuration handling 2 * net/tipc/netlink.c: TIPC configuration handling
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -45,15 +45,17 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
45 struct nlmsghdr *req_nlh = info->nlhdr; 45 struct nlmsghdr *req_nlh = info->nlhdr;
46 struct tipc_genlmsghdr *req_userhdr = info->userhdr; 46 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 u16 cmd;
48 49
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 50 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); 51 cmd = TIPC_CMD_NOT_NET_ADMIN;
51 else 52 else
52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, 53 cmd = req_userhdr->cmd;
53 req_userhdr->cmd, 54
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 55 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 56 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
56 hdr_space); 57 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
58 hdr_space);
57 59
58 if (rep_buf) { 60 if (rep_buf) {
59 skb_push(rep_buf, hdr_space); 61 skb_push(rep_buf, hdr_space);
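Rather than special-casing the permission failure with its own reply path, the check now substitutes the command code and lets tipc_cfg_do_cmd() generate the error reply through the common path. An equivalent one-liner (illustrative):

        u16 cmd = ((req_userhdr->cmd & 0xC000) && !capable(CAP_NET_ADMIN))
                        ? TIPC_CMD_NOT_NET_ADMIN : req_userhdr->cmd;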
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 598f4d3a0098..ee952ad60218 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -52,16 +52,40 @@ static void node_established_contact(struct node *n_ptr);
52 52
53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ 53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
54 54
55static DEFINE_SPINLOCK(node_create_lock);
56
55u32 tipc_own_tag = 0; 57u32 tipc_own_tag = 0;
56 58
59/**
60 * tipc_node_create - create neighboring node
61 *
62 * Currently, this routine is called by neighbor discovery code, which holds
63 * net_lock for reading only. We must take node_create_lock to ensure a node
64 * isn't created twice if two different bearers discover the node at the same
65 * time. (It would be preferable to switch to holding net_lock in write mode,
66 * but this is a non-trivial change.)
67 */
68
57struct node *tipc_node_create(u32 addr) 69struct node *tipc_node_create(u32 addr)
58{ 70{
59 struct cluster *c_ptr; 71 struct cluster *c_ptr;
60 struct node *n_ptr; 72 struct node *n_ptr;
61 struct node **curr_node; 73 struct node **curr_node;
62 74
75 spin_lock_bh(&node_create_lock);
76
77 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
78 if (addr < n_ptr->addr)
79 break;
80 if (addr == n_ptr->addr) {
81 spin_unlock_bh(&node_create_lock);
82 return n_ptr;
83 }
84 }
85
63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); 86 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (!n_ptr) { 87 if (!n_ptr) {
88 spin_unlock_bh(&node_create_lock);
65 warn("Node creation failed, no memory\n"); 89 warn("Node creation failed, no memory\n");
66 return NULL; 90 return NULL;
67 } 91 }
@@ -71,6 +95,7 @@ struct node *tipc_node_create(u32 addr)
71 c_ptr = tipc_cltr_create(addr); 95 c_ptr = tipc_cltr_create(addr);
72 } 96 }
73 if (!c_ptr) { 97 if (!c_ptr) {
98 spin_unlock_bh(&node_create_lock);
74 kfree(n_ptr); 99 kfree(n_ptr);
75 return NULL; 100 return NULL;
76 } 101 }
@@ -91,6 +116,7 @@ struct node *tipc_node_create(u32 addr)
91 } 116 }
92 } 117 }
93 (*curr_node) = n_ptr; 118 (*curr_node) = n_ptr;
119 spin_unlock_bh(&node_create_lock);
94 return n_ptr; 120 return n_ptr;
95} 121}
96 122
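The comment above describes a classic check-then-act race; the fix is the standard double-checked creation pattern, serialized by a dedicated spinlock (a hedged restatement with hypothetical helper names):

        spin_lock_bh(&node_create_lock);
        n_ptr = node_lookup(addr);      /* re-check under the lock */
        if (!n_ptr)
                n_ptr = node_alloc_and_insert(addr);
        spin_unlock_bh(&node_create_lock);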
@@ -574,12 +600,14 @@ u32 tipc_available_nodes(const u32 domain)
574 struct node *n_ptr; 600 struct node *n_ptr;
575 u32 cnt = 0; 601 u32 cnt = 0;
576 602
603 read_lock_bh(&tipc_net_lock);
577 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) { 604 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
578 if (!in_scope(domain, n_ptr->addr)) 605 if (!in_scope(domain, n_ptr->addr))
579 continue; 606 continue;
580 if (tipc_node_is_up(n_ptr)) 607 if (tipc_node_is_up(n_ptr))
581 cnt++; 608 cnt++;
582 } 609 }
610 read_unlock_bh(&tipc_net_lock);
583 return cnt; 611 return cnt;
584} 612}
585 613
@@ -599,19 +627,26 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
599 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 627 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
600 " (network address)"); 628 " (network address)");
601 629
602 if (!tipc_nodes) 630 read_lock_bh(&tipc_net_lock);
631 if (!tipc_nodes) {
632 read_unlock_bh(&tipc_net_lock);
603 return tipc_cfg_reply_none(); 633 return tipc_cfg_reply_none();
634 }
604 635
605 /* For now, get space for all other nodes 636 /* For now, get space for all other nodes
606 (will need to modify this when slave nodes are supported) */ 637 (will need to modify this when slave nodes are supported) */
607 638
608 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); 639 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
609 if (payload_size > 32768u) 640 if (payload_size > 32768u) {
641 read_unlock_bh(&tipc_net_lock);
610 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 642 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
611 " (too many nodes)"); 643 " (too many nodes)");
644 }
612 buf = tipc_cfg_reply_alloc(payload_size); 645 buf = tipc_cfg_reply_alloc(payload_size);
613 if (!buf) 646 if (!buf) {
647 read_unlock_bh(&tipc_net_lock);
614 return NULL; 648 return NULL;
649 }
615 650
616 /* Add TLVs for all nodes in scope */ 651 /* Add TLVs for all nodes in scope */
617 652
@@ -624,6 +659,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
624 &node_info, sizeof(node_info)); 659 &node_info, sizeof(node_info));
625 } 660 }
626 661
662 read_unlock_bh(&tipc_net_lock);
627 return buf; 663 return buf;
628} 664}
629 665
@@ -646,16 +682,22 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
646 if (tipc_mode != TIPC_NET_MODE) 682 if (tipc_mode != TIPC_NET_MODE)
647 return tipc_cfg_reply_none(); 683 return tipc_cfg_reply_none();
648 684
685 read_lock_bh(&tipc_net_lock);
686
649 /* Get space for all unicast links + multicast link */ 687 /* Get space for all unicast links + multicast link */
650 688
651 payload_size = TLV_SPACE(sizeof(link_info)) * 689 payload_size = TLV_SPACE(sizeof(link_info)) *
652 (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); 690 (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
653 if (payload_size > 32768u) 691 if (payload_size > 32768u) {
692 read_unlock_bh(&tipc_net_lock);
654 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 693 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
655 " (too many links)"); 694 " (too many links)");
695 }
656 buf = tipc_cfg_reply_alloc(payload_size); 696 buf = tipc_cfg_reply_alloc(payload_size);
657 if (!buf) 697 if (!buf) {
698 read_unlock_bh(&tipc_net_lock);
658 return NULL; 699 return NULL;
700 }
659 701
660 /* Add TLV for broadcast link */ 702 /* Add TLV for broadcast link */
661 703
@@ -671,6 +713,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
671 713
672 if (!in_scope(domain, n_ptr->addr)) 714 if (!in_scope(domain, n_ptr->addr))
673 continue; 715 continue;
716 tipc_node_lock(n_ptr);
674 for (i = 0; i < MAX_BEARERS; i++) { 717 for (i = 0; i < MAX_BEARERS; i++) {
675 if (!n_ptr->links[i]) 718 if (!n_ptr->links[i])
676 continue; 719 continue;
@@ -680,7 +723,9 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
680 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 723 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
681 &link_info, sizeof(link_info)); 724 &link_info, sizeof(link_info));
682 } 725 }
726 tipc_node_unlock(n_ptr);
683 } 727 }
684 728
729 read_unlock_bh(&tipc_net_lock);
685 return buf; 730 return buf;
686} 731}
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2f5806410c64..e70d27ea6578 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -2,7 +2,7 @@
2 * net/tipc/port.c: TIPC port code 2 * net/tipc/port.c: TIPC port code
3 * 3 *
4 * Copyright (c) 1992-2007, Ericsson AB 4 * Copyright (c) 1992-2007, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems 5 * Copyright (c) 2004-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -211,12 +211,12 @@ exit:
211} 211}
212 212
213/** 213/**
214 * tipc_createport_raw - create a native TIPC port 214 * tipc_createport_raw - create a generic TIPC port
215 * 215 *
216 * Returns local port reference 216 * Returns pointer to (locked) TIPC port, or NULL if unable to create it
217 */ 217 */
218 218
219u32 tipc_createport_raw(void *usr_handle, 219struct tipc_port *tipc_createport_raw(void *usr_handle,
220 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 220 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
221 void (*wakeup)(struct tipc_port *), 221 void (*wakeup)(struct tipc_port *),
222 const u32 importance) 222 const u32 importance)
@@ -228,26 +228,21 @@ u32 tipc_createport_raw(void *usr_handle,
228 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); 228 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
229 if (!p_ptr) { 229 if (!p_ptr) {
230 warn("Port creation failed, no memory\n"); 230 warn("Port creation failed, no memory\n");
231 return 0; 231 return NULL;
232 } 232 }
233 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 233 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
234 if (!ref) { 234 if (!ref) {
235 warn("Port creation failed, reference table exhausted\n"); 235 warn("Port creation failed, reference table exhausted\n");
236 kfree(p_ptr); 236 kfree(p_ptr);
237 return 0; 237 return NULL;
238 } 238 }
239 239
240 tipc_port_lock(ref);
241 p_ptr->publ.usr_handle = usr_handle; 240 p_ptr->publ.usr_handle = usr_handle;
242 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; 241 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
243 p_ptr->publ.ref = ref; 242 p_ptr->publ.ref = ref;
244 msg = &p_ptr->publ.phdr; 243 msg = &p_ptr->publ.phdr;
245 msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 244 msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
246 0);
247 msg_set_orignode(msg, tipc_own_addr);
248 msg_set_prevnode(msg, tipc_own_addr);
249 msg_set_origport(msg, ref); 245 msg_set_origport(msg, ref);
250 msg_set_importance(msg,importance);
251 p_ptr->last_in_seqno = 41; 246 p_ptr->last_in_seqno = 41;
252 p_ptr->sent = 1; 247 p_ptr->sent = 1;
253 INIT_LIST_HEAD(&p_ptr->wait_list); 248 INIT_LIST_HEAD(&p_ptr->wait_list);
@@ -262,8 +257,7 @@ u32 tipc_createport_raw(void *usr_handle,
262 INIT_LIST_HEAD(&p_ptr->port_list); 257 INIT_LIST_HEAD(&p_ptr->port_list);
263 list_add_tail(&p_ptr->port_list, &ports); 258 list_add_tail(&p_ptr->port_list, &ports);
264 spin_unlock_bh(&tipc_port_list_lock); 259 spin_unlock_bh(&tipc_port_list_lock);
265 tipc_port_unlock(p_ptr); 260 return &(p_ptr->publ);
266 return ref;
267} 261}
268 262
269int tipc_deleteport(u32 ref) 263int tipc_deleteport(u32 ref)
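Returning the port still locked lets the caller finish its private initialization before the port becomes reachable through its reference; the caller now owns the unlock. The caller-side pattern (cf. the socket.c hunk further down):

        tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
                                     TIPC_LOW_IMPORTANCE);
        if (!tp_ptr)
                return -ENOMEM;
        /* ... initialize user-visible state while still locked ... */
        spin_unlock_bh(tp_ptr->lock);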
@@ -297,7 +291,7 @@ int tipc_deleteport(u32 ref)
297 kfree(p_ptr); 291 kfree(p_ptr);
298 dbg("Deleted port %u\n", ref); 292 dbg("Deleted port %u\n", ref);
299 tipc_net_route_msg(buf); 293 tipc_net_route_msg(buf);
300 return TIPC_OK; 294 return 0;
301} 295}
302 296
303/** 297/**
@@ -342,7 +336,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
342 return -EINVAL; 336 return -EINVAL;
343 *isunreliable = port_unreliable(p_ptr); 337 *isunreliable = port_unreliable(p_ptr);
344 tipc_port_unlock(p_ptr); 338 tipc_port_unlock(p_ptr);
345 return TIPC_OK; 339 return 0;
346} 340}
347 341
348int tipc_set_portunreliable(u32 ref, unsigned int isunreliable) 342int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
@@ -354,7 +348,7 @@ int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
354 return -EINVAL; 348 return -EINVAL;
355 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0)); 349 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
356 tipc_port_unlock(p_ptr); 350 tipc_port_unlock(p_ptr);
357 return TIPC_OK; 351 return 0;
358} 352}
359 353
360static int port_unreturnable(struct port *p_ptr) 354static int port_unreturnable(struct port *p_ptr)
@@ -371,7 +365,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
371 return -EINVAL; 365 return -EINVAL;
372 *isunrejectable = port_unreturnable(p_ptr); 366 *isunrejectable = port_unreturnable(p_ptr);
373 tipc_port_unlock(p_ptr); 367 tipc_port_unlock(p_ptr);
374 return TIPC_OK; 368 return 0;
375} 369}
376 370
377int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable) 371int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
@@ -383,7 +377,7 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
383 return -EINVAL; 377 return -EINVAL;
384 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0)); 378 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
385 tipc_port_unlock(p_ptr); 379 tipc_port_unlock(p_ptr);
386 return TIPC_OK; 380 return 0;
387} 381}
388 382
389/* 383/*
@@ -402,10 +396,10 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
402 buf = buf_acquire(LONG_H_SIZE); 396 buf = buf_acquire(LONG_H_SIZE);
403 if (buf) { 397 if (buf) {
404 msg = buf_msg(buf); 398 msg = buf_msg(buf);
405 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode); 399 msg_init(msg, usr, type, LONG_H_SIZE, destnode);
400 msg_set_errcode(msg, err);
406 msg_set_destport(msg, destport); 401 msg_set_destport(msg, destport);
407 msg_set_origport(msg, origport); 402 msg_set_origport(msg, origport);
408 msg_set_destnode(msg, destnode);
409 msg_set_orignode(msg, orignode); 403 msg_set_orignode(msg, orignode);
410 msg_set_transp_seqno(msg, seqno); 404 msg_set_transp_seqno(msg, seqno);
411 msg_set_msgcnt(msg, ack); 405 msg_set_msgcnt(msg, ack);
@@ -446,17 +440,19 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
446 return data_sz; 440 return data_sz;
447 } 441 }
448 rmsg = buf_msg(rbuf); 442 rmsg = buf_msg(rbuf);
449 msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg)); 443 msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
444 msg_set_errcode(rmsg, err);
450 msg_set_destport(rmsg, msg_origport(msg)); 445 msg_set_destport(rmsg, msg_origport(msg));
451 msg_set_prevnode(rmsg, tipc_own_addr);
452 msg_set_origport(rmsg, msg_destport(msg)); 446 msg_set_origport(rmsg, msg_destport(msg));
453 if (msg_short(msg)) 447 if (msg_short(msg)) {
454 msg_set_orignode(rmsg, tipc_own_addr); 448 msg_set_orignode(rmsg, tipc_own_addr);
455 else 449 /* leave name type & instance as zeroes */
450 } else {
456 msg_set_orignode(rmsg, msg_destnode(msg)); 451 msg_set_orignode(rmsg, msg_destnode(msg));
452 msg_set_nametype(rmsg, msg_nametype(msg));
453 msg_set_nameinst(rmsg, msg_nameinst(msg));
454 }
457 msg_set_size(rmsg, data_sz + hdr_sz); 455 msg_set_size(rmsg, data_sz + hdr_sz);
458 msg_set_nametype(rmsg, msg_nametype(msg));
459 msg_set_nameinst(rmsg, msg_nameinst(msg));
460 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); 456 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
461 457
462 /* send self-abort message when rejecting on a connected port */ 458 /* send self-abort message when rejecting on a connected port */
@@ -778,6 +774,7 @@ void tipc_port_reinit(void)
778 msg = &p_ptr->publ.phdr; 774 msg = &p_ptr->publ.phdr;
779 if (msg_orignode(msg) == tipc_own_addr) 775 if (msg_orignode(msg) == tipc_own_addr)
780 break; 776 break;
777 msg_set_prevnode(msg, tipc_own_addr);
781 msg_set_orignode(msg, tipc_own_addr); 778 msg_set_orignode(msg, tipc_own_addr);
782 } 779 }
783 spin_unlock_bh(&tipc_port_list_lock); 780 spin_unlock_bh(&tipc_port_list_lock);
@@ -838,16 +835,13 @@ static void port_dispatcher_sigh(void *dummy)
838 u32 peer_node = port_peernode(p_ptr); 835 u32 peer_node = port_peernode(p_ptr);
839 836
840 tipc_port_unlock(p_ptr); 837 tipc_port_unlock(p_ptr);
838 if (unlikely(!cb))
839 goto reject;
841 if (unlikely(!connected)) { 840 if (unlikely(!connected)) {
842 if (unlikely(published)) 841 if (tipc_connect2port(dref, &orig))
843 goto reject; 842 goto reject;
844 tipc_connect2port(dref,&orig); 843 } else if ((msg_origport(msg) != peer_port) ||
845 } 844 (msg_orignode(msg) != peer_node))
846 if (unlikely(msg_origport(msg) != peer_port))
847 goto reject;
848 if (unlikely(msg_orignode(msg) != peer_node))
849 goto reject;
850 if (unlikely(!cb))
851 goto reject; 845 goto reject;
852 if (unlikely(++p_ptr->publ.conn_unacked >= 846 if (unlikely(++p_ptr->publ.conn_unacked >=
853 TIPC_FLOW_CONTROL_WIN)) 847 TIPC_FLOW_CONTROL_WIN))
@@ -862,9 +856,7 @@ static void port_dispatcher_sigh(void *dummy)
862 tipc_msg_event cb = up_ptr->msg_cb; 856 tipc_msg_event cb = up_ptr->msg_cb;
863 857
864 tipc_port_unlock(p_ptr); 858 tipc_port_unlock(p_ptr);
865 if (unlikely(connected)) 859 if (unlikely(!cb || connected))
866 goto reject;
867 if (unlikely(!cb))
868 goto reject; 860 goto reject;
869 skb_pull(buf, msg_hdr_sz(msg)); 861 skb_pull(buf, msg_hdr_sz(msg));
870 cb(usr_handle, dref, &buf, msg_data(msg), 862 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -877,11 +869,7 @@ static void port_dispatcher_sigh(void *dummy)
877 tipc_named_msg_event cb = up_ptr->named_msg_cb; 869 tipc_named_msg_event cb = up_ptr->named_msg_cb;
878 870
879 tipc_port_unlock(p_ptr); 871 tipc_port_unlock(p_ptr);
880 if (unlikely(connected)) 872 if (unlikely(!cb || connected || !published))
881 goto reject;
882 if (unlikely(!cb))
883 goto reject;
884 if (unlikely(!published))
885 goto reject; 873 goto reject;
886 dseq.type = msg_nametype(msg); 874 dseq.type = msg_nametype(msg);
887 dseq.lower = msg_nameinst(msg); 875 dseq.lower = msg_nameinst(msg);
@@ -908,11 +896,10 @@ err:
908 u32 peer_node = port_peernode(p_ptr); 896 u32 peer_node = port_peernode(p_ptr);
909 897
910 tipc_port_unlock(p_ptr); 898 tipc_port_unlock(p_ptr);
911 if (!connected || !cb) 899 if (!cb || !connected)
912 break;
913 if (msg_origport(msg) != peer_port)
914 break; 900 break;
915 if (msg_orignode(msg) != peer_node) 901 if ((msg_origport(msg) != peer_port) ||
902 (msg_orignode(msg) != peer_node))
916 break; 903 break;
917 tipc_disconnect(dref); 904 tipc_disconnect(dref);
918 skb_pull(buf, msg_hdr_sz(msg)); 905 skb_pull(buf, msg_hdr_sz(msg));
@@ -924,7 +911,7 @@ err:
924 tipc_msg_err_event cb = up_ptr->err_cb; 911 tipc_msg_err_event cb = up_ptr->err_cb;
925 912
926 tipc_port_unlock(p_ptr); 913 tipc_port_unlock(p_ptr);
927 if (connected || !cb) 914 if (!cb || connected)
928 break; 915 break;
929 skb_pull(buf, msg_hdr_sz(msg)); 916 skb_pull(buf, msg_hdr_sz(msg));
930 cb(usr_handle, dref, &buf, msg_data(msg), 917 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -937,7 +924,7 @@ err:
937 up_ptr->named_err_cb; 924 up_ptr->named_err_cb;
938 925
939 tipc_port_unlock(p_ptr); 926 tipc_port_unlock(p_ptr);
940 if (connected || !cb) 927 if (!cb || connected)
941 break; 928 break;
942 dseq.type = msg_nametype(msg); 929 dseq.type = msg_nametype(msg);
943 dseq.lower = msg_nameinst(msg); 930 dseq.lower = msg_nameinst(msg);
@@ -976,7 +963,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
976 tipc_k_signal((Handler)port_dispatcher_sigh, 0); 963 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
977 } 964 }
978 spin_unlock_bh(&queue_lock); 965 spin_unlock_bh(&queue_lock);
979 return TIPC_OK; 966 return 0;
980} 967}
981 968
982/* 969/*
@@ -1053,15 +1040,14 @@ int tipc_createport(u32 user_ref,
1053{ 1040{
1054 struct user_port *up_ptr; 1041 struct user_port *up_ptr;
1055 struct port *p_ptr; 1042 struct port *p_ptr;
1056 u32 ref;
1057 1043
1058 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1044 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1059 if (!up_ptr) { 1045 if (!up_ptr) {
1060 warn("Port creation failed, no memory\n"); 1046 warn("Port creation failed, no memory\n");
1061 return -ENOMEM; 1047 return -ENOMEM;
1062 } 1048 }
1063 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1049 p_ptr = (struct port *)tipc_createport_raw(NULL, port_dispatcher,
1064 p_ptr = tipc_port_lock(ref); 1050 port_wakeup, importance);
1065 if (!p_ptr) { 1051 if (!p_ptr) {
1066 kfree(up_ptr); 1052 kfree(up_ptr);
1067 return -ENOMEM; 1053 return -ENOMEM;
@@ -1081,16 +1067,15 @@ int tipc_createport(u32 user_ref,
1081 INIT_LIST_HEAD(&up_ptr->uport_list); 1067 INIT_LIST_HEAD(&up_ptr->uport_list);
1082 tipc_reg_add_port(up_ptr); 1068 tipc_reg_add_port(up_ptr);
1083 *portref = p_ptr->publ.ref; 1069 *portref = p_ptr->publ.ref;
1084 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1085 tipc_port_unlock(p_ptr); 1070 tipc_port_unlock(p_ptr);
1086 return TIPC_OK; 1071 return 0;
1087} 1072}
1088 1073
1089int tipc_ownidentity(u32 ref, struct tipc_portid *id) 1074int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1090{ 1075{
1091 id->ref = ref; 1076 id->ref = ref;
1092 id->node = tipc_own_addr; 1077 id->node = tipc_own_addr;
1093 return TIPC_OK; 1078 return 0;
1094} 1079}
1095 1080
1096int tipc_portimportance(u32 ref, unsigned int *importance) 1081int tipc_portimportance(u32 ref, unsigned int *importance)
@@ -1102,7 +1087,7 @@ int tipc_portimportance(u32 ref, unsigned int *importance)
1102 return -EINVAL; 1087 return -EINVAL;
1103 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); 1088 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
1104 tipc_port_unlock(p_ptr); 1089 tipc_port_unlock(p_ptr);
1105 return TIPC_OK; 1090 return 0;
1106} 1091}
1107 1092
1108int tipc_set_portimportance(u32 ref, unsigned int imp) 1093int tipc_set_portimportance(u32 ref, unsigned int imp)
@@ -1117,7 +1102,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
1117 return -EINVAL; 1102 return -EINVAL;
1118 msg_set_importance(&p_ptr->publ.phdr, (u32)imp); 1103 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
1119 tipc_port_unlock(p_ptr); 1104 tipc_port_unlock(p_ptr);
1120 return TIPC_OK; 1105 return 0;
1121} 1106}
1122 1107
1123 1108
@@ -1152,7 +1137,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1152 list_add(&publ->pport_list, &p_ptr->publications); 1137 list_add(&publ->pport_list, &p_ptr->publications);
1153 p_ptr->pub_count++; 1138 p_ptr->pub_count++;
1154 p_ptr->publ.published = 1; 1139 p_ptr->publ.published = 1;
1155 res = TIPC_OK; 1140 res = 0;
1156 } 1141 }
1157exit: 1142exit:
1158 tipc_port_unlock(p_ptr); 1143 tipc_port_unlock(p_ptr);
@@ -1175,7 +1160,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1175 tipc_nametbl_withdraw(publ->type, publ->lower, 1160 tipc_nametbl_withdraw(publ->type, publ->lower,
1176 publ->ref, publ->key); 1161 publ->ref, publ->key);
1177 } 1162 }
1178 res = TIPC_OK; 1163 res = 0;
1179 } else { 1164 } else {
1180 list_for_each_entry_safe(publ, tpubl, 1165 list_for_each_entry_safe(publ, tpubl,
1181 &p_ptr->publications, pport_list) { 1166 &p_ptr->publications, pport_list) {
@@ -1189,7 +1174,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1189 break; 1174 break;
1190 tipc_nametbl_withdraw(publ->type, publ->lower, 1175 tipc_nametbl_withdraw(publ->type, publ->lower,
1191 publ->ref, publ->key); 1176 publ->ref, publ->key);
1192 res = TIPC_OK; 1177 res = 0;
1193 break; 1178 break;
1194 } 1179 }
1195 } 1180 }
@@ -1233,7 +1218,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1233 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node, 1218 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1234 (void *)(unsigned long)ref, 1219 (void *)(unsigned long)ref,
1235 (net_ev_handler)port_handle_node_down); 1220 (net_ev_handler)port_handle_node_down);
1236 res = TIPC_OK; 1221 res = 0;
1237exit: 1222exit:
1238 tipc_port_unlock(p_ptr); 1223 tipc_port_unlock(p_ptr);
1239 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref); 1224 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
@@ -1255,7 +1240,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
1255 /* let timer expire on its own to avoid deadlock! */ 1240 /* let timer expire on its own to avoid deadlock! */
1256 tipc_nodesub_unsubscribe( 1241 tipc_nodesub_unsubscribe(
1257 &((struct port *)tp_ptr)->subscription); 1242 &((struct port *)tp_ptr)->subscription);
1258 res = TIPC_OK; 1243 res = 0;
1259 } else { 1244 } else {
1260 res = -ENOTCONN; 1245 res = -ENOTCONN;
1261 } 1246 }
@@ -1320,7 +1305,7 @@ int tipc_isconnected(u32 ref, int *isconnected)
1320 return -EINVAL; 1305 return -EINVAL;
1321 *isconnected = p_ptr->publ.connected; 1306 *isconnected = p_ptr->publ.connected;
1322 tipc_port_unlock(p_ptr); 1307 tipc_port_unlock(p_ptr);
1323 return TIPC_OK; 1308 return 0;
1324} 1309}
1325 1310
1326int tipc_peer(u32 ref, struct tipc_portid *peer) 1311int tipc_peer(u32 ref, struct tipc_portid *peer)
@@ -1334,7 +1319,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1334 if (p_ptr->publ.connected) { 1319 if (p_ptr->publ.connected) {
1335 peer->ref = port_peerport(p_ptr); 1320 peer->ref = port_peerport(p_ptr);
1336 peer->node = port_peernode(p_ptr); 1321 peer->node = port_peernode(p_ptr);
1337 res = TIPC_OK; 1322 res = 0;
1338 } else 1323 } else
1339 res = -ENOTCONN; 1324 res = -ENOTCONN;
1340 tipc_port_unlock(p_ptr); 1325 tipc_port_unlock(p_ptr);
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 89cbab24d08f..414fc34b8bea 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -123,7 +123,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
123 tipc_ref_table.index_mask = actual_size - 1; 123 tipc_ref_table.index_mask = actual_size - 1;
124 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; 124 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
125 125
126 return TIPC_OK; 126 return 0;
127} 127}
128 128
129/** 129/**
@@ -142,9 +142,13 @@ void tipc_ref_table_stop(void)
142/** 142/**
143 * tipc_ref_acquire - create reference to an object 143 * tipc_ref_acquire - create reference to an object
144 * 144 *
145 * Return a unique reference value which can be translated back to the pointer 145 * Register an object pointer in reference table and lock the object.
146 * 'object' at a later time. Also, pass back a pointer to the lock protecting 146 * Returns a unique reference value that is used from then on to retrieve the
147 * the object, but without locking it. 147 * object pointer, or to determine that the object has been deregistered.
148 *
149 * Note: The object is returned in the locked state so that the caller can
150 * register a partially initialized object, without running the risk that
151 * the object will be accessed before initialization is complete.
148 */ 152 */
149 153
150u32 tipc_ref_acquire(void *object, spinlock_t **lock) 154u32 tipc_ref_acquire(void *object, spinlock_t **lock)
@@ -178,13 +182,13 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
178 ref = (next_plus_upper & ~index_mask) + index; 182 ref = (next_plus_upper & ~index_mask) + index;
179 entry->ref = ref; 183 entry->ref = ref;
180 entry->object = object; 184 entry->object = object;
181 spin_unlock_bh(&entry->lock);
182 *lock = &entry->lock; 185 *lock = &entry->lock;
183 } 186 }
184 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 187 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
185 index = tipc_ref_table.init_point++; 188 index = tipc_ref_table.init_point++;
186 entry = &(tipc_ref_table.entries[index]); 189 entry = &(tipc_ref_table.entries[index]);
187 spin_lock_init(&entry->lock); 190 spin_lock_init(&entry->lock);
191 spin_lock_bh(&entry->lock);
188 ref = tipc_ref_table.start_mask + index; 192 ref = tipc_ref_table.start_mask + index;
189 entry->ref = ref; 193 entry->ref = ref;
190 entry->object = object; 194 entry->object = object;
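With the early unlock removed from the recycled-entry path and a lock taken on the fresh-entry path, tipc_ref_acquire() now always returns with the entry lock held, which the reworked tipc_createport_raw() depends on. A hedged usage sketch (finish_init is hypothetical):

        ref = tipc_ref_acquire(obj, &lock);
        if (!ref)
                return 0;               /* reference table exhausted */
        /* lookups resolving this ref spin on 'lock' until... */
        finish_init(obj, ref);
        spin_unlock_bh(lock);           /* ...we publish the object here */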
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 230f9ca2ad6b..1848693ebb82 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2,7 +2,7 @@
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems 5 * Copyright (c) 2004-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,7 @@
63struct tipc_sock { 63struct tipc_sock {
64 struct sock sk; 64 struct sock sk;
65 struct tipc_port *p; 65 struct tipc_port *p;
66 struct tipc_portid peer_name;
66}; 67};
67 68
68#define tipc_sk(sk) ((struct tipc_sock *)(sk)) 69#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -188,7 +189,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
188 const struct proto_ops *ops; 189 const struct proto_ops *ops;
189 socket_state state; 190 socket_state state;
190 struct sock *sk; 191 struct sock *sk;
191 u32 portref; 192 struct tipc_port *tp_ptr;
192 193
193 /* Validate arguments */ 194 /* Validate arguments */
194 195
@@ -224,9 +225,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
224 225
225 /* Allocate TIPC port for socket to use */ 226 /* Allocate TIPC port for socket to use */
226 227
227 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, 228 tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
228 TIPC_LOW_IMPORTANCE); 229 TIPC_LOW_IMPORTANCE);
229 if (unlikely(portref == 0)) { 230 if (unlikely(!tp_ptr)) {
230 sk_free(sk); 231 sk_free(sk);
231 return -ENOMEM; 232 return -ENOMEM;
232 } 233 }
@@ -239,12 +240,14 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
239 sock_init_data(sock, sk); 240 sock_init_data(sock, sk);
240 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT); 241 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
241 sk->sk_backlog_rcv = backlog_rcv; 242 sk->sk_backlog_rcv = backlog_rcv;
242 tipc_sk(sk)->p = tipc_get_port(portref); 243 tipc_sk(sk)->p = tp_ptr;
244
245 spin_unlock_bh(tp_ptr->lock);
243 246
244 if (sock->state == SS_READY) { 247 if (sock->state == SS_READY) {
245 tipc_set_portunreturnable(portref, 1); 248 tipc_set_portunreturnable(tp_ptr->ref, 1);
246 if (sock->type == SOCK_DGRAM) 249 if (sock->type == SOCK_DGRAM)
247 tipc_set_portunreliable(portref, 1); 250 tipc_set_portunreliable(tp_ptr->ref, 1);
248 } 251 }
249 252
250 atomic_inc(&tipc_user_count); 253 atomic_inc(&tipc_user_count);
@@ -375,27 +378,29 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
375 * @sock: socket structure 378 * @sock: socket structure
376 * @uaddr: area for returned socket address 379 * @uaddr: area for returned socket address
377 * @uaddr_len: area for returned length of socket address 380 * @uaddr_len: area for returned length of socket address
378 * @peer: 0 to obtain socket name, 1 to obtain peer socket name 381 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
379 * 382 *
380 * Returns 0 on success, errno otherwise 383 * Returns 0 on success, errno otherwise
381 * 384 *
382 * NOTE: This routine doesn't need to take the socket lock since it doesn't 385 * NOTE: This routine doesn't need to take the socket lock since it only
383 * access any non-constant socket information. 386 * accesses socket information that is unchanging (or which changes in
387 * a completely predictable manner).
384 */ 388 */
385 389
386static int get_name(struct socket *sock, struct sockaddr *uaddr, 390static int get_name(struct socket *sock, struct sockaddr *uaddr,
387 int *uaddr_len, int peer) 391 int *uaddr_len, int peer)
388{ 392{
389 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 393 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
390 u32 portref = tipc_sk_port(sock->sk)->ref; 394 struct tipc_sock *tsock = tipc_sk(sock->sk);
391 u32 res;
392 395
393 if (peer) { 396 if (peer) {
394 res = tipc_peer(portref, &addr->addr.id); 397 if ((sock->state != SS_CONNECTED) &&
395 if (res) 398 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
396 return res; 399 return -ENOTCONN;
400 addr->addr.id.ref = tsock->peer_name.ref;
401 addr->addr.id.node = tsock->peer_name.node;
397 } else { 402 } else {
398 tipc_ownidentity(portref, &addr->addr.id); 403 tipc_ownidentity(tsock->p->ref, &addr->addr.id);
399 } 404 }
400 405
401 *uaddr_len = sizeof(*addr); 406 *uaddr_len = sizeof(*addr);
@@ -764,18 +769,17 @@ exit:
764 769
765static int auto_connect(struct socket *sock, struct tipc_msg *msg) 770static int auto_connect(struct socket *sock, struct tipc_msg *msg)
766{ 771{
767 struct tipc_port *tport = tipc_sk_port(sock->sk); 772 struct tipc_sock *tsock = tipc_sk(sock->sk);
768 struct tipc_portid peer;
769 773
770 if (msg_errcode(msg)) { 774 if (msg_errcode(msg)) {
771 sock->state = SS_DISCONNECTING; 775 sock->state = SS_DISCONNECTING;
772 return -ECONNREFUSED; 776 return -ECONNREFUSED;
773 } 777 }
774 778
775 peer.ref = msg_origport(msg); 779 tsock->peer_name.ref = msg_origport(msg);
776 peer.node = msg_orignode(msg); 780 tsock->peer_name.node = msg_orignode(msg);
777 tipc_connect2port(tport->ref, &peer); 781 tipc_connect2port(tsock->p->ref, &tsock->peer_name);
778 tipc_set_portimportance(tport->ref, msg_importance(msg)); 782 tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
779 sock->state = SS_CONNECTED; 783 sock->state = SS_CONNECTED;
780 return 0; 784 return 0;
781} 785}
@@ -1131,7 +1135,7 @@ restart:
1131 /* Loop around if more data is required */ 1135 /* Loop around if more data is required */
1132 1136
1133 if ((sz_copied < buf_len) /* didn't get all requested data */ 1137 if ((sz_copied < buf_len) /* didn't get all requested data */
1134 && (!skb_queue_empty(&sock->sk->sk_receive_queue) || 1138 && (!skb_queue_empty(&sk->sk_receive_queue) ||
1135 (flags & MSG_WAITALL)) 1139 (flags & MSG_WAITALL))
1136 /* ... and more is ready or required */ 1140 /* ... and more is ready or required */
1137 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ 1141 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
@@ -1527,9 +1531,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1527 res = tipc_create(sock_net(sock->sk), new_sock, 0); 1531 res = tipc_create(sock_net(sock->sk), new_sock, 0);
1528 if (!res) { 1532 if (!res) {
1529 struct sock *new_sk = new_sock->sk; 1533 struct sock *new_sk = new_sock->sk;
1530 struct tipc_port *new_tport = tipc_sk_port(new_sk); 1534 struct tipc_sock *new_tsock = tipc_sk(new_sk);
1535 struct tipc_port *new_tport = new_tsock->p;
1531 u32 new_ref = new_tport->ref; 1536 u32 new_ref = new_tport->ref;
1532 struct tipc_portid id;
1533 struct tipc_msg *msg = buf_msg(buf); 1537 struct tipc_msg *msg = buf_msg(buf);
1534 1538
1535 lock_sock(new_sk); 1539 lock_sock(new_sk);
@@ -1543,9 +1547,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1543 1547
1544 /* Connect new socket to its peer */ 1548 /* Connect new socket to its peer */
1545 1549
1546 id.ref = msg_origport(msg); 1550 new_tsock->peer_name.ref = msg_origport(msg);
1547 id.node = msg_orignode(msg); 1551 new_tsock->peer_name.node = msg_orignode(msg);
1548 tipc_connect2port(new_ref, &id); 1552 tipc_connect2port(new_ref, &new_tsock->peer_name);
1549 new_sock->state = SS_CONNECTED; 1553 new_sock->state = SS_CONNECTED;
1550 1554
1551 tipc_set_portimportance(new_ref, msg_importance(msg)); 1555 tipc_set_portimportance(new_ref, msg_importance(msg));
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8c01ccd3626c..0326d3060bc7 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.c: TIPC subscription service 2 * net/tipc/subscr.c: TIPC network topology service
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -36,27 +36,24 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h" 38#include "dbg.h"
39#include "subscr.h"
40#include "name_table.h" 39#include "name_table.h"
40#include "port.h"
41#include "ref.h" 41#include "ref.h"
42#include "subscr.h"
42 43
43/** 44/**
44 * struct subscriber - TIPC network topology subscriber 45 * struct subscriber - TIPC network topology subscriber
45 * @ref: object reference to subscriber object itself 46 * @port_ref: object reference to server port connecting to subscriber
46 * @lock: pointer to spinlock controlling access to subscriber object 47 * @lock: pointer to spinlock controlling access to subscriber's server port
47 * @subscriber_list: adjacent subscribers in top. server's list of subscribers 48 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
48 * @subscription_list: list of subscription objects for this subscriber 49 * @subscription_list: list of subscription objects for this subscriber
49 * @port_ref: object reference to port used to communicate with subscriber
50 * @swap: indicates if subscriber uses opposite endianness in its messages
51 */ 50 */
52 51
53struct subscriber { 52struct subscriber {
54 u32 ref; 53 u32 port_ref;
55 spinlock_t *lock; 54 spinlock_t *lock;
56 struct list_head subscriber_list; 55 struct list_head subscriber_list;
57 struct list_head subscription_list; 56 struct list_head subscription_list;
58 u32 port_ref;
59 int swap;
60}; 57};
61 58
62/** 59/**
@@ -88,13 +85,14 @@ static struct top_srv topsrv = { 0 };
88 85
89static u32 htohl(u32 in, int swap) 86static u32 htohl(u32 in, int swap)
90{ 87{
91 char *c = (char *)&in; 88 return swap ? (u32)___constant_swab32(in) : in;
92
93 return swap ? ((c[3] << 3) + (c[2] << 2) + (c[1] << 1) + c[0]) : in;
94} 89}
95 90
96/** 91/**
97 * subscr_send_event - send a message containing a tipc_event to the subscriber 92 * subscr_send_event - send a message containing a tipc_event to the subscriber
93 *
94 * Note: Must not hold subscriber's server port lock, since tipc_send() will
95 * try to take the lock if the message is rejected and returned!
98 */ 96 */
99 97
100static void subscr_send_event(struct subscription *sub, 98static void subscr_send_event(struct subscription *sub,
@@ -109,12 +107,12 @@ static void subscr_send_event(struct subscription *sub,
109 msg_sect.iov_base = (void *)&sub->evt; 107 msg_sect.iov_base = (void *)&sub->evt;
110 msg_sect.iov_len = sizeof(struct tipc_event); 108 msg_sect.iov_len = sizeof(struct tipc_event);
111 109
112 sub->evt.event = htohl(event, sub->owner->swap); 110 sub->evt.event = htohl(event, sub->swap);
113 sub->evt.found_lower = htohl(found_lower, sub->owner->swap); 111 sub->evt.found_lower = htohl(found_lower, sub->swap);
114 sub->evt.found_upper = htohl(found_upper, sub->owner->swap); 112 sub->evt.found_upper = htohl(found_upper, sub->swap);
115 sub->evt.port.ref = htohl(port_ref, sub->owner->swap); 113 sub->evt.port.ref = htohl(port_ref, sub->swap);
116 sub->evt.port.node = htohl(node, sub->owner->swap); 114 sub->evt.port.node = htohl(node, sub->swap);
117 tipc_send(sub->owner->port_ref, 1, &msg_sect); 115 tipc_send(sub->server_ref, 1, &msg_sect);
118} 116}
119 117
120/** 118/**
@@ -151,13 +149,12 @@ void tipc_subscr_report_overlap(struct subscription *sub,
151 u32 node, 149 u32 node,
152 int must) 150 int must)
153{ 151{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper);
156 if (!tipc_subscr_overlap(sub, found_lower, found_upper)) 152 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 153 return;
158 if (!must && !(sub->filter & TIPC_SUB_PORTS)) 154 if (!must && !(sub->filter & TIPC_SUB_PORTS))
159 return; 155 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); 156
157 sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
161} 158}
162 159
163/** 160/**
@@ -166,20 +163,18 @@ void tipc_subscr_report_overlap(struct subscription *sub,
166 163
167static void subscr_timeout(struct subscription *sub) 164static void subscr_timeout(struct subscription *sub)
168{ 165{
169 struct subscriber *subscriber; 166 struct port *server_port;
170 u32 subscriber_ref;
171 167
172 /* Validate subscriber reference (in case subscriber is terminating) */ 168 /* Validate server port reference (in case subscriber is terminating) */
173 169
174 subscriber_ref = sub->owner->ref; 170 server_port = tipc_port_lock(sub->server_ref);
175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref); 171 if (server_port == NULL)
176 if (subscriber == NULL)
177 return; 172 return;
178 173
179 /* Validate timeout (in case subscription is being cancelled) */ 174 /* Validate timeout (in case subscription is being cancelled) */
180 175
181 if (sub->timeout == TIPC_WAIT_FOREVER) { 176 if (sub->timeout == TIPC_WAIT_FOREVER) {
182 tipc_ref_unlock(subscriber_ref); 177 tipc_port_unlock(server_port);
183 return; 178 return;
184 } 179 }
185 180
@@ -187,19 +182,21 @@ static void subscr_timeout(struct subscription *sub)
187 182
188 tipc_nametbl_unsubscribe(sub); 183 tipc_nametbl_unsubscribe(sub);
189 184
190 /* Notify subscriber of timeout, then unlink subscription */ 185 /* Unlink subscription from subscriber */
191 186
192 subscr_send_event(sub,
193 sub->evt.s.seq.lower,
194 sub->evt.s.seq.upper,
195 TIPC_SUBSCR_TIMEOUT,
196 0,
197 0);
198 list_del(&sub->subscription_list); 187 list_del(&sub->subscription_list);
199 188
189 /* Release subscriber's server port */
190
191 tipc_port_unlock(server_port);
192
193 /* Notify subscriber of timeout */
194
195 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
196 TIPC_SUBSCR_TIMEOUT, 0, 0);
197
200 /* Now destroy subscription */ 198 /* Now destroy subscription */
201 199
202 tipc_ref_unlock(subscriber_ref);
203 k_term_timer(&sub->timer); 200 k_term_timer(&sub->timer);
204 kfree(sub); 201 kfree(sub);
205 atomic_dec(&topsrv.subscription_count); 202 atomic_dec(&topsrv.subscription_count);
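
The reordering above encodes a lock-ordering rule: subscr_send_event() may re-enter the port code (the rejected-message path takes the port lock), so the timeout handler must drop the server port lock before sending the notification, not after. The shape of the rule, sketched with a pthread mutex standing in for the port lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for subscr_send_event(): the reject path re-takes the lock,
 * so this would deadlock if the caller still held it. */
static void notify_timeout(void)
{
	pthread_mutex_lock(&port_lock);
	printf("timeout event sent\n");
	pthread_mutex_unlock(&port_lock);
}

static void sub_timeout(void)
{
	pthread_mutex_lock(&port_lock);
	/* ... unlink the subscription from the subscriber's list ... */
	pthread_mutex_unlock(&port_lock);

	notify_timeout();	/* safe: the lock has been dropped */
	/* ... destroy the subscription ... */
}

int main(void)
{
	sub_timeout();
	return 0;
}
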
@@ -208,7 +205,7 @@ static void subscr_timeout(struct subscription *sub)
208/** 205/**
209 * subscr_del - delete a subscription within a subscription list 206 * subscr_del - delete a subscription within a subscription list
210 * 207 *
211 * Called with subscriber locked. 208 * Called with subscriber port locked.
212 */ 209 */
213 210
214static void subscr_del(struct subscription *sub) 211static void subscr_del(struct subscription *sub)
@@ -222,7 +219,7 @@ static void subscr_del(struct subscription *sub)
222/** 219/**
223 * subscr_terminate - terminate communication with a subscriber 220 * subscr_terminate - terminate communication with a subscriber
224 * 221 *
225 * Called with subscriber locked. Routine must temporarily release this lock 222 * Called with subscriber port locked. Routine must temporarily release lock
226 * to enable subscription timeout routine(s) to finish without deadlocking; 223 * to enable subscription timeout routine(s) to finish without deadlocking;
227 * the lock is then reclaimed to allow caller to release it upon return. 224 * the lock is then reclaimed to allow caller to release it upon return.
228 * (This should work even in the unlikely event some other thread creates 225 * (This should work even in the unlikely event some other thread creates
@@ -232,14 +229,21 @@ static void subscr_del(struct subscription *sub)
232 229
233static void subscr_terminate(struct subscriber *subscriber) 230static void subscr_terminate(struct subscriber *subscriber)
234{ 231{
232 u32 port_ref;
235 struct subscription *sub; 233 struct subscription *sub;
236 struct subscription *sub_temp; 234 struct subscription *sub_temp;
237 235
238 /* Invalidate subscriber reference */ 236 /* Invalidate subscriber reference */
239 237
240 tipc_ref_discard(subscriber->ref); 238 port_ref = subscriber->port_ref;
239 subscriber->port_ref = 0;
241 spin_unlock_bh(subscriber->lock); 240 spin_unlock_bh(subscriber->lock);
242 241
242 /* Sever connection to subscriber */
243
244 tipc_shutdown(port_ref);
245 tipc_deleteport(port_ref);
246
243 /* Destroy any existing subscriptions for subscriber */ 247 /* Destroy any existing subscriptions for subscriber */
244 248
245 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 249 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
@@ -253,27 +257,25 @@ static void subscr_terminate(struct subscriber *subscriber)
253 subscr_del(sub); 257 subscr_del(sub);
254 } 258 }
255 259
256 /* Sever connection to subscriber */
257
258 tipc_shutdown(subscriber->port_ref);
259 tipc_deleteport(subscriber->port_ref);
260
261 /* Remove subscriber from topology server's subscriber list */ 260 /* Remove subscriber from topology server's subscriber list */
262 261
263 spin_lock_bh(&topsrv.lock); 262 spin_lock_bh(&topsrv.lock);
264 list_del(&subscriber->subscriber_list); 263 list_del(&subscriber->subscriber_list);
265 spin_unlock_bh(&topsrv.lock); 264 spin_unlock_bh(&topsrv.lock);
266 265
267 /* Now destroy subscriber */ 266 /* Reclaim subscriber lock */
268 267
269 spin_lock_bh(subscriber->lock); 268 spin_lock_bh(subscriber->lock);
269
270 /* Now destroy subscriber */
271
270 kfree(subscriber); 272 kfree(subscriber);
271} 273}
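
subscr_terminate() now claims the port reference under the lock (zeroing subscriber->port_ref so the port cannot be torn down twice), drops the lock for the shutdown work, and reacquires it before returning, since the caller expects to release it. A compact sketch of claim-under-lock, tear-down-unlocked, then reclaim (names illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct subscriber {
	pthread_mutex_t *lock;
	uint32_t port_ref;	/* 0 means "already being torn down" */
};

static void shutdown_port(uint32_t ref)
{
	printf("shutting down port %u\n", ref);
}

/* Entered and exited with *lock held, as in the diff above. */
static void terminate(struct subscriber *sub)
{
	uint32_t ref = sub->port_ref;

	sub->port_ref = 0;		/* claim: nobody else may shut it down */
	pthread_mutex_unlock(sub->lock);

	if (ref)
		shutdown_port(ref);	/* may block: lock is not held */

	pthread_mutex_lock(sub->lock);	/* reclaim for the caller */
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	struct subscriber s = { &m, 42 };

	pthread_mutex_lock(&m);
	terminate(&s);
	pthread_mutex_unlock(&m);
	return 0;
}
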
272 274
273/** 275/**
274 * subscr_cancel - handle subscription cancellation request 276 * subscr_cancel - handle subscription cancellation request
275 * 277 *
276 * Called with subscriber locked. Routine must temporarily release this lock 278 * Called with subscriber port locked. Routine must temporarily release lock
277 * to enable the subscription timeout routine to finish without deadlocking; 279 * to enable the subscription timeout routine to finish without deadlocking;
278 * the lock is then reclaimed to allow caller to release it upon return. 280 * the lock is then reclaimed to allow caller to release it upon return.
279 * 281 *
@@ -316,27 +318,25 @@ static void subscr_cancel(struct tipc_subscr *s,
316/** 318/**
317 * subscr_subscribe - create subscription for subscriber 319 * subscr_subscribe - create subscription for subscriber
318 * 320 *
319 * Called with subscriber locked 321 * Called with subscriber port locked.
320 */ 322 */
321 323
322static void subscr_subscribe(struct tipc_subscr *s, 324static struct subscription *subscr_subscribe(struct tipc_subscr *s,
323 struct subscriber *subscriber) 325 struct subscriber *subscriber)
324{ 326{
325 struct subscription *sub; 327 struct subscription *sub;
328 int swap;
326 329
327 /* Determine/update subscriber's endianness */ 330 /* Determine subscriber's endianness */
328 331
329 if (s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)) 332 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
330 subscriber->swap = 0;
331 else
332 subscriber->swap = 1;
333 333
334 /* Detect & process a subscription cancellation request */ 334 /* Detect & process a subscription cancellation request */
335 335
336 if (s->filter & htohl(TIPC_SUB_CANCEL, subscriber->swap)) { 336 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, subscriber->swap); 337 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
338 subscr_cancel(s, subscriber); 338 subscr_cancel(s, subscriber);
339 return; 339 return NULL;
340 } 340 }
341 341
342 /* Refuse subscription if global limit exceeded */ 342 /* Refuse subscription if global limit exceeded */
@@ -345,63 +345,66 @@ static void subscr_subscribe(struct tipc_subscr *s,
345 warn("Subscription rejected, subscription limit reached (%u)\n", 345 warn("Subscription rejected, subscription limit reached (%u)\n",
346 tipc_max_subscriptions); 346 tipc_max_subscriptions);
347 subscr_terminate(subscriber); 347 subscr_terminate(subscriber);
348 return; 348 return NULL;
349 } 349 }
350 350
351 /* Allocate subscription object */ 351 /* Allocate subscription object */
352 352
353 sub = kzalloc(sizeof(*sub), GFP_ATOMIC); 353 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
354 if (!sub) { 354 if (!sub) {
355 warn("Subscription rejected, no memory\n"); 355 warn("Subscription rejected, no memory\n");
356 subscr_terminate(subscriber); 356 subscr_terminate(subscriber);
357 return; 357 return NULL;
358 } 358 }
359 359
360 /* Initialize subscription object */ 360 /* Initialize subscription object */
361 361
362 sub->seq.type = htohl(s->seq.type, subscriber->swap); 362 sub->seq.type = htohl(s->seq.type, swap);
363 sub->seq.lower = htohl(s->seq.lower, subscriber->swap); 363 sub->seq.lower = htohl(s->seq.lower, swap);
364 sub->seq.upper = htohl(s->seq.upper, subscriber->swap); 364 sub->seq.upper = htohl(s->seq.upper, swap);
365 sub->timeout = htohl(s->timeout, subscriber->swap); 365 sub->timeout = htohl(s->timeout, swap);
366 sub->filter = htohl(s->filter, subscriber->swap); 366 sub->filter = htohl(s->filter, swap);
367 if ((!(sub->filter & TIPC_SUB_PORTS) 367 if ((!(sub->filter & TIPC_SUB_PORTS)
368 == !(sub->filter & TIPC_SUB_SERVICE)) 368 == !(sub->filter & TIPC_SUB_SERVICE))
369 || (sub->seq.lower > sub->seq.upper)) { 369 || (sub->seq.lower > sub->seq.upper)) {
370 warn("Subscription rejected, illegal request\n"); 370 warn("Subscription rejected, illegal request\n");
371 kfree(sub); 371 kfree(sub);
372 subscr_terminate(subscriber); 372 subscr_terminate(subscriber);
373 return; 373 return NULL;
374 } 374 }
375 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 375 sub->event_cb = subscr_send_event;
376 INIT_LIST_HEAD(&sub->subscription_list);
377 INIT_LIST_HEAD(&sub->nameseq_list); 376 INIT_LIST_HEAD(&sub->nameseq_list);
378 list_add(&sub->subscription_list, &subscriber->subscription_list); 377 list_add(&sub->subscription_list, &subscriber->subscription_list);
378 sub->server_ref = subscriber->port_ref;
379 sub->swap = swap;
380 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
379 atomic_inc(&topsrv.subscription_count); 381 atomic_inc(&topsrv.subscription_count);
380 if (sub->timeout != TIPC_WAIT_FOREVER) { 382 if (sub->timeout != TIPC_WAIT_FOREVER) {
381 k_init_timer(&sub->timer, 383 k_init_timer(&sub->timer,
382 (Handler)subscr_timeout, (unsigned long)sub); 384 (Handler)subscr_timeout, (unsigned long)sub);
383 k_start_timer(&sub->timer, sub->timeout); 385 k_start_timer(&sub->timer, sub->timeout);
384 } 386 }
385 sub->owner = subscriber; 387
386 tipc_nametbl_subscribe(sub); 388 return sub;
387} 389}
388 390
389/** 391/**
390 * subscr_conn_shutdown_event - handle termination request from subscriber 392 * subscr_conn_shutdown_event - handle termination request from subscriber
393 *
394 * Called with subscriber's server port unlocked.
391 */ 395 */
392 396
393static void subscr_conn_shutdown_event(void *usr_handle, 397static void subscr_conn_shutdown_event(void *usr_handle,
394 u32 portref, 398 u32 port_ref,
395 struct sk_buff **buf, 399 struct sk_buff **buf,
396 unsigned char const *data, 400 unsigned char const *data,
397 unsigned int size, 401 unsigned int size,
398 int reason) 402 int reason)
399{ 403{
400 struct subscriber *subscriber; 404 struct subscriber *subscriber = usr_handle;
401 spinlock_t *subscriber_lock; 405 spinlock_t *subscriber_lock;
402 406
403 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 407 if (tipc_port_lock(port_ref) == NULL)
404 if (subscriber == NULL)
405 return; 408 return;
406 409
407 subscriber_lock = subscriber->lock; 410 subscriber_lock = subscriber->lock;
@@ -411,6 +414,8 @@ static void subscr_conn_shutdown_event(void *usr_handle,
411 414
412/** 415/**
413 * subscr_conn_msg_event - handle new subscription request from subscriber 416 * subscr_conn_msg_event - handle new subscription request from subscriber
417 *
418 * Called with subscriber's server port unlocked.
414 */ 419 */
415 420
416static void subscr_conn_msg_event(void *usr_handle, 421static void subscr_conn_msg_event(void *usr_handle,
@@ -419,20 +424,46 @@ static void subscr_conn_msg_event(void *usr_handle,
419 const unchar *data, 424 const unchar *data,
420 u32 size) 425 u32 size)
421{ 426{
422 struct subscriber *subscriber; 427 struct subscriber *subscriber = usr_handle;
423 spinlock_t *subscriber_lock; 428 spinlock_t *subscriber_lock;
429 struct subscription *sub;
430
431 /*
432 * Lock subscriber's server port (& make a local copy of lock pointer,
433 * in case subscriber is deleted while processing subscription request)
434 */
424 435
425 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 436 if (tipc_port_lock(port_ref) == NULL)
426 if (subscriber == NULL)
427 return; 437 return;
428 438
429 subscriber_lock = subscriber->lock; 439 subscriber_lock = subscriber->lock;
430 if (size != sizeof(struct tipc_subscr))
431 subscr_terminate(subscriber);
432 else
433 subscr_subscribe((struct tipc_subscr *)data, subscriber);
434 440
435 spin_unlock_bh(subscriber_lock); 441 if (size != sizeof(struct tipc_subscr)) {
442 subscr_terminate(subscriber);
443 spin_unlock_bh(subscriber_lock);
444 } else {
445 sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
446 spin_unlock_bh(subscriber_lock);
447 if (sub != NULL) {
448
449 /*
450 * We must release the server port lock before adding a
451 * subscription to the name table since TIPC needs to be
452 * able to (re)acquire the port lock if an event message
453 * issued by the subscription process is rejected and
454 * returned. The subscription cannot be deleted while
455 * it is being added to the name table because:
456 * a) the single-threading of the native API port code
457 * ensures the subscription cannot be cancelled and
458 * the subscriber connection cannot be broken, and
459 * b) the name table lock ensures the subscription
460 * timeout code cannot delete the subscription,
461 * so the subscription object is still protected.
462 */
463
464 tipc_nametbl_subscribe(sub);
465 }
466 }
436} 467}
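
The comment block above boils down to a two-phase pattern: subscr_subscribe() only creates and links the subscription while the port lock is held, and the name-table registration, which can generate (and bounce) event messages that re-take the lock, runs after the unlock. A skeleton of that split, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

struct sub { int placeholder; };

static struct sub *create_sub(void)	/* phase 1: runs under port_lock */
{
	return calloc(1, sizeof(struct sub));
}

static void publish_sub(struct sub *s)	/* phase 2: may re-take port_lock */
{
	(void)s;	/* e.g. tipc_nametbl_subscribe(), which can send events */
}

static void handle_request(void)
{
	struct sub *s;

	pthread_mutex_lock(&port_lock);
	s = create_sub();
	pthread_mutex_unlock(&port_lock);

	if (s)
		publish_sub(s);
}

int main(void)
{
	handle_request();
	return 0;
}
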
437 468
438/** 469/**
@@ -448,16 +479,10 @@ static void subscr_named_msg_event(void *usr_handle,
448 struct tipc_portid const *orig, 479 struct tipc_portid const *orig,
449 struct tipc_name_seq const *dest) 480 struct tipc_name_seq const *dest)
450{ 481{
451 struct subscriber *subscriber; 482 static struct iovec msg_sect = {NULL, 0};
452 struct iovec msg_sect = {NULL, 0};
453 spinlock_t *subscriber_lock;
454 483
455 dbg("subscr_named_msg_event: orig = %x own = %x,\n", 484 struct subscriber *subscriber;
456 orig->node, tipc_own_addr); 485 u32 server_port_ref;
457 if (size && (size != sizeof(struct tipc_subscr))) {
458 warn("Subscriber rejected, invalid subscription size\n");
459 return;
460 }
461 486
462 /* Create subscriber object */ 487 /* Create subscriber object */
463 488
@@ -468,17 +493,11 @@ static void subscr_named_msg_event(void *usr_handle,
468 } 493 }
469 INIT_LIST_HEAD(&subscriber->subscription_list); 494 INIT_LIST_HEAD(&subscriber->subscription_list);
470 INIT_LIST_HEAD(&subscriber->subscriber_list); 495 INIT_LIST_HEAD(&subscriber->subscriber_list);
471 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
472 if (subscriber->ref == 0) {
473 warn("Subscriber rejected, reference table exhausted\n");
474 kfree(subscriber);
475 return;
476 }
477 496
478 /* Establish a connection to subscriber */ 497 /* Create server port & establish connection to subscriber */
479 498
480 tipc_createport(topsrv.user_ref, 499 tipc_createport(topsrv.user_ref,
481 (void *)(unsigned long)subscriber->ref, 500 subscriber,
482 importance, 501 importance,
483 NULL, 502 NULL,
484 NULL, 503 NULL,
@@ -490,32 +509,36 @@ static void subscr_named_msg_event(void *usr_handle,
490 &subscriber->port_ref); 509 &subscriber->port_ref);
491 if (subscriber->port_ref == 0) { 510 if (subscriber->port_ref == 0) {
492 warn("Subscriber rejected, unable to create port\n"); 511 warn("Subscriber rejected, unable to create port\n");
493 tipc_ref_discard(subscriber->ref);
494 kfree(subscriber); 512 kfree(subscriber);
495 return; 513 return;
496 } 514 }
497 tipc_connect2port(subscriber->port_ref, orig); 515 tipc_connect2port(subscriber->port_ref, orig);
498 516
517 /* Lock server port (& save lock address for future use) */
518
519 subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
499 520
500 /* Add subscriber to topology server's subscriber list */ 521 /* Add subscriber to topology server's subscriber list */
501 522
502 tipc_ref_lock(subscriber->ref);
503 spin_lock_bh(&topsrv.lock); 523 spin_lock_bh(&topsrv.lock);
504 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 524 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
505 spin_unlock_bh(&topsrv.lock); 525 spin_unlock_bh(&topsrv.lock);
506 526
507 /* 527 /* Unlock server port */
508 * Subscribe now if message contains a subscription,
509 * otherwise send an empty response to complete connection handshaking
510 */
511 528
512 subscriber_lock = subscriber->lock; 529 server_port_ref = subscriber->port_ref;
513 if (size) 530 spin_unlock_bh(subscriber->lock);
514 subscr_subscribe((struct tipc_subscr *)data, subscriber);
515 else
516 tipc_send(subscriber->port_ref, 1, &msg_sect);
517 531
518 spin_unlock_bh(subscriber_lock); 532 /* Send an ACK- to complete connection handshaking */
533
534 tipc_send(server_port_ref, 1, &msg_sect);
535
536 /* Handle optional subscription request */
537
538 if (size != 0) {
539 subscr_conn_msg_event(subscriber, server_port_ref,
540 buf, data, size);
541 }
519} 542}
520 543
521int tipc_subscr_start(void) 544int tipc_subscr_start(void)
@@ -574,8 +597,8 @@ void tipc_subscr_stop(void)
574 list_for_each_entry_safe(subscriber, subscriber_temp, 597 list_for_each_entry_safe(subscriber, subscriber_temp,
575 &topsrv.subscriber_list, 598 &topsrv.subscriber_list,
576 subscriber_list) { 599 subscriber_list) {
577 tipc_ref_lock(subscriber->ref);
578 subscriber_lock = subscriber->lock; 600 subscriber_lock = subscriber->lock;
601 spin_lock_bh(subscriber_lock);
579 subscr_terminate(subscriber); 602 subscr_terminate(subscriber);
580 spin_unlock_bh(subscriber_lock); 603 spin_unlock_bh(subscriber_lock);
581 } 604 }
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 93a8e674fac1..45d89bf4d202 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service 2 * net/tipc/subscr.h: Include file for TIPC network topology service
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,34 +37,44 @@
37#ifndef _TIPC_SUBSCR_H 37#ifndef _TIPC_SUBSCR_H
38#define _TIPC_SUBSCR_H 38#define _TIPC_SUBSCR_H
39 39
40struct subscription;
41
42typedef void (*tipc_subscr_event) (struct subscription *sub,
43 u32 found_lower, u32 found_upper,
44 u32 event, u32 port_ref, u32 node);
45
40/** 46/**
41 * struct subscription - TIPC network topology subscription object 47 * struct subscription - TIPC network topology subscription object
42 * @seq: name sequence associated with subscription 48 * @seq: name sequence associated with subscription
43 * @timeout: duration of subscription (in ms) 49 * @timeout: duration of subscription (in ms)
44 * @filter: event filtering to be done for subscription 50 * @filter: event filtering to be done for subscription
45 * @evt: template for events generated by subscription 51 * @event_cb: routine invoked when a subscription event is detected
46 * @subscription_list: adjacent subscriptions in subscriber's subscription list 52 * @timer: timer governing subscription duration (optional)
47 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
48 * @timer_ref: reference to timer governing subscription duration (may be NULL) 54 * @subscription_list: adjacent subscriptions in subscriber's subscription list
49 * @owner: pointer to subscriber object associated with this subscription 55 * @server_ref: object reference of server port associated with subscription
56 * @swap: indicates if subscriber uses opposite endianness in its messages
57 * @evt: template for events generated by subscription
50 */ 58 */
51 59
52struct subscription { 60struct subscription {
53 struct tipc_name_seq seq; 61 struct tipc_name_seq seq;
54 u32 timeout; 62 u32 timeout;
55 u32 filter; 63 u32 filter;
56 struct tipc_event evt; 64 tipc_subscr_event event_cb;
57 struct list_head subscription_list;
58 struct list_head nameseq_list;
59 struct timer_list timer; 65 struct timer_list timer;
60 struct subscriber *owner; 66 struct list_head nameseq_list;
67 struct list_head subscription_list;
68 u32 server_ref;
69 int swap;
70 struct tipc_event evt;
61}; 71};
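
With event_cb in the structure, delivery becomes a per-subscription function pointer rather than a hard-wired call to subscr_send_event(), so an in-kernel user of the topology service can supply its own delivery routine. A userspace sketch of the indirection (types simplified):

#include <stdint.h>
#include <stdio.h>

struct subscription;

typedef void (*tipc_subscr_event)(struct subscription *sub,
				  uint32_t lower, uint32_t upper, uint32_t event);

struct subscription {
	tipc_subscr_event event_cb;	/* how events reach the subscriber */
	uint32_t server_ref;
};

static void send_event(struct subscription *sub,
		       uint32_t lower, uint32_t upper, uint32_t event)
{
	printf("event %u on ref %u: [%u,%u]\n",
	       event, sub->server_ref, lower, upper);
}

static void report_overlap(struct subscription *sub)
{
	/* The reporting code never names the transport. */
	sub->event_cb(sub, 100, 200, 1);
}

int main(void)
{
	struct subscription s = { send_event, 7 };

	report_overlap(&s);
	return 0;
}
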
62 72
63int tipc_subscr_overlap(struct subscription * sub, 73int tipc_subscr_overlap(struct subscription *sub,
64 u32 found_lower, 74 u32 found_lower,
65 u32 found_upper); 75 u32 found_upper);
66 76
67void tipc_subscr_report_overlap(struct subscription * sub, 77void tipc_subscr_report_overlap(struct subscription *sub,
68 u32 found_lower, 78 u32 found_lower,
69 u32 found_upper, 79 u32 found_upper,
70 u32 event, 80 u32 event,
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 4146c40cd20b..506928803162 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -91,7 +91,7 @@ static int reg_init(void)
91 } 91 }
92 } 92 }
93 spin_unlock_bh(&reg_lock); 93 spin_unlock_bh(&reg_lock);
94 return users ? TIPC_OK : -ENOMEM; 94 return users ? 0 : -ENOMEM;
95} 95}
96 96
97/** 97/**
@@ -129,7 +129,7 @@ int tipc_reg_start(void)
129 tipc_k_signal((Handler)reg_callback, 129 tipc_k_signal((Handler)reg_callback,
130 (unsigned long)&users[u]); 130 (unsigned long)&users[u]);
131 } 131 }
132 return TIPC_OK; 132 return 0;
133} 133}
134 134
135/** 135/**
@@ -184,7 +184,7 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
184 184
185 if (cb && (tipc_mode != TIPC_NOT_RUNNING)) 185 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr); 186 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
187 return TIPC_OK; 187 return 0;
188} 188}
189 189
190/** 190/**
@@ -230,7 +230,7 @@ int tipc_reg_add_port(struct user_port *up_ptr)
230 struct tipc_user *user_ptr; 230 struct tipc_user *user_ptr;
231 231
232 if (up_ptr->user_ref == 0) 232 if (up_ptr->user_ref == 0)
233 return TIPC_OK; 233 return 0;
234 if (up_ptr->user_ref > MAX_USERID) 234 if (up_ptr->user_ref > MAX_USERID)
235 return -EINVAL; 235 return -EINVAL;
236 if ((tipc_mode == TIPC_NOT_RUNNING) || !users ) 236 if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
@@ -240,7 +240,7 @@ int tipc_reg_add_port(struct user_port *up_ptr)
240 user_ptr = &users[up_ptr->user_ref]; 240 user_ptr = &users[up_ptr->user_ref];
241 list_add(&up_ptr->uport_list, &user_ptr->ports); 241 list_add(&up_ptr->uport_list, &user_ptr->ports);
242 spin_unlock_bh(&reg_lock); 242 spin_unlock_bh(&reg_lock);
243 return TIPC_OK; 243 return 0;
244} 244}
245 245
246/** 246/**
@@ -250,7 +250,7 @@ int tipc_reg_add_port(struct user_port *up_ptr)
250int tipc_reg_remove_port(struct user_port *up_ptr) 250int tipc_reg_remove_port(struct user_port *up_ptr)
251{ 251{
252 if (up_ptr->user_ref == 0) 252 if (up_ptr->user_ref == 0)
253 return TIPC_OK; 253 return 0;
254 if (up_ptr->user_ref > MAX_USERID) 254 if (up_ptr->user_ref > MAX_USERID)
255 return -EINVAL; 255 return -EINVAL;
256 if (!users ) 256 if (!users )
@@ -259,6 +259,6 @@ int tipc_reg_remove_port(struct user_port *up_ptr)
259 spin_lock_bh(&reg_lock); 259 spin_lock_bh(&reg_lock);
260 list_del_init(&up_ptr->uport_list); 260 list_del_init(&up_ptr->uport_list);
261 spin_unlock_bh(&reg_lock); 261 spin_unlock_bh(&reg_lock);
262 return TIPC_OK; 262 return 0;
263} 263}
264 264
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 783317dacd30..70ceb1604ad8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,8 +8,6 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12 *
13 * Fixes: 11 * Fixes:
14 * Linus Torvalds : Assorted bug cures. 12 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support. 13 * Niibe Yutaka : async I/O support.
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 1debe1cb054e..61ceae0b9566 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -20,8 +20,6 @@ config WAN_ROUTER
20 wish to use your Linux box as a WAN router, say Y here and also to 20 wish to use your Linux box as a WAN router, say Y here and also to
21 the WAN driver for your card, below. You will then need the 21 the WAN driver for your card, below. You will then need the
22 wan-tools package which is available from <ftp://ftp.sangoma.com/>. 22 wan-tools package which is available from <ftp://ftp.sangoma.com/>.
23 Read <file:Documentation/networking/wan-router.txt> for more
24 information.
25 23
26 To compile WAN routing support as a module, choose M here: the 24 To compile WAN routing support as a module, choose M here: the
27 module will be called wanrouter. 25 module will be called wanrouter.
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 9ab31a3ce3ad..b210a88d0960 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -350,9 +350,9 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
350 * o execute requested action or pass command to the device driver 350 * o execute requested action or pass command to the device driver
351 */ 351 */
352 352
353int wanrouter_ioctl(struct inode *inode, struct file *file, 353long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
354 unsigned int cmd, unsigned long arg)
355{ 354{
355 struct inode *inode = file->f_path.dentry->d_inode;
356 int err = 0; 356 int err = 0;
357 struct proc_dir_entry *dent; 357 struct proc_dir_entry *dent;
358 struct wan_device *wandev; 358 struct wan_device *wandev;
@@ -372,6 +372,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
372 if (wandev->magic != ROUTER_MAGIC) 372 if (wandev->magic != ROUTER_MAGIC)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 lock_kernel();
375 switch (cmd) { 376 switch (cmd) {
376 case ROUTER_SETUP: 377 case ROUTER_SETUP:
377 err = wanrouter_device_setup(wandev, data); 378 err = wanrouter_device_setup(wandev, data);
@@ -403,6 +404,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
403 err = wandev->ioctl(wandev, cmd, arg); 404 err = wandev->ioctl(wandev, cmd, arg);
404 else err = -EINVAL; 405 else err = -EINVAL;
405 } 406 }
407 unlock_kernel();
406 return err; 408 return err;
407} 409}
408 410
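
This is the standard BKL-pushdown conversion: the .ioctl entry point (called with the Big Kernel Lock already held) becomes .unlocked_ioctl, the handler digs the inode out of the file itself, and lock_kernel()/unlock_kernel() now bracket only the section that still needs serializing. The shape of the conversion, with a mutex standing in for the BKL:

#include <pthread.h>

static pthread_mutex_t bkl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for BKL */

static long router_ioctl(unsigned int cmd, unsigned long arg)
{
	long err = 0;

	/* ... argument validation: no lock needed ... */

	pthread_mutex_lock(&bkl);	/* serialize only the dispatch */
	switch (cmd) {
	default:
		(void)arg;
		break;
	}
	pthread_mutex_unlock(&bkl);
	return err;
}

int main(void)
{
	return (int)router_ioctl(0, 0);
}
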
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 5bebe40bf4e6..267f7ff49827 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -278,7 +278,7 @@ static const struct file_operations wandev_fops = {
278 .read = seq_read, 278 .read = seq_read,
279 .llseek = seq_lseek, 279 .llseek = seq_lseek,
280 .release = single_release, 280 .release = single_release,
281 .ioctl = wanrouter_ioctl, 281 .unlocked_ioctl = wanrouter_ioctl,
282}; 282};
283 283
284/* 284/*
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 79270903bda6..ab015c62d561 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -29,3 +29,14 @@ config WIRELESS_EXT
29 29
30 Say N (if you can) unless you know you need wireless 30 Say N (if you can) unless you know you need wireless
31 extensions for external modules. 31 extensions for external modules.
32
33config WIRELESS_EXT_SYSFS
34 bool "Wireless extensions sysfs files"
35 default y
36 depends on WIRELESS_EXT && SYSFS
37 help
38 This option enables the deprecated wireless statistics
39 files in /sys/class/net/*/wireless/. The same information
40 is available via the ioctls as well.
41
42 Say Y if you have programs using it (we don't know of any).
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 80afacdae46c..f1da0b93bc56 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -143,8 +143,11 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv)
143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, 143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
144 char *newname) 144 char *newname)
145{ 145{
146 struct cfg80211_registered_device *drv;
146 int idx, taken = -1, result, digits; 147 int idx, taken = -1, result, digits;
147 148
149 mutex_lock(&cfg80211_drv_mutex);
150
148 /* prohibit calling the thing phy%d when %d is not its number */ 151 /* prohibit calling the thing phy%d when %d is not its number */
149 sscanf(newname, PHY_NAME "%d%n", &idx, &taken); 152 sscanf(newname, PHY_NAME "%d%n", &idx, &taken);
150 if (taken == strlen(newname) && idx != rdev->idx) { 153 if (taken == strlen(newname) && idx != rdev->idx) {
@@ -156,14 +159,30 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
156 * deny the name if it is phy<idx> where <idx> is printed 159 * deny the name if it is phy<idx> where <idx> is printed
157 * without leading zeroes. taken == strlen(newname) here 160 * without leading zeroes. taken == strlen(newname) here
158 */ 161 */
162 result = -EINVAL;
159 if (taken == strlen(PHY_NAME) + digits) 163 if (taken == strlen(PHY_NAME) + digits)
160 return -EINVAL; 164 goto out_unlock;
165 }
166
167
168 /* Ignore nop renames */
169 result = 0;
170 if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
171 goto out_unlock;
172
173 /* Ensure another device does not already have this name. */
174 list_for_each_entry(drv, &cfg80211_drv_list, list) {
175 result = -EINVAL;
176 if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
177 goto out_unlock;
161 } 178 }
162 179
163 /* this will check for collisions */ 180 /* this will only check for collisions in sysfs
181 * which is not even always compiled in.
182 */
164 result = device_rename(&rdev->wiphy.dev, newname); 183 result = device_rename(&rdev->wiphy.dev, newname);
165 if (result) 184 if (result)
166 return result; 185 goto out_unlock;
167 186
168 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 187 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
169 rdev->wiphy.debugfsdir, 188 rdev->wiphy.debugfsdir,
@@ -172,9 +191,13 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
172 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", 191 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
173 newname); 192 newname);
174 193
175 nl80211_notify_dev_rename(rdev); 194 result = 0;
195out_unlock:
196 mutex_unlock(&cfg80211_drv_mutex);
197 if (result == 0)
198 nl80211_notify_dev_rename(rdev);
176 199
177 return 0; 200 return result;
178} 201}
179 202
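
The rename path now holds cfg80211_drv_mutex across the whole operation and folds three outcomes into one exit label: a nop rename succeeds immediately, a name owned by another registered device is refused, and only a successful device_rename() triggers the netlink notification. A self-contained sketch of the same check-then-rename-under-mutex structure (data model invented for the example):

#include <errno.h>
#include <pthread.h>
#include <string.h>

#define NDEV 4
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static char names[NDEV][16] = { "phy0", "phy1", "phy2", "phy3" };

static int rename_dev(int idx, const char *newname)
{
	int i, result;

	pthread_mutex_lock(&list_lock);

	result = 0;			/* nop rename: nothing to do */
	if (strcmp(newname, names[idx]) == 0)
		goto out_unlock;

	for (i = 0; i < NDEV; i++) {	/* refuse names already taken */
		result = -EINVAL;
		if (i != idx && strcmp(newname, names[i]) == 0)
			goto out_unlock;
	}

	strncpy(names[idx], newname, sizeof(names[idx]) - 1);
	result = 0;
out_unlock:
	pthread_mutex_unlock(&list_lock);
	return result;			/* notify listeners only when 0 */
}

int main(void)
{
	rename_dev(1, "wifi0");				/* succeeds */
	return rename_dev(2, "phy0") == -EINVAL ? 0 : 1;	/* refused */
}
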
180/* exported functions */ 203/* exported functions */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fb75f265b39c..b7fefffd2d0d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -199,12 +199,14 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
199 199
200 mutex_lock(&cfg80211_drv_mutex); 200 mutex_lock(&cfg80211_drv_mutex);
201 list_for_each_entry(dev, &cfg80211_drv_list, list) { 201 list_for_each_entry(dev, &cfg80211_drv_list, list) {
202 if (++idx < start) 202 if (++idx <= start)
203 continue; 203 continue;
204 if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, 204 if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid,
205 cb->nlh->nlmsg_seq, NLM_F_MULTI, 205 cb->nlh->nlmsg_seq, NLM_F_MULTI,
206 dev) < 0) 206 dev) < 0) {
207 idx--;
207 break; 208 break;
209 }
208 } 210 }
209 mutex_unlock(&cfg80211_drv_mutex); 211 mutex_unlock(&cfg80211_drv_mutex);
210 212
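
The dump fix above restores the netlink pagination contract: idx counts entries considered, start is how many earlier dump passes already delivered, and an entry that fails to fit in the current buffer must not be counted, or it would be silently skipped on the next pass. A toy model of the contract:

#include <stdio.h>

static int emit(int item, int room_left)
{
	(void)item;
	return room_left > 0 ? 0 : -1;	/* -1: no space left in this pass */
}

static int dump(int nitems, int start, int room)
{
	int idx = 0, item;

	for (item = 0; item < nitems; item++) {
		if (++idx <= start)	/* already sent in an earlier pass */
			continue;
		if (emit(item, room--) < 0) {
			idx--;		/* retry this one next pass */
			break;
		}
	}
	return idx;			/* becomes `start` for the next pass */
}

int main(void)
{
	int start = 0, next;

	while ((next = dump(10, start, 3)) > start)
		start = next;		/* resume where the last pass stopped */
	printf("dumped %d items\n", start);
	return 0;
}
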
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 28fbd0b0b568..f591871a7b4f 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -59,23 +59,21 @@ int ieee80211_radiotap_iterator_init(
59 return -EINVAL; 59 return -EINVAL;
60 60
61 /* sanity check for allowed length and radiotap length field */ 61 /* sanity check for allowed length and radiotap length field */
62 if (max_length < le16_to_cpu(get_unaligned(&radiotap_header->it_len))) 62 if (max_length < get_unaligned_le16(&radiotap_header->it_len))
63 return -EINVAL; 63 return -EINVAL;
64 64
65 iterator->rtheader = radiotap_header; 65 iterator->rtheader = radiotap_header;
66 iterator->max_length = le16_to_cpu(get_unaligned( 66 iterator->max_length = get_unaligned_le16(&radiotap_header->it_len);
67 &radiotap_header->it_len));
68 iterator->arg_index = 0; 67 iterator->arg_index = 0;
69 iterator->bitmap_shifter = le32_to_cpu(get_unaligned( 68 iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
70 &radiotap_header->it_present));
71 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); 69 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
72 iterator->this_arg = NULL; 70 iterator->this_arg = NULL;
73 71
74 /* find payload start allowing for extended bitmap(s) */ 72 /* find payload start allowing for extended bitmap(s) */
75 73
76 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { 74 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
77 while (le32_to_cpu(get_unaligned((__le32 *)iterator->arg)) & 75 while (get_unaligned_le32(iterator->arg) &
78 (1<<IEEE80211_RADIOTAP_EXT)) { 76 (1 << IEEE80211_RADIOTAP_EXT)) {
79 iterator->arg += sizeof(u32); 77 iterator->arg += sizeof(u32);
80 78
81 /* 79 /*
@@ -241,8 +239,8 @@ int ieee80211_radiotap_iterator_next(
241 if (iterator->bitmap_shifter & 1) { 239 if (iterator->bitmap_shifter & 1) {
242 /* b31 was set, there is more */ 240 /* b31 was set, there is more */
243 /* move to next u32 bitmap */ 241 /* move to next u32 bitmap */
244 iterator->bitmap_shifter = le32_to_cpu( 242 iterator->bitmap_shifter =
245 get_unaligned(iterator->next_bitmap)); 243 get_unaligned_le32(iterator->next_bitmap);
246 iterator->next_bitmap++; 244 iterator->next_bitmap++;
247 } else 245 } else
248 /* no more bitmaps: end */ 246 /* no more bitmaps: end */
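
get_unaligned_le16()/get_unaligned_le32() fold the old le*_to_cpu(get_unaligned(...)) pairs into single helpers; semantically each is a byte-wise little-endian load that tolerates arbitrary alignment on any host. Userspace equivalents:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_unaligned_le16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t get_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0]         | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t hdr[] = { 0x0d, 0x00, 0x1e, 0x00, 0x00, 0x00 };

	/* An odd offset is fine: no aligned load is ever issued. */
	printf("len=%u flags=%u\n",
	       (unsigned)get_unaligned_le16(hdr),
	       (unsigned)get_unaligned_le32(hdr + 2));
	return 0;
}
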
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 947188a5b937..273a84359998 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -500,7 +500,7 @@ static int call_commit_handler(struct net_device *dev)
500/* 500/*
501 * Calculate size of private arguments 501 * Calculate size of private arguments
502 */ 502 */
503static inline int get_priv_size(__u16 args) 503static int get_priv_size(__u16 args)
504{ 504{
505 int num = args & IW_PRIV_SIZE_MASK; 505 int num = args & IW_PRIV_SIZE_MASK;
506 int type = (args & IW_PRIV_TYPE_MASK) >> 12; 506 int type = (args & IW_PRIV_TYPE_MASK) >> 12;
@@ -512,10 +512,9 @@ static inline int get_priv_size(__u16 args)
512/* 512/*
513 * Re-calculate the size of private arguments 513 * Re-calculate the size of private arguments
514 */ 514 */
515static inline int adjust_priv_size(__u16 args, 515static int adjust_priv_size(__u16 args, struct iw_point *iwp)
516 union iwreq_data * wrqu)
517{ 516{
518 int num = wrqu->data.length; 517 int num = iwp->length;
519 int max = args & IW_PRIV_SIZE_MASK; 518 int max = args & IW_PRIV_SIZE_MASK;
520 int type = (args & IW_PRIV_TYPE_MASK) >> 12; 519 int type = (args & IW_PRIV_TYPE_MASK) >> 12;
521 520
@@ -695,19 +694,150 @@ void wext_proc_exit(struct net *net)
695 */ 694 */
696 695
697/* ---------------------------------------------------------------- */ 696/* ---------------------------------------------------------------- */
697static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
698 const struct iw_ioctl_description *descr,
699 iw_handler handler, struct net_device *dev,
700 struct iw_request_info *info)
701{
702 int err, extra_size, user_length = 0, essid_compat = 0;
703 char *extra;
704
705 /* Calculate space needed by arguments. Always allocate
706 * for max space.
707 */
708 extra_size = descr->max_tokens * descr->token_size;
709
710 /* Check need for ESSID compatibility for WE < 21 */
711 switch (cmd) {
712 case SIOCSIWESSID:
713 case SIOCGIWESSID:
714 case SIOCSIWNICKN:
715 case SIOCGIWNICKN:
716 if (iwp->length == descr->max_tokens + 1)
717 essid_compat = 1;
718 else if (IW_IS_SET(cmd) && (iwp->length != 0)) {
719 char essid[IW_ESSID_MAX_SIZE + 1];
720
721 err = copy_from_user(essid, iwp->pointer,
722 iwp->length *
723 descr->token_size);
724 if (err)
725 return -EFAULT;
726
727 if (essid[iwp->length - 1] == '\0')
728 essid_compat = 1;
729 }
730 break;
731 default:
732 break;
733 }
734
735 iwp->length -= essid_compat;
736
737 /* Check what user space is giving us */
738 if (IW_IS_SET(cmd)) {
739 /* Check NULL pointer */
740 if (!iwp->pointer && iwp->length != 0)
741 return -EFAULT;
742 /* Check if number of token fits within bounds */
743 if (iwp->length > descr->max_tokens)
744 return -E2BIG;
745 if (iwp->length < descr->min_tokens)
746 return -EINVAL;
747 } else {
748 /* Check NULL pointer */
749 if (!iwp->pointer)
750 return -EFAULT;
751 /* Save user space buffer size for checking */
752 user_length = iwp->length;
753
754 /* Don't check if user_length > max to allow forward
755 * compatibility. The test user_length < min is
756 * implied by the test at the end.
757 */
758
759 /* Support for very large requests */
760 if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
761 (user_length > descr->max_tokens)) {
762 /* Allow userspace to GET more than max so
763 * we can support any size GET requests.
764 * There is still a limit : -ENOMEM.
765 */
766 extra_size = user_length * descr->token_size;
767
768 /* Note : user_length is originally a __u16,
769 * and token_size is controlled by us,
770 * so extra_size won't get negative and
771 * won't overflow...
772 */
773 }
774 }
775
776 /* kzalloc() ensures NULL-termination for essid_compat. */
777 extra = kzalloc(extra_size, GFP_KERNEL);
778 if (!extra)
779 return -ENOMEM;
780
781 /* If it is a SET, get all the extra data in here */
782 if (IW_IS_SET(cmd) && (iwp->length != 0)) {
783 if (copy_from_user(extra, iwp->pointer,
784 iwp->length *
785 descr->token_size)) {
786 err = -EFAULT;
787 goto out;
788 }
789 }
790
791 err = handler(dev, info, (union iwreq_data *) iwp, extra);
792
793 iwp->length += essid_compat;
794
795 /* If we have something to return to the user */
796 if (!err && IW_IS_GET(cmd)) {
797 /* Check if there is enough buffer up there */
798 if (user_length < iwp->length) {
799 err = -E2BIG;
800 goto out;
801 }
802
803 if (copy_to_user(iwp->pointer, extra,
804 iwp->length *
805 descr->token_size)) {
806 err = -EFAULT;
807 goto out;
808 }
809 }
810
811 /* Generate an event to notify listeners of the change */
812 if ((descr->flags & IW_DESCR_FLAG_EVENT) && err == -EIWCOMMIT) {
813 union iwreq_data *data = (union iwreq_data *) iwp;
814
815 if (descr->flags & IW_DESCR_FLAG_RESTRICT)
816 /* If the event is restricted, don't
817 * export the payload.
818 */
819 wireless_send_event(dev, cmd, data, NULL);
820 else
821 wireless_send_event(dev, cmd, data, extra);
822 }
823
824out:
825 kfree(extra);
826 return err;
827}
828
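
Factoring the iw_point handling out makes the bounds discipline easy to see: a SET rejects NULL-with-length, over-max and under-min payloads before anything is allocated; a GET records the user's buffer size up front and refuses to copy back more than that. A userspace reduction of the SET-side checks, with memcpy standing in for copy_from_user() and invented names throughout:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct point { void *pointer; unsigned short length; };

static int handle_set(const struct point *iwp, size_t min_tokens,
		      size_t max_tokens, size_t token_size)
{
	char *extra;

	if (!iwp->pointer && iwp->length != 0)
		return -EFAULT;		/* NULL buffer with nonzero length */
	if (iwp->length > max_tokens)
		return -E2BIG;		/* caller sent more than we accept */
	if (iwp->length < min_tokens)
		return -EINVAL;		/* not enough to be meaningful */

	extra = calloc(max_tokens, token_size);	/* always max: simple & safe */
	if (!extra)
		return -ENOMEM;

	memcpy(extra, iwp->pointer, iwp->length * token_size);
	/* ... run the handler on extra ... */
	free(extra);
	return 0;
}

int main(void)
{
	char buf[8] = "abc";
	struct point p = { buf, 3 };

	return handle_set(&p, 1, 8, 1);
}
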
698/* 829/*
699 * Wrapper to call a standard Wireless Extension handler. 830 * Wrapper to call a standard Wireless Extension handler.
700 * We do various checks and also take care of moving data between 831 * We do various checks and also take care of moving data between
701 * user space and kernel space. 832 * user space and kernel space.
702 */ 833 */
703static int ioctl_standard_call(struct net_device * dev, 834static int ioctl_standard_call(struct net_device * dev,
704 struct ifreq * ifr, 835 struct iwreq *iwr,
705 unsigned int cmd, 836 unsigned int cmd,
837 struct iw_request_info *info,
706 iw_handler handler) 838 iw_handler handler)
707{ 839{
708 struct iwreq * iwr = (struct iwreq *) ifr;
709 const struct iw_ioctl_description * descr; 840 const struct iw_ioctl_description * descr;
710 struct iw_request_info info;
711 int ret = -EINVAL; 841 int ret = -EINVAL;
712 842
713 /* Get the description of the IOCTL */ 843 /* Get the description of the IOCTL */
@@ -715,145 +845,19 @@ static int ioctl_standard_call(struct net_device * dev,
715 return -EOPNOTSUPP; 845 return -EOPNOTSUPP;
716 descr = &(standard_ioctl[cmd - SIOCIWFIRST]); 846 descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
717 847
718 /* Prepare the call */
719 info.cmd = cmd;
720 info.flags = 0;
721
722 /* Check if we have a pointer to user space data or not */ 848 /* Check if we have a pointer to user space data or not */
723 if (descr->header_type != IW_HEADER_TYPE_POINT) { 849 if (descr->header_type != IW_HEADER_TYPE_POINT) {
724 850
725 /* No extra arguments. Trivial to handle */ 851 /* No extra arguments. Trivial to handle */
726 ret = handler(dev, &info, &(iwr->u), NULL); 852 ret = handler(dev, info, &(iwr->u), NULL);
727 853
728 /* Generate an event to notify listeners of the change */ 854 /* Generate an event to notify listeners of the change */
729 if ((descr->flags & IW_DESCR_FLAG_EVENT) && 855 if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
730 ((ret == 0) || (ret == -EIWCOMMIT))) 856 ((ret == 0) || (ret == -EIWCOMMIT)))
731 wireless_send_event(dev, cmd, &(iwr->u), NULL); 857 wireless_send_event(dev, cmd, &(iwr->u), NULL);
732 } else { 858 } else {
733 char * extra; 859 ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr,
734 int extra_size; 860 handler, dev, info);
735 int user_length = 0;
736 int err;
737 int essid_compat = 0;
738
739 /* Calculate space needed by arguments. Always allocate
740 * for max space. Easier, and won't last long... */
741 extra_size = descr->max_tokens * descr->token_size;
742
743 /* Check need for ESSID compatibility for WE < 21 */
744 switch (cmd) {
745 case SIOCSIWESSID:
746 case SIOCGIWESSID:
747 case SIOCSIWNICKN:
748 case SIOCGIWNICKN:
749 if (iwr->u.data.length == descr->max_tokens + 1)
750 essid_compat = 1;
751 else if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
752 char essid[IW_ESSID_MAX_SIZE + 1];
753
754 err = copy_from_user(essid, iwr->u.data.pointer,
755 iwr->u.data.length *
756 descr->token_size);
757 if (err)
758 return -EFAULT;
759
760 if (essid[iwr->u.data.length - 1] == '\0')
761 essid_compat = 1;
762 }
763 break;
764 default:
765 break;
766 }
767
768 iwr->u.data.length -= essid_compat;
769
770 /* Check what user space is giving us */
771 if (IW_IS_SET(cmd)) {
772 /* Check NULL pointer */
773 if ((iwr->u.data.pointer == NULL) &&
774 (iwr->u.data.length != 0))
775 return -EFAULT;
776 /* Check if number of token fits within bounds */
777 if (iwr->u.data.length > descr->max_tokens)
778 return -E2BIG;
779 if (iwr->u.data.length < descr->min_tokens)
780 return -EINVAL;
781 } else {
782 /* Check NULL pointer */
783 if (iwr->u.data.pointer == NULL)
784 return -EFAULT;
785 /* Save user space buffer size for checking */
786 user_length = iwr->u.data.length;
787
788 /* Don't check if user_length > max to allow forward
789 * compatibility. The test user_length < min is
790 * implied by the test at the end. */
791
792 /* Support for very large requests */
793 if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
794 (user_length > descr->max_tokens)) {
795 /* Allow userspace to GET more than max so
796 * we can support any size GET requests.
797 * There is still a limit : -ENOMEM. */
798 extra_size = user_length * descr->token_size;
799 /* Note : user_length is originally a __u16,
800 * and token_size is controlled by us,
801 * so extra_size won't get negative and
802 * won't overflow... */
803 }
804 }
805
806 /* Create the kernel buffer */
807 /* kzalloc ensures NULL-termination for essid_compat */
808 extra = kzalloc(extra_size, GFP_KERNEL);
809 if (extra == NULL)
810 return -ENOMEM;
811
812 /* If it is a SET, get all the extra data in here */
813 if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
814 err = copy_from_user(extra, iwr->u.data.pointer,
815 iwr->u.data.length *
816 descr->token_size);
817 if (err) {
818 kfree(extra);
819 return -EFAULT;
820 }
821 }
822
823 /* Call the handler */
824 ret = handler(dev, &info, &(iwr->u), extra);
825
826 iwr->u.data.length += essid_compat;
827
828 /* If we have something to return to the user */
829 if (!ret && IW_IS_GET(cmd)) {
830 /* Check if there is enough buffer up there */
831 if (user_length < iwr->u.data.length) {
832 kfree(extra);
833 return -E2BIG;
834 }
835
836 err = copy_to_user(iwr->u.data.pointer, extra,
837 iwr->u.data.length *
838 descr->token_size);
839 if (err)
840 ret = -EFAULT;
841 }
842
843 /* Generate an event to notify listeners of the change */
844 if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
845 ((ret == 0) || (ret == -EIWCOMMIT))) {
846 if (descr->flags & IW_DESCR_FLAG_RESTRICT)
847 /* If the event is restricted, don't
848 * export the payload */
849 wireless_send_event(dev, cmd, &(iwr->u), NULL);
850 else
851 wireless_send_event(dev, cmd, &(iwr->u),
852 extra);
853 }
854
855 /* Cleanup - I told you it wasn't that long ;-) */
856 kfree(extra);
857 } 861 }
858 862
859 /* Call commit handler if needed and defined */ 863 /* Call commit handler if needed and defined */
@@ -881,25 +885,22 @@ static int ioctl_standard_call(struct net_device * dev,
882 * an iw_handler but process it in your ioctl handler (i.e. use the 886 * an iw_handler but process it in your ioctl handler (i.e. use the
882 * old driver API). 886 * old driver API).
883 */ 887 */
884static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr, 888static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd,
885 unsigned int cmd, iw_handler handler) 889 const struct iw_priv_args **descrp)
886{ 890{
887 struct iwreq * iwr = (struct iwreq *) ifr; 891 const struct iw_priv_args *descr;
888 const struct iw_priv_args * descr = NULL; 892 int i, extra_size;
889 struct iw_request_info info;
890 int extra_size = 0;
891 int i;
892 int ret = -EINVAL;
893 893
894 /* Get the description of the IOCTL */ 894 descr = NULL;
895 for (i = 0; i < dev->wireless_handlers->num_private_args; i++) 895 for (i = 0; i < dev->wireless_handlers->num_private_args; i++) {
896 if (cmd == dev->wireless_handlers->private_args[i].cmd) { 896 if (cmd == dev->wireless_handlers->private_args[i].cmd) {
897 descr = &(dev->wireless_handlers->private_args[i]); 897 descr = &dev->wireless_handlers->private_args[i];
898 break; 898 break;
899 } 899 }
900 }
900 901
901 /* Compute the size of the set/get arguments */ 902 extra_size = 0;
902 if (descr != NULL) { 903 if (descr) {
903 if (IW_IS_SET(cmd)) { 904 if (IW_IS_SET(cmd)) {
904 int offset = 0; /* For sub-ioctls */ 905 int offset = 0; /* For sub-ioctls */
905 /* Check for sub-ioctl handler */ 906 /* Check for sub-ioctl handler */
@@ -924,72 +925,77 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
924 extra_size = 0; 925 extra_size = 0;
925 } 926 }
926 } 927 }
928 *descrp = descr;
929 return extra_size;
930}
927 931
928 /* Prepare the call */ 932static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
929 info.cmd = cmd; 933 const struct iw_priv_args *descr,
930 info.flags = 0; 934 iw_handler handler, struct net_device *dev,
935 struct iw_request_info *info, int extra_size)
936{
937 char *extra;
938 int err;
931 939
932 /* Check if we have a pointer to user space data or not. */ 940 /* Check what user space is giving us */
933 if (extra_size == 0) { 941 if (IW_IS_SET(cmd)) {
934 /* No extra arguments. Trivial to handle */ 942 if (!iwp->pointer && iwp->length != 0)
935 ret = handler(dev, &info, &(iwr->u), (char *) &(iwr->u)); 943 return -EFAULT;
936 } else {
937 char * extra;
938 int err;
939 944
940 /* Check what user space is giving us */ 945 if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK))
941 if (IW_IS_SET(cmd)) { 946 return -E2BIG;
942 /* Check NULL pointer */ 947 } else if (!iwp->pointer)
943 if ((iwr->u.data.pointer == NULL) && 948 return -EFAULT;
944 (iwr->u.data.length != 0))
945 return -EFAULT;
946 949
947 /* Does it fits within bounds ? */ 950 extra = kmalloc(extra_size, GFP_KERNEL);
948 if (iwr->u.data.length > (descr->set_args & 951 if (!extra)
949 IW_PRIV_SIZE_MASK)) 952 return -ENOMEM;
950 return -E2BIG;
951 } else if (iwr->u.data.pointer == NULL)
952 return -EFAULT;
953 953
954 /* Always allocate for max space. Easier, and won't last 954 /* If it is a SET, get all the extra data in here */
955 * long... */ 955 if (IW_IS_SET(cmd) && (iwp->length != 0)) {
956 extra = kmalloc(extra_size, GFP_KERNEL); 956 if (copy_from_user(extra, iwp->pointer, extra_size)) {
957 if (extra == NULL) 957 err = -EFAULT;
958 return -ENOMEM; 958 goto out;
959
960 /* If it is a SET, get all the extra data in here */
961 if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
962 err = copy_from_user(extra, iwr->u.data.pointer,
963 extra_size);
964 if (err) {
965 kfree(extra);
966 return -EFAULT;
967 }
968 } 959 }
960 }
969 961
970 /* Call the handler */ 962 /* Call the handler */
971 ret = handler(dev, &info, &(iwr->u), extra); 963 err = handler(dev, info, (union iwreq_data *) iwp, extra);
972 964
973 /* If we have something to return to the user */ 965 /* If we have something to return to the user */
974 if (!ret && IW_IS_GET(cmd)) { 966 if (!err && IW_IS_GET(cmd)) {
967 /* Adjust for the actual length if it's variable,
968 * avoid leaking kernel bits outside.
969 */
970 if (!(descr->get_args & IW_PRIV_SIZE_FIXED))
971 extra_size = adjust_priv_size(descr->get_args, iwp);
975 972
976 /* Adjust for the actual length if it's variable, 973 if (copy_to_user(iwp->pointer, extra, extra_size))
977 * avoid leaking kernel bits outside. */ 974 err = -EFAULT;
978 if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) { 975 }
979 extra_size = adjust_priv_size(descr->get_args,
980 &(iwr->u));
981 }
982 976
983 err = copy_to_user(iwr->u.data.pointer, extra, 977out:
984 extra_size); 978 kfree(extra);
985 if (err) 979 return err;
986 ret = -EFAULT; 980}
987 }
988 981
989 /* Cleanup - I told you it wasn't that long ;-) */ 982static int ioctl_private_call(struct net_device *dev, struct iwreq *iwr,
990 kfree(extra); 983 unsigned int cmd, struct iw_request_info *info,
991 } 984 iw_handler handler)
985{
986 int extra_size = 0, ret = -EINVAL;
987 const struct iw_priv_args *descr;
992 988
989 extra_size = get_priv_descr_and_size(dev, cmd, &descr);
990
991 /* Check if we have a pointer to user space data or not. */
992 if (extra_size == 0) {
993 /* No extra arguments. Trivial to handle */
994 ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u));
995 } else {
996 ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr,
997 handler, dev, info, extra_size);
998 }
993 999
994 /* Call commit handler if needed and defined */ 1000 /* Call commit handler if needed and defined */
995 if (ret == -EIWCOMMIT) 1001 if (ret == -EIWCOMMIT)
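
The private-ioctl GET path keeps the old leak guard: when a reply is variable-sized, adjust_priv_size() recomputes the byte count from the length the handler actually produced, so the copy back to user space never includes uninitialized tail bytes from the worst-case allocation. The guard in miniature (a sketch, not the kernel routine):

#include <stdio.h>

/* Tokens the handler says it filled in vs. the worst-case allocation. */
static size_t bytes_to_copy(size_t filled_tokens, size_t max_tokens,
			    size_t token_size, int fixed_size)
{
	size_t n = fixed_size ? max_tokens : filled_tokens;

	if (n > max_tokens)		/* never exceed the allocation */
		n = max_tokens;
	return n * token_size;		/* copy only initialized bytes */
}

int main(void)
{
	/* 3 of 16 possible 4-byte tokens filled: copy 12 bytes, not 64. */
	printf("%zu\n", bytes_to_copy(3, 16, 4, 0));
	return 0;
}
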
@@ -999,12 +1005,21 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
999} 1005}
1000 1006
1001/* ---------------------------------------------------------------- */ 1007/* ---------------------------------------------------------------- */
1008typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
1009 unsigned int, struct iw_request_info *,
1010 iw_handler);
1011
1002/* 1012/*
1003 * Main IOCTL dispatcher. 1013 * Main IOCTL dispatcher.
1004 * Check the type of IOCTL and call the appropriate wrapper... 1014 * Check the type of IOCTL and call the appropriate wrapper...
1005 */ 1015 */
1006static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd) 1016static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
1017 unsigned int cmd,
1018 struct iw_request_info *info,
1019 wext_ioctl_func standard,
1020 wext_ioctl_func private)
1007{ 1021{
1022 struct iwreq *iwr = (struct iwreq *) ifr;
1008 struct net_device *dev; 1023 struct net_device *dev;
1009 iw_handler handler; 1024 iw_handler handler;
1010 1025
@@ -1019,12 +1034,12 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
1019 * Note that 'cmd' is already filtered in dev_ioctl() with 1034 * Note that 'cmd' is already filtered in dev_ioctl() with
1020 * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ 1035 * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */
1021 if (cmd == SIOCGIWSTATS) 1036 if (cmd == SIOCGIWSTATS)
1022 return ioctl_standard_call(dev, ifr, cmd, 1037 return standard(dev, iwr, cmd, info,
1023 &iw_handler_get_iwstats); 1038 &iw_handler_get_iwstats);
1024 1039
1025 if (cmd == SIOCGIWPRIV && dev->wireless_handlers) 1040 if (cmd == SIOCGIWPRIV && dev->wireless_handlers)
1026 return ioctl_standard_call(dev, ifr, cmd, 1041 return standard(dev, iwr, cmd, info,
1027 &iw_handler_get_private); 1042 &iw_handler_get_private);
1028 1043
1029 /* Basic check */ 1044 /* Basic check */
1030 if (!netif_device_present(dev)) 1045 if (!netif_device_present(dev))
@@ -1035,9 +1050,9 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
1035 if (handler) { 1050 if (handler) {
1036 /* Standard and private are not the same */ 1051 /* Standard and private are not the same */
1037 if (cmd < SIOCIWFIRSTPRIV) 1052 if (cmd < SIOCIWFIRSTPRIV)
1038 return ioctl_standard_call(dev, ifr, cmd, handler); 1053 return standard(dev, iwr, cmd, info, handler);
1039 else 1054 else
1040 return ioctl_private_call(dev, ifr, cmd, handler); 1055 return private(dev, iwr, cmd, info, handler);
1041 } 1056 }
1042 /* Old driver API : call driver ioctl handler */ 1057 /* Old driver API : call driver ioctl handler */
1043 if (dev->do_ioctl) 1058 if (dev->do_ioctl)
@@ -1045,27 +1060,154 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
1045 return -EOPNOTSUPP; 1060 return -EOPNOTSUPP;
1046} 1061}
1047 1062
1048/* entry point from dev ioctl */ 1063/* If command is `set a parameter', or `get the encoding parameters',
1049int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 1064 * check if the user has the right to do it.
1050 void __user *arg) 1065 */
1066static int wext_permission_check(unsigned int cmd)
1051{ 1067{
1052 int ret;
1053
1054 /* If command is `set a parameter', or
1055 * `get the encoding parameters', check if
1056 * the user has the right to do it */
1057 if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) 1068 if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT)
1058 && !capable(CAP_NET_ADMIN)) 1069 && !capable(CAP_NET_ADMIN))
1059 return -EPERM; 1070 return -EPERM;
1060 1071
1072 return 0;
1073}
1074
1075/* entry point from dev ioctl */
1076static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
1077 unsigned int cmd, struct iw_request_info *info,
1078 wext_ioctl_func standard,
1079 wext_ioctl_func private)
1080{
1081 int ret = wext_permission_check(cmd);
1082
1083 if (ret)
1084 return ret;
1085
1061 dev_load(net, ifr->ifr_name); 1086 dev_load(net, ifr->ifr_name);
1062 rtnl_lock(); 1087 rtnl_lock();
1063 ret = wireless_process_ioctl(net, ifr, cmd); 1088 ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
1064 rtnl_unlock(); 1089 rtnl_unlock();
1065 if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct iwreq))) 1090
1091 return ret;
1092}
1093
1094int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
1095 void __user *arg)
1096{
1097 struct iw_request_info info = { .cmd = cmd, .flags = 0 };
1098 int ret;
1099
1100 ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
1101 ioctl_standard_call,
1102 ioctl_private_call);
1103 if (ret >= 0 &&
1104 IW_IS_GET(cmd) &&
1105 copy_to_user(arg, ifr, sizeof(struct iwreq)))
1106 return -EFAULT;
1107
1108 return ret;
1109}
1110
1111#ifdef CONFIG_COMPAT
1112static int compat_standard_call(struct net_device *dev,
1113 struct iwreq *iwr,
1114 unsigned int cmd,
1115 struct iw_request_info *info,
1116 iw_handler handler)
1117{
1118 const struct iw_ioctl_description *descr;
1119 struct compat_iw_point *iwp_compat;
1120 struct iw_point iwp;
1121 int err;
1122
1123 descr = standard_ioctl + (cmd - SIOCIWFIRST);
1124
1125 if (descr->header_type != IW_HEADER_TYPE_POINT)
1126 return ioctl_standard_call(dev, iwr, cmd, info, handler);
1127
1128 iwp_compat = (struct compat_iw_point *) &iwr->u.data;
1129 iwp.pointer = compat_ptr(iwp_compat->pointer);
1130 iwp.length = iwp_compat->length;
1131 iwp.flags = iwp_compat->flags;
1132
1133 err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info);
1134
1135 iwp_compat->pointer = ptr_to_compat(iwp.pointer);
1136 iwp_compat->length = iwp.length;
1137 iwp_compat->flags = iwp.flags;
1138
1139 return err;
1140}
1141
1142static int compat_private_call(struct net_device *dev, struct iwreq *iwr,
1143 unsigned int cmd, struct iw_request_info *info,
1144 iw_handler handler)
1145{
1146 const struct iw_priv_args *descr;
1147 int ret, extra_size;
1148
1149 extra_size = get_priv_descr_and_size(dev, cmd, &descr);
1150
1151 /* Check if we have a pointer to user space data or not. */
1152 if (extra_size == 0) {
1153 /* No extra arguments. Trivial to handle */
1154 ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u));
1155 } else {
1156 struct compat_iw_point *iwp_compat;
1157 struct iw_point iwp;
1158
1159 iwp_compat = (struct compat_iw_point *) &iwr->u.data;
1160 iwp.pointer = compat_ptr(iwp_compat->pointer);
1161 iwp.length = iwp_compat->length;
1162 iwp.flags = iwp_compat->flags;
1163
1164 ret = ioctl_private_iw_point(&iwp, cmd, descr,
1165 handler, dev, info, extra_size);
1166
1167 iwp_compat->pointer = ptr_to_compat(iwp.pointer);
1168 iwp_compat->length = iwp.length;
1169 iwp_compat->flags = iwp.flags;
1170 }
1171
1172 /* Call commit handler if needed and defined */
1173 if (ret == -EIWCOMMIT)
1174 ret = call_commit_handler(dev);
1175
1176 return ret;
1177}
1178
1179int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
1180 unsigned long arg)
1181{
1182 void __user *argp = (void __user *)arg;
1183 struct iw_request_info info;
1184 struct iwreq iwr;
1185 char *colon;
1186 int ret;
1187
1188 if (copy_from_user(&iwr, argp, sizeof(struct iwreq)))
1189 return -EFAULT;
1190
1191 iwr.ifr_name[IFNAMSIZ-1] = 0;
1192 colon = strchr(iwr.ifr_name, ':');
1193 if (colon)
1194 *colon = 0;
1195
1196 info.cmd = cmd;
1197 info.flags = IW_REQUEST_FLAG_COMPAT;
1198
1199 ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
1200 compat_standard_call,
1201 compat_private_call);
1202
1203 if (ret >= 0 &&
1204 IW_IS_GET(cmd) &&
1205 copy_to_user(argp, &iwr, sizeof(struct iwreq)))
1066 return -EFAULT; 1206 return -EFAULT;
1207
1067 return ret; 1208 return ret;
1068} 1209}
1210#endif
1069 1211
1070/************************* EVENT PROCESSING *************************/ 1212/************************* EVENT PROCESSING *************************/
1071/* 1213/*
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6ba67c523c16..7b1c6ef04553 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -555,13 +555,11 @@ static struct sock *x25_make_new(struct sock *osk)
 	x25 = x25_sk(sk);
 
 	sk->sk_type = osk->sk_type;
-	sk->sk_socket = osk->sk_socket;
 	sk->sk_priority = osk->sk_priority;
 	sk->sk_protocol = osk->sk_protocol;
 	sk->sk_rcvbuf = osk->sk_rcvbuf;
 	sk->sk_sndbuf = osk->sk_sndbuf;
 	sk->sk_state = TCP_ESTABLISHED;
-	sk->sk_sleep = osk->sk_sleep;
 	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
 	sock_copy_flags(sk, osk);
 
@@ -614,8 +612,7 @@ static int x25_release(struct socket *sock)
 			break;
 	}
 
-	sock->sk = NULL;
-	sk->sk_socket = NULL;	/* Not used, but we should do this */
+	sock_orphan(sk);
out:
 	return 0;
 }
@@ -808,14 +805,12 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (!skb->sk)
 		goto out2;
 	newsk = skb->sk;
-	newsk->sk_socket = newsock;
-	newsk->sk_sleep = &newsock->wait;
+	sock_graft(newsk, newsock);
 
 	/* Now attach up the new socket */
 	skb->sk = NULL;
 	kfree_skb(skb);
 	sk->sk_ack_backlog--;
-	newsock->sk = newsk;
 	newsock->state = SS_CONNECTED;
 	rc = 0;
 out2:
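The af_x25.c hunks replace open-coded sock/socket wiring with the generic
helpers. For orientation, a rough sketch of what those helpers do follows,
modeled on the include/net/sock.h definitions of this kernel generation;
the bodies are not part of this diff. sock_graft() links a struct sock to
its owning struct socket and wait queue under sk_callback_lock, and
sock_orphan() severs that link and marks the sock dead, covering the
SOCK_DEAD flag the removed x25_release() code never set.

/* Sketch, assuming the include/net/sock.h helpers of this era; the
 * fields touched match what the removed lines manipulated by hand. */
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;	/* was: newsk->sk_sleep = &newsock->wait; */
	parent->sk = sk;		/* was: newsock->sk = newsk; */
	sk->sk_socket = parent;		/* was: newsk->sk_socket = newsock; */
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);	/* not set by the removed open-coded version */
	sk->sk_socket = NULL;		/* was: sock->sk = NULL; sk->sk_socket = NULL; */
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}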