Diffstat (limited to 'net')
-rw-r--r--  net/802/Kconfig | 7
-rw-r--r--  net/802/Makefile | 2
-rw-r--r--  net/802/garp.c | 633
-rw-r--r--  net/802/stp.c | 102
-rw-r--r--  net/8021q/Kconfig | 10
-rw-r--r--  net/8021q/Makefile | 9
-rw-r--r--  net/8021q/vlan.c | 33
-rw-r--r--  net/8021q/vlan.h | 19
-rw-r--r--  net/8021q/vlan_dev.c | 32
-rw-r--r--  net/8021q/vlan_gvrp.c | 66
-rw-r--r--  net/8021q/vlan_netlink.c | 7
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/atm/addr.c | 10
-rw-r--r--  net/atm/addr.h | 4
-rw-r--r--  net/atm/br2684.c | 14
-rw-r--r--  net/atm/common.c | 8
-rw-r--r--  net/atm/lec.c | 55
-rw-r--r--  net/atm/lec.h | 10
-rw-r--r--  net/ax25/af_ax25.c | 6
-rw-r--r--  net/ax25/ax25_std_timer.c | 8
-rw-r--r--  net/bluetooth/bnep/bnep.h | 4
-rw-r--r--  net/bluetooth/bnep/core.c | 4
-rw-r--r--  net/bluetooth/bnep/netdev.c | 4
-rw-r--r--  net/bluetooth/bnep/sock.c | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 2
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 2
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 2
-rw-r--r--  net/bridge/Kconfig | 1
-rw-r--r--  net/bridge/br.c | 18
-rw-r--r--  net/bridge/br_device.c | 14
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_forward.c | 6
-rw-r--r--  net/bridge/br_if.c | 3
-rw-r--r--  net/bridge/br_input.c | 25
-rw-r--r--  net/bridge/br_ioctl.c | 2
-rw-r--r--  net/bridge/br_notify.c | 2
-rw-r--r--  net/bridge/br_private.h | 10
-rw-r--r--  net/bridge/br_private_stp.h | 2
-rw-r--r--  net/bridge/br_stp.c | 2
-rw-r--r--  net/bridge/br_stp_bpdu.c | 14
-rw-r--r--  net/bridge/br_stp_if.c | 6
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/bridge/netfilter/Kconfig | 9
-rw-r--r--  net/bridge/netfilter/Makefile | 1
-rw-r--r--  net/bridge/netfilter/ebt_ip6.c | 144
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 66
-rw-r--r--  net/core/dev.c | 85
-rw-r--r--  net/core/ethtool.c | 37
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/net-sysfs.c | 22
-rw-r--r--  net/core/rtnetlink.c | 20
-rw-r--r--  net/core/skbuff.c | 10
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/core/sysctl_net_core.c | 39
-rw-r--r--  net/dccp/dccp.h | 2
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/af_decnet.c | 30
-rw-r--r--  net/decnet/dn_rules.c | 2
-rw-r--r--  net/econet/af_econet.c | 4
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  net/ieee80211/ieee80211_tx.c | 86
-rw-r--r--  net/ieee80211/ieee80211_wx.c | 137
-rw-r--r--  net/ipv4/af_inet.c | 7
-rw-r--r--  net/ipv4/arp.c | 4
-rw-r--r--  net/ipv4/devinet.c | 35
-rw-r--r--  net/ipv4/fib_frontend.c | 19
-rw-r--r--  net/ipv4/fib_hash.c | 8
-rw-r--r--  net/ipv4/fib_rules.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 8
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 6
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 19
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 6
-rw-r--r--  net/ipv4/inetpeer.c | 2
-rw-r--r--  net/ipv4/ip_forward.c | 5
-rw-r--r--  net/ipv4/ip_fragment.c | 36
-rw-r--r--  net/ipv4/ip_gre.c | 32
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ip_sockglue.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/ipip.c | 24
-rw-r--r--  net/ipv4/ipmr.c | 59
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_ah.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_esp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_tcp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_udp.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wrr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c | 2
-rw-r--r--  net/ipv4/netfilter/Kconfig | 12
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 3
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 180
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 5
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/protocol.c | 2
-rw-r--r--  net/ipv4/raw.c | 7
-rw-r--r--  net/ipv4/route.c | 257
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp.c | 72
-rw-r--r--  net/ipv4/tcp_diag.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 88
-rw-r--r--  net/ipv4/tcp_ipv4.c | 170
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 12
-rw-r--r--  net/ipv4/tcp_timer.c | 17
-rw-r--r--  net/ipv4/udp.c | 62
-rw-r--r--  net/ipv4/udp_impl.h | 2
-rw-r--r--  net/ipv4/udplite.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 83
-rw-r--r--  net/ipv6/addrlabel.c | 106
-rw-r--r--  net/ipv6/af_inet6.c | 17
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipv6/icmp.c | 5
-rw-r--r--  net/ipv6/inet6_hashtables.c | 9
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 15
-rw-r--r--  net/ipv6/ip6_tunnel.c | 28
-rw-r--r--  net/ipv6/ip6mr.c | 62
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 2
-rw-r--r--  net/ipv6/mcast.c | 3
-rw-r--r--  net/ipv6/netfilter/Kconfig | 12
-rw-r--r--  net/ipv6/netfilter/Makefile | 1
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 3
-rw-r--r--  net/ipv6/netfilter/ip6table_security.c | 172
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 5
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/protocol.c | 2
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 63
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/sit.c | 26
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 29
-rw-r--r--  net/ipv6/tcp_ipv6.c | 144
-rw-r--r--  net/ipv6/udp.c | 53
-rw-r--r--  net/ipv6/udp_impl.h | 2
-rw-r--r--  net/ipv6/udplite.c | 2
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 54
-rw-r--r--  net/irda/irnet/irnet_ppp.h | 7
-rw-r--r--  net/iucv/af_iucv.c | 1
-rw-r--r--  net/iucv/iucv.c | 13
-rw-r--r--  net/key/af_key.c | 622
-rw-r--r--  net/llc/af_llc.c | 4
-rw-r--r--  net/mac80211/Kconfig | 176
-rw-r--r--  net/mac80211/Makefile | 20
-rw-r--r--  net/mac80211/aes_ccm.c | 2
-rw-r--r--  net/mac80211/aes_ccm.h | 2
-rw-r--r--  net/mac80211/cfg.c | 6
-rw-r--r--  net/mac80211/debugfs.c | 43
-rw-r--r--  net/mac80211/debugfs_key.c | 8
-rw-r--r--  net/mac80211/debugfs_netdev.c | 11
-rw-r--r--  net/mac80211/debugfs_sta.c | 56
-rw-r--r--  net/mac80211/ieee80211_i.h | 112
-rw-r--r--  net/mac80211/iface.c | 12
-rw-r--r--  net/mac80211/key.c | 11
-rw-r--r--  net/mac80211/key.h | 54
-rw-r--r--  net/mac80211/main.c | 369
-rw-r--r--  net/mac80211/mesh.c | 38
-rw-r--r--  net/mac80211/mesh.h | 2
-rw-r--r--  net/mac80211/mesh_hwmp.c | 2
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 54
-rw-r--r--  net/mac80211/mesh_plink.c | 88
-rw-r--r--  net/mac80211/michael.c | 106
-rw-r--r--  net/mac80211/michael.h | 8
-rw-r--r--  net/mac80211/mlme.c | 851
-rw-r--r--  net/mac80211/rate.c | 12
-rw-r--r--  net/mac80211/rate.h | 37
-rw-r--r--  net/mac80211/rc80211_pid.h | 4
-rw-r--r--  net/mac80211/rc80211_pid_algo.c | 32
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c | 8
-rw-r--r--  net/mac80211/rx.c | 526
-rw-r--r--  net/mac80211/sta_info.c | 25
-rw-r--r--  net/mac80211/sta_info.h | 94
-rw-r--r--  net/mac80211/tkip.c | 282
-rw-r--r--  net/mac80211/tkip.h | 8
-rw-r--r--  net/mac80211/tx.c | 1057
-rw-r--r--  net/mac80211/util.c | 140
-rw-r--r--  net/mac80211/wep.c | 71
-rw-r--r--  net/mac80211/wep.h | 2
-rw-r--r--  net/mac80211/wext.c | 78
-rw-r--r--  net/mac80211/wme.c | 166
-rw-r--r--  net/mac80211/wme.h | 2
-rw-r--r--  net/mac80211/wpa.c | 246
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 30
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 80
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 9
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 1
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 10
-rw-r--r--  net/netfilter/xt_SECMARK.c | 10
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 8
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/rfkill/rfkill-input.c | 98
-rw-r--r--  net/rfkill/rfkill-input.h | 1
-rw-r--r--  net/rfkill/rfkill.c | 304
-rw-r--r--  net/rose/af_rose.c | 10
-rw-r--r--  net/rose/rose_route.c | 29
-rw-r--r--  net/rxrpc/ar-input.c | 5
-rw-r--r--  net/sched/sch_htb.c | 2
-rw-r--r--  net/sctp/associola.c | 3
-rw-r--r--  net/sctp/input.c | 27
-rw-r--r--  net/sctp/output.c | 7
-rw-r--r--  net/sctp/outqueue.c | 34
-rw-r--r--  net/sctp/proc.c | 141
-rw-r--r--  net/sctp/protocol.c | 5
-rw-r--r--  net/sctp/sm_sideeffect.c | 44
-rw-r--r--  net/sctp/sm_statefuns.c | 16
-rw-r--r--  net/sctp/socket.c | 321
-rw-r--r--  net/socket.c | 10
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 2
-rw-r--r--  net/sysctl_net.c | 31
-rw-r--r--  net/tipc/bcast.c | 4
-rw-r--r--  net/tipc/cluster.c | 2
-rw-r--r--  net/tipc/config.c | 11
-rw-r--r--  net/tipc/core.c | 13
-rw-r--r--  net/tipc/core.h | 126
-rw-r--r--  net/tipc/dbg.c | 231
-rw-r--r--  net/tipc/dbg.h | 12
-rw-r--r--  net/tipc/discover.c | 14
-rw-r--r--  net/tipc/discover.h | 2
-rw-r--r--  net/tipc/link.c | 80
-rw-r--r--  net/tipc/msg.c | 13
-rw-r--r--  net/tipc/msg.h | 42
-rw-r--r--  net/tipc/name_distr.c | 6
-rw-r--r--  net/tipc/name_table.c | 14
-rw-r--r--  net/tipc/net.c | 10
-rw-r--r--  net/tipc/net.h | 2
-rw-r--r--  net/tipc/netlink.c | 16
-rw-r--r--  net/tipc/node.c | 26
-rw-r--r--  net/tipc/port.c | 79
-rw-r--r--  net/tipc/ref.c | 12
-rw-r--r--  net/tipc/socket.c | 5
-rw-r--r--  net/tipc/subscr.c | 249
-rw-r--r--  net/tipc/subscr.h | 34
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/wanrouter/wanmain.c | 6
-rw-r--r--  net/wanrouter/wanproc.c | 2
-rw-r--r--  net/wireless/core.c | 33
-rw-r--r--  net/wireless/radiotap.c | 16
-rw-r--r--  net/wireless/wext.c | 582
-rw-r--r--  net/x25/af_x25.c | 9
270 files changed, 7260 insertions, 5204 deletions
diff --git a/net/802/Kconfig b/net/802/Kconfig
new file mode 100644
index 000000000000..be33d27c8e69
--- /dev/null
+++ b/net/802/Kconfig
@@ -0,0 +1,7 @@
+config STP
+	tristate
+	select LLC
+
+config GARP
+	tristate
+	select STP
diff --git a/net/802/Makefile b/net/802/Makefile
index 68569ffddea1..7893d679910c 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_FDDI) += fddi.o
 obj-$(CONFIG_HIPPI) += hippi.o
 obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
 obj-$(CONFIG_ATALK) += p8022.o psnap.o
+obj-$(CONFIG_STP) += stp.o
+obj-$(CONFIG_GARP) += garp.o
diff --git a/net/802/garp.c b/net/802/garp.c
new file mode 100644
index 000000000000..3b78f7b74fd4
--- /dev/null
+++ b/net/802/garp.c
@@ -0,0 +1,633 @@
1/*
2 * IEEE 802.1D Generic Attribute Registration Protocol (GARP)
3 *
4 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10#include <linux/kernel.h>
11#include <linux/timer.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/llc.h>
17#include <net/llc.h>
18#include <net/llc_pdu.h>
19#include <net/garp.h>
20#include <asm/unaligned.h>
21
22static unsigned int garp_join_time __read_mostly = 200;
23module_param(garp_join_time, uint, 0644);
24MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
25MODULE_LICENSE("GPL");
26
27static const struct garp_state_trans {
28 u8 state;
29 u8 action;
30} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
31 [GARP_APPLICANT_VA] = {
32 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
33 .action = GARP_ACTION_S_JOIN_IN },
34 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA },
35 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
36 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
37 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
38 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
39 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
40 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
41 },
42 [GARP_APPLICANT_AA] = {
43 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
44 .action = GARP_ACTION_S_JOIN_IN },
45 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
46 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
47 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
48 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
49 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
50 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
51 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
52 },
53 [GARP_APPLICANT_QA] = {
54 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
55 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
56 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
57 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
58 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
59 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
60 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
61 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
62 },
63 [GARP_APPLICANT_LA] = {
64 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO,
65 .action = GARP_ACTION_S_LEAVE_EMPTY },
66 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA },
67 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
68 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA },
69 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA },
70 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
71 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA },
72 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
73 },
74 [GARP_APPLICANT_VP] = {
75 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
76 .action = GARP_ACTION_S_JOIN_IN },
77 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP },
78 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
79 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
80 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
81 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
82 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
83 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO },
84 },
85 [GARP_APPLICANT_AP] = {
86 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
87 .action = GARP_ACTION_S_JOIN_IN },
88 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
89 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
90 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
91 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
92 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
93 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
94 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO },
95 },
96 [GARP_APPLICANT_QP] = {
97 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
98 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
99 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
100 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
101 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
102 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
103 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
104 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO },
105 },
106 [GARP_APPLICANT_VO] = {
107 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
108 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO },
109 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
110 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
111 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
112 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
113 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP },
114 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
115 },
116 [GARP_APPLICANT_AO] = {
117 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
118 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
119 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
120 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
121 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
122 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
123 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP },
124 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
125 },
126 [GARP_APPLICANT_QO] = {
127 [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
128 [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
129 [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
130 [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
131 [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
132 [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
133 [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP },
134 [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
135 },
136};
137
138static int garp_attr_cmp(const struct garp_attr *attr,
139 const void *data, u8 len, u8 type)
140{
141 if (attr->type != type)
142 return attr->type - type;
143 if (attr->dlen != len)
144 return attr->dlen - len;
145 return memcmp(attr->data, data, len);
146}
147
148static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
149 const void *data, u8 len, u8 type)
150{
151 struct rb_node *parent = app->gid.rb_node;
152 struct garp_attr *attr;
153 int d;
154
155 while (parent) {
156 attr = rb_entry(parent, struct garp_attr, node);
157 d = garp_attr_cmp(attr, data, len, type);
158 if (d < 0)
159 parent = parent->rb_left;
160 else if (d > 0)
161 parent = parent->rb_right;
162 else
163 return attr;
164 }
165 return NULL;
166}
167
168static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new)
169{
170 struct rb_node *parent = NULL, **p = &app->gid.rb_node;
171 struct garp_attr *attr;
172 int d;
173
174 while (*p) {
175 parent = *p;
176 attr = rb_entry(parent, struct garp_attr, node);
177 d = garp_attr_cmp(attr, new->data, new->dlen, new->type);
178 if (d < 0)
179 p = &parent->rb_left;
180 else if (d > 0)
181 p = &parent->rb_right;
182 }
183 rb_link_node(&new->node, parent, p);
184 rb_insert_color(&new->node, &app->gid);
185}
186
187static struct garp_attr *garp_attr_create(struct garp_applicant *app,
188 const void *data, u8 len, u8 type)
189{
190 struct garp_attr *attr;
191
192 attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
193 if (!attr)
194 return attr;
195 attr->state = GARP_APPLICANT_VO;
196 attr->type = type;
197 attr->dlen = len;
198 memcpy(attr->data, data, len);
199 garp_attr_insert(app, attr);
200 return attr;
201}
202
203static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
204{
205 rb_erase(&attr->node, &app->gid);
206 kfree(attr);
207}
208
209static int garp_pdu_init(struct garp_applicant *app)
210{
211 struct sk_buff *skb;
212 struct garp_pdu_hdr *gp;
213
214#define LLC_RESERVE sizeof(struct llc_pdu_un)
215 skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
216 GFP_ATOMIC);
217 if (!skb)
218 return -ENOMEM;
219
220 skb->dev = app->dev;
221 skb->protocol = htons(ETH_P_802_2);
222 skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
223
224 gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp));
225 put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
226
227 app->pdu = skb;
228 return 0;
229}
230
231static int garp_pdu_append_end_mark(struct garp_applicant *app)
232{
233 if (skb_tailroom(app->pdu) < sizeof(u8))
234 return -1;
235 *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK;
236 return 0;
237}
238
239static void garp_pdu_queue(struct garp_applicant *app)
240{
241 if (!app->pdu)
242 return;
243
244 garp_pdu_append_end_mark(app);
245 garp_pdu_append_end_mark(app);
246
247 llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
248 LLC_SAP_BSPAN, LLC_PDU_CMD);
249 llc_pdu_init_as_ui_cmd(app->pdu);
250 llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
251 app->app->proto.group_address);
252
253 skb_queue_tail(&app->queue, app->pdu);
254 app->pdu = NULL;
255}
256
257static void garp_queue_xmit(struct garp_applicant *app)
258{
259 struct sk_buff *skb;
260
261 while ((skb = skb_dequeue(&app->queue)))
262 dev_queue_xmit(skb);
263}
264
265static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
266{
267 struct garp_msg_hdr *gm;
268
269 if (skb_tailroom(app->pdu) < sizeof(*gm))
270 return -1;
271 gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm));
272 gm->attrtype = attrtype;
273 garp_cb(app->pdu)->cur_type = attrtype;
274 return 0;
275}
276
277static int garp_pdu_append_attr(struct garp_applicant *app,
278 const struct garp_attr *attr,
279 enum garp_attr_event event)
280{
281 struct garp_attr_hdr *ga;
282 unsigned int len;
283 int err;
284again:
285 if (!app->pdu) {
286 err = garp_pdu_init(app);
287 if (err < 0)
288 return err;
289 }
290
291 if (garp_cb(app->pdu)->cur_type != attr->type) {
292 if (garp_cb(app->pdu)->cur_type &&
293 garp_pdu_append_end_mark(app) < 0)
294 goto queue;
295 if (garp_pdu_append_msg(app, attr->type) < 0)
296 goto queue;
297 }
298
299 len = sizeof(*ga) + attr->dlen;
300 if (skb_tailroom(app->pdu) < len)
301 goto queue;
302 ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len);
303 ga->len = len;
304 ga->event = event;
305 memcpy(ga->data, attr->data, attr->dlen);
306 return 0;
307
308queue:
309 garp_pdu_queue(app);
310 goto again;
311}
312
313static void garp_attr_event(struct garp_applicant *app,
314 struct garp_attr *attr, enum garp_event event)
315{
316 enum garp_applicant_state state;
317
318 state = garp_applicant_state_table[attr->state][event].state;
319 if (state == GARP_APPLICANT_INVALID)
320 return;
321
322 switch (garp_applicant_state_table[attr->state][event].action) {
323 case GARP_ACTION_NONE:
324 break;
325 case GARP_ACTION_S_JOIN_IN:
326 garp_pdu_append_attr(app, attr, GARP_JOIN_IN);
327 break;
328 case GARP_ACTION_S_LEAVE_EMPTY:
329 garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
330 /* As a pure applicant, sending a leave message implies that
331 * the attribute was unregistered and can be destroyed. */
332 garp_attr_destroy(app, attr);
333 return;
334 default:
335 WARN_ON(1);
336 }
337
338 attr->state = state;
339}
340
341int garp_request_join(const struct net_device *dev,
342 const struct garp_application *appl,
343 const void *data, u8 len, u8 type)
344{
345 struct garp_port *port = dev->garp_port;
346 struct garp_applicant *app = port->applicants[appl->type];
347 struct garp_attr *attr;
348
349 spin_lock_bh(&app->lock);
350 attr = garp_attr_create(app, data, len, type);
351 if (!attr) {
352 spin_unlock_bh(&app->lock);
353 return -ENOMEM;
354 }
355 garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
356 spin_unlock_bh(&app->lock);
357 return 0;
358}
359EXPORT_SYMBOL_GPL(garp_request_join);
360
361void garp_request_leave(const struct net_device *dev,
362 const struct garp_application *appl,
363 const void *data, u8 len, u8 type)
364{
365 struct garp_port *port = dev->garp_port;
366 struct garp_applicant *app = port->applicants[appl->type];
367 struct garp_attr *attr;
368
369 spin_lock_bh(&app->lock);
370 attr = garp_attr_lookup(app, data, len, type);
371 if (!attr) {
372 spin_unlock_bh(&app->lock);
373 return;
374 }
375 garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
376 spin_unlock_bh(&app->lock);
377}
378EXPORT_SYMBOL_GPL(garp_request_leave);
379
380static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
381{
382 struct rb_node *node, *next;
383 struct garp_attr *attr;
384
385 for (node = rb_first(&app->gid);
386 next = node ? rb_next(node) : NULL, node != NULL;
387 node = next) {
388 attr = rb_entry(node, struct garp_attr, node);
389 garp_attr_event(app, attr, event);
390 }
391}
392
393static void garp_join_timer_arm(struct garp_applicant *app)
394{
395 unsigned long delay;
396
397 delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32;
398 mod_timer(&app->join_timer, jiffies + delay);
399}
400
401static void garp_join_timer(unsigned long data)
402{
403 struct garp_applicant *app = (struct garp_applicant *)data;
404
405 spin_lock(&app->lock);
406 garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
407 garp_pdu_queue(app);
408 spin_unlock(&app->lock);
409
410 garp_queue_xmit(app);
411 garp_join_timer_arm(app);
412}
413
414static int garp_pdu_parse_end_mark(struct sk_buff *skb)
415{
416 if (!pskb_may_pull(skb, sizeof(u8)))
417 return -1;
418 if (*skb->data == GARP_END_MARK) {
419 skb_pull(skb, sizeof(u8));
420 return -1;
421 }
422 return 0;
423}
424
425static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
426 u8 attrtype)
427{
428 const struct garp_attr_hdr *ga;
429 struct garp_attr *attr;
430 enum garp_event event;
431 unsigned int dlen;
432
433 if (!pskb_may_pull(skb, sizeof(*ga)))
434 return -1;
435 ga = (struct garp_attr_hdr *)skb->data;
436 if (ga->len < sizeof(*ga))
437 return -1;
438
439 if (!pskb_may_pull(skb, ga->len))
440 return -1;
441 skb_pull(skb, ga->len);
442 dlen = sizeof(*ga) - ga->len;
443
444 if (attrtype > app->app->maxattr)
445 return 0;
446
447 switch (ga->event) {
448 case GARP_LEAVE_ALL:
449 if (dlen != 0)
450 return -1;
451 garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY);
452 return 0;
453 case GARP_JOIN_EMPTY:
454 event = GARP_EVENT_R_JOIN_EMPTY;
455 break;
456 case GARP_JOIN_IN:
457 event = GARP_EVENT_R_JOIN_IN;
458 break;
459 case GARP_LEAVE_EMPTY:
460 event = GARP_EVENT_R_LEAVE_EMPTY;
461 break;
462 case GARP_EMPTY:
463 event = GARP_EVENT_R_EMPTY;
464 break;
465 default:
466 return 0;
467 }
468
469 if (dlen == 0)
470 return -1;
471 attr = garp_attr_lookup(app, ga->data, dlen, attrtype);
472 if (attr == NULL)
473 return 0;
474 garp_attr_event(app, attr, event);
475 return 0;
476}
477
478static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
479{
480 const struct garp_msg_hdr *gm;
481
482 if (!pskb_may_pull(skb, sizeof(*gm)))
483 return -1;
484 gm = (struct garp_msg_hdr *)skb->data;
485 if (gm->attrtype == 0)
486 return -1;
487 skb_pull(skb, sizeof(*gm));
488
489 while (skb->len > 0) {
490 if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
491 return -1;
492 if (garp_pdu_parse_end_mark(skb) < 0)
493 break;
494 }
495 return 0;
496}
497
498static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
499 struct net_device *dev)
500{
501 struct garp_application *appl = proto->data;
502 struct garp_port *port;
503 struct garp_applicant *app;
504 const struct garp_pdu_hdr *gp;
505
506 port = rcu_dereference(dev->garp_port);
507 if (!port)
508 goto err;
509 app = rcu_dereference(port->applicants[appl->type]);
510 if (!app)
511 goto err;
512
513 if (!pskb_may_pull(skb, sizeof(*gp)))
514 goto err;
515 gp = (struct garp_pdu_hdr *)skb->data;
516 if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
517 goto err;
518 skb_pull(skb, sizeof(*gp));
519
520 spin_lock(&app->lock);
521 while (skb->len > 0) {
522 if (garp_pdu_parse_msg(app, skb) < 0)
523 break;
524 if (garp_pdu_parse_end_mark(skb) < 0)
525 break;
526 }
527 spin_unlock(&app->lock);
528err:
529 kfree_skb(skb);
530}
531
532static int garp_init_port(struct net_device *dev)
533{
534 struct garp_port *port;
535
536 port = kzalloc(sizeof(*port), GFP_KERNEL);
537 if (!port)
538 return -ENOMEM;
539 rcu_assign_pointer(dev->garp_port, port);
540 return 0;
541}
542
543static void garp_release_port(struct net_device *dev)
544{
545 struct garp_port *port = dev->garp_port;
546 unsigned int i;
547
548 for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
549 if (port->applicants[i])
550 return;
551 }
552 rcu_assign_pointer(dev->garp_port, NULL);
553 synchronize_rcu();
554 kfree(port);
555}
556
557int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
558{
559 struct garp_applicant *app;
560 int err;
561
562 ASSERT_RTNL();
563
564 if (!dev->garp_port) {
565 err = garp_init_port(dev);
566 if (err < 0)
567 goto err1;
568 }
569
570 err = -ENOMEM;
571 app = kzalloc(sizeof(*app), GFP_KERNEL);
572 if (!app)
573 goto err2;
574
575 err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
576 if (err < 0)
577 goto err3;
578
579 app->dev = dev;
580 app->app = appl;
581 app->gid = RB_ROOT;
582 spin_lock_init(&app->lock);
583 skb_queue_head_init(&app->queue);
584 rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
585 setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
586 garp_join_timer_arm(app);
587 return 0;
588
589err3:
590 kfree(app);
591err2:
592 garp_release_port(dev);
593err1:
594 return err;
595}
596EXPORT_SYMBOL_GPL(garp_init_applicant);
597
598void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
599{
600 struct garp_port *port = dev->garp_port;
601 struct garp_applicant *app = port->applicants[appl->type];
602
603 ASSERT_RTNL();
604
605 rcu_assign_pointer(port->applicants[appl->type], NULL);
606 synchronize_rcu();
607
608 /* Delete timer and generate a final TRANSMIT_PDU event to flush out
609 * all pending messages before the applicant is gone. */
610 del_timer_sync(&app->join_timer);
611 garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
612 garp_pdu_queue(app);
613 garp_queue_xmit(app);
614
615 dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
616 kfree(app);
617 garp_release_port(dev);
618}
619EXPORT_SYMBOL_GPL(garp_uninit_applicant);
620
621int garp_register_application(struct garp_application *appl)
622{
623 appl->proto.rcv = garp_pdu_rcv;
624 appl->proto.data = appl;
625 return stp_proto_register(&appl->proto);
626}
627EXPORT_SYMBOL_GPL(garp_register_application);
628
629void garp_unregister_application(struct garp_application *appl)
630{
631 stp_proto_unregister(&appl->proto);
632}
633EXPORT_SYMBOL_GPL(garp_unregister_application);
diff --git a/net/802/stp.c b/net/802/stp.c
new file mode 100644
index 000000000000..0b7a24452d11
--- /dev/null
+++ b/net/802/stp.c
@@ -0,0 +1,102 @@
+/*
+ *	STP SAP demux
+ *
+ *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	version 2 as published by the Free Software Foundation.
+ */
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/llc.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
+#include <net/stp.h>
+
+/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
+#define GARP_ADDR_MIN	0x20
+#define GARP_ADDR_MAX	0x2F
+#define GARP_ADDR_RANGE	(GARP_ADDR_MAX - GARP_ADDR_MIN)
+
+static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto *stp_proto __read_mostly;
+
+static struct llc_sap *sap __read_mostly;
+static unsigned int sap_registered;
+static DEFINE_MUTEX(stp_proto_mutex);
+
+/* Called under rcu_read_lock from LLC */
+static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
+		       struct packet_type *pt, struct net_device *orig_dev)
+{
+	const struct ethhdr *eh = eth_hdr(skb);
+	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+	const struct stp_proto *proto;
+
+	if (pdu->ssap != LLC_SAP_BSPAN ||
+	    pdu->dsap != LLC_SAP_BSPAN ||
+	    pdu->ctrl_1 != LLC_PDU_TYPE_U)
+		goto err;
+
+	if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
+		proto = rcu_dereference(garp_protos[eh->h_dest[5] -
+						    GARP_ADDR_MIN]);
+		if (proto &&
+		    compare_ether_addr(eh->h_dest, proto->group_address))
+			goto err;
+	} else
+		proto = rcu_dereference(stp_proto);
+
+	if (!proto)
+		goto err;
+
+	proto->rcv(proto, skb, dev);
+	return 0;
+
+err:
+	kfree_skb(skb);
+	return 0;
+}
+
+int stp_proto_register(const struct stp_proto *proto)
+{
+	int err = 0;
+
+	mutex_lock(&stp_proto_mutex);
+	if (sap_registered++ == 0) {
+		sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
+		if (!sap) {
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+	if (is_zero_ether_addr(proto->group_address))
+		rcu_assign_pointer(stp_proto, proto);
+	else
+		rcu_assign_pointer(garp_protos[proto->group_address[5] -
+					       GARP_ADDR_MIN], proto);
+out:
+	mutex_unlock(&stp_proto_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(stp_proto_register);
+
+void stp_proto_unregister(const struct stp_proto *proto)
+{
+	mutex_lock(&stp_proto_mutex);
+	if (is_zero_ether_addr(proto->group_address))
+		rcu_assign_pointer(stp_proto, NULL);
+	else
+		rcu_assign_pointer(garp_protos[proto->group_address[5] -
+					       GARP_ADDR_MIN], NULL);
+	synchronize_rcu();
+
+	if (--sap_registered == 0)
+		llc_sap_put(sap);
+	mutex_unlock(&stp_proto_mutex);
+}
+EXPORT_SYMBOL_GPL(stp_proto_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index c4a382e450e2..fa073a54963e 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -17,3 +17,13 @@ config VLAN_8021Q
 	  will be called 8021q.
 
 	  If unsure, say N.
+
+config VLAN_8021Q_GVRP
+	bool "GVRP (GARP VLAN Registration Protocol) support"
+	depends on VLAN_8021Q
+	select GARP
+	help
+	  Select this to enable GVRP end-system support. GVRP is used for
+	  automatic propagation of registered VLANs to switches.
+
+	  If unsure, say N.
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 10ca7f486c3a..3006e9ed7b08 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -4,9 +4,6 @@
 
 obj-$(CONFIG_VLAN_8021Q) += 8021q.o
 
-8021q-objs := vlan.o vlan_dev.o vlan_netlink.o
-
-ifeq ($(CONFIG_PROC_FS),y)
-8021q-objs += vlanproc.o
-endif
-
+8021q-y := vlan.o vlan_dev.o vlan_netlink.o
+8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
+8021q-$(CONFIG_PROC_FS) += vlanproc.o
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ab2225da0ee2..b529110c9355 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -165,8 +165,12 @@ void unregister_vlan_dev(struct net_device *dev)
 
 	synchronize_net();
 
+	unregister_netdevice(dev);
+
 	/* If the group is now empty, kill off the group. */
 	if (grp->nr_vlans == 0) {
+		vlan_gvrp_uninit_applicant(real_dev);
+
 		if (real_dev->features & NETIF_F_HW_VLAN_RX)
 			real_dev->vlan_rx_register(real_dev, NULL);
 
@@ -178,8 +182,6 @@ void unregister_vlan_dev(struct net_device *dev)
 
 	/* Get rid of the vlan's reference to real_dev */
 	dev_put(real_dev);
-
-	unregister_netdevice(dev);
 }
 
 static void vlan_transfer_operstate(const struct net_device *dev,
@@ -249,15 +251,18 @@ int register_vlan_dev(struct net_device *dev)
 		ngrp = grp = vlan_group_alloc(real_dev);
 		if (!grp)
 			return -ENOBUFS;
+		err = vlan_gvrp_init_applicant(real_dev);
+		if (err < 0)
+			goto out_free_group;
 	}
 
 	err = vlan_group_prealloc_vid(grp, vlan_id);
 	if (err < 0)
-		goto out_free_group;
+		goto out_uninit_applicant;
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto out_free_group;
+		goto out_uninit_applicant;
 
 	/* Account for reference in struct vlan_dev_info */
 	dev_hold(real_dev);
@@ -278,6 +283,9 @@ int register_vlan_dev(struct net_device *dev)
 
 	return 0;
 
+out_uninit_applicant:
+	if (ngrp)
+		vlan_gvrp_uninit_applicant(real_dev);
 out_free_group:
 	if (ngrp)
 		vlan_group_free(ngrp);
@@ -591,9 +599,9 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 		err = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		err = vlan_dev_set_vlan_flag(dev,
-					     args.u.flag,
-					     args.vlan_qos);
+		err = vlan_dev_change_flags(dev,
+					    args.vlan_qos ? args.u.flag : 0,
+					    args.u.flag);
 		break;
 
 	case SET_VLAN_NAME_TYPE_CMD:
@@ -713,14 +721,20 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err2;
 
-	err = vlan_netlink_init();
+	err = vlan_gvrp_init();
 	if (err < 0)
 		goto err3;
 
+	err = vlan_netlink_init();
+	if (err < 0)
+		goto err4;
+
 	dev_add_pack(&vlan_packet_type);
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
+err4:
+	vlan_gvrp_uninit();
 err3:
 	unregister_netdevice_notifier(&vlan_notifier_block);
 err2:
@@ -745,8 +759,9 @@ static void __exit vlan_cleanup_module(void)
 		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
 	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
-
 	synchronize_net();
+
+	vlan_gvrp_uninit();
 }
 
 module_init(vlan_proto_init);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5229a72c7ea1..097b2e04c928 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -28,8 +28,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				    u32 skb_prio, short vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
 				 u32 skb_prio, short vlan_prio);
-int vlan_dev_set_vlan_flag(const struct net_device *dev,
-			   u32 flag, short flag_val);
+int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
 void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result);
 
@@ -38,6 +37,22 @@ void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev);
 
+#ifdef CONFIG_VLAN_8021Q_GVRP
+extern int vlan_gvrp_request_join(const struct net_device *dev);
+extern void vlan_gvrp_request_leave(const struct net_device *dev);
+extern int vlan_gvrp_init_applicant(struct net_device *dev);
+extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_gvrp_init(void);
+extern void vlan_gvrp_uninit(void);
+#else
+static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_gvrp_init(void) { return 0; }
+static inline void vlan_gvrp_uninit(void) {}
+#endif
+
 int vlan_netlink_init(void);
 void vlan_netlink_fini(void);
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 5d055c242ed8..a0617bf7cec6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -507,18 +507,23 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 }
 
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
-int vlan_dev_set_vlan_flag(const struct net_device *dev,
-			   u32 flag, short flag_val)
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-	/* verify flag is supported */
-	if (flag == VLAN_FLAG_REORDER_HDR) {
-		if (flag_val)
-			vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR;
+	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	u32 old_flags = vlan->flags;
+
+	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+		return -EINVAL;
+
+	vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+		if (vlan->flags & VLAN_FLAG_GVRP)
+			vlan_gvrp_request_join(dev);
 		else
-			vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR;
-		return 0;
+			vlan_gvrp_request_leave(dev);
 	}
-	return -EINVAL;
+	return 0;
 }
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
@@ -552,12 +557,19 @@ static int vlan_dev_open(struct net_device *dev)
 	if (dev->flags & IFF_PROMISC)
 		dev_set_promiscuity(real_dev, 1);
 
+	if (vlan->flags & VLAN_FLAG_GVRP)
+		vlan_gvrp_request_join(dev);
+
 	return 0;
 }
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	if (vlan->flags & VLAN_FLAG_GVRP)
+		vlan_gvrp_request_leave(dev);
 
 	dev_mc_unsync(real_dev, dev);
 	dev_unicast_unsync(real_dev, dev);
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
new file mode 100644
index 000000000000..db9781608362
--- /dev/null
+++ b/net/8021q/vlan_gvrp.c
@@ -0,0 +1,66 @@
+/*
+ *	IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
+ *
+ *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <net/garp.h>
+#include "vlan.h"
+
+#define GARP_GVRP_ADDRESS	{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum gvrp_attributes {
+	GVRP_ATTR_INVALID,
+	GVRP_ATTR_VID,
+	__GVRP_ATTR_MAX
+};
+#define GVRP_ATTR_MAX	(__GVRP_ATTR_MAX - 1)
+
+static struct garp_application vlan_gvrp_app __read_mostly = {
+	.proto.group_address = GARP_GVRP_ADDRESS,
+	.maxattr = GVRP_ATTR_MAX,
+	.type = GARP_APPLICATION_GVRP,
+};
+
+int vlan_gvrp_request_join(const struct net_device *dev)
+{
+	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	__be16 vid = htons(vlan->vlan_id);
+
+	return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
+				 &vid, sizeof(vid), GVRP_ATTR_VID);
+}
+
+void vlan_gvrp_request_leave(const struct net_device *dev)
+{
+	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+	__be16 vid = htons(vlan->vlan_id);
+
+	garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
+			   &vid, sizeof(vid), GVRP_ATTR_VID);
+}
+
+int vlan_gvrp_init_applicant(struct net_device *dev)
+{
+	return garp_init_applicant(dev, &vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit_applicant(struct net_device *dev)
+{
+	garp_uninit_applicant(dev, &vlan_gvrp_app);
+}
+
+int __init vlan_gvrp_init(void)
+{
+	return garp_register_application(&vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit(void)
+{
+	garp_unregister_application(&vlan_gvrp_app);
+}
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index c93e69ec28ed..e9c91dcecc9b 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -59,7 +59,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 	}
 	if (data[IFLA_VLAN_FLAGS]) {
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
-		if ((flags->flags & flags->mask) & ~VLAN_FLAG_REORDER_HDR)
+		if ((flags->flags & flags->mask) &
+		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
 			return -EINVAL;
 	}
 
@@ -75,7 +76,6 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 static int vlan_changelink(struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
-	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	struct ifla_vlan_flags *flags;
 	struct ifla_vlan_qos_mapping *m;
 	struct nlattr *attr;
@@ -83,8 +83,7 @@ static int vlan_changelink(struct net_device *dev,
 
 	if (data[IFLA_VLAN_FLAGS]) {
 		flags = nla_data(data[IFLA_VLAN_FLAGS]);
-		vlan->flags = (vlan->flags & ~flags->mask) |
-			      (flags->flags & flags->mask);
+		vlan_dev_change_flags(dev, flags->flags, flags->mask);
 	}
 	if (data[IFLA_VLAN_INGRESS_QOS]) {
 		nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
diff --git a/net/Kconfig b/net/Kconfig
index acbf7c60e89b..b98668751749 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -181,6 +181,7 @@ source "net/dccp/Kconfig"
 source "net/sctp/Kconfig"
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
+source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/8021q/Kconfig"
 source "net/decnet/Kconfig"
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 6afa77d63bb5..82e85abc303d 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -9,7 +9,7 @@
 #include "signaling.h"
 #include "addr.h"
 
-static int check_addr(struct sockaddr_atmsvc *addr)
+static int check_addr(const struct sockaddr_atmsvc *addr)
 {
 	int i;
 
@@ -23,7 +23,7 @@ static int check_addr(struct sockaddr_atmsvc *addr)
 	return -EINVAL;
 }
 
-static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b)
+static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b)
 {
 	if (*a->sas_addr.prv)
 		if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN))
@@ -35,7 +35,7 @@ static int identical(struct sockaddr_atmsvc *a, struct sockaddr_atmsvc *b)
 	return !strcmp(a->sas_addr.pub, b->sas_addr.pub);
 }
 
-static void notify_sigd(struct atm_dev *dev)
+static void notify_sigd(const struct atm_dev *dev)
 {
 	struct sockaddr_atmpvc pvc;
 
@@ -63,7 +63,7 @@ void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype)
 	notify_sigd(dev);
 }
 
-int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
 		 enum atm_addr_type_t atype)
 {
 	unsigned long flags;
@@ -98,7 +98,7 @@ int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
 	return 0;
 }
 
-int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
 		 enum atm_addr_type_t atype)
 {
 	unsigned long flags;
diff --git a/net/atm/addr.h b/net/atm/addr.h
index f39433ad45da..6837e9e7eb13 100644
--- a/net/atm/addr.h
+++ b/net/atm/addr.h
@@ -10,9 +10,9 @@
 #include <linux/atmdev.h>
 
 void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type);
-int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
 		 enum atm_addr_type_t type);
-int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr,
+int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
 		 enum atm_addr_type_t type);
 int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf,
 		 size_t size, enum atm_addr_type_t type);
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 05fafdc2eea3..8d9a6f158880 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -52,12 +52,12 @@ static void skb_debug(const struct sk_buff *skb)
 #define ETHERTYPE_IPV6	0x86, 0xdd
 #define PAD_BRIDGED	0x00, 0x00
 
-static unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
-static unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
-static unsigned char llc_oui_pid_pad[] =
+static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
+static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
+static const unsigned char llc_oui_pid_pad[] =
 	{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
-static unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
-static unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
+static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
+static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
 
 enum br2684_encaps {
 	e_vc = BR2684_ENCAPS_VC,
@@ -217,8 +217,8 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
 	return 1;
 }
 
-static inline struct br2684_vcc *pick_outgoing_vcc(struct sk_buff *skb,
-						   struct br2684_dev *brdev)
+static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
+						   const struct br2684_dev *brdev)
 {
 	return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next);	/* 1 vcc/dev right now */
 }
diff --git a/net/atm/common.c b/net/atm/common.c
index c865517ba449..d34edbe754c8 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -262,7 +262,7 @@ static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
 }
 
 
-static int check_ci(struct atm_vcc *vcc, short vpi, int vci)
+static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
 {
 	struct hlist_head *head = &vcc_hash[vci &
 					    (VCC_HTABLE_SIZE - 1)];
@@ -290,7 +290,7 @@ static int check_ci(struct atm_vcc *vcc, short vpi, int vci)
 }
 
 
-static int find_ci(struct atm_vcc *vcc, short *vpi, int *vci)
+static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
 {
 	static short p;        /* poor man's per-device cache */
 	static int c;
@@ -646,7 +646,7 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
 }
 
 
-static int check_tp(struct atm_trafprm *tp)
+static int check_tp(const struct atm_trafprm *tp)
 {
 	/* @@@ Should be merged with adjust_tp */
 	if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0;
@@ -663,7 +663,7 @@ static int check_tp(struct atm_trafprm *tp)
663} 663}
664 664
665 665
666static int check_qos(struct atm_qos *qos) 666static int check_qos(const struct atm_qos *qos)
667{ 667{
668 int error; 668 int error;
669 669
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 653aca3573ac..5799fb52365a 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -65,36 +65,36 @@ static int lec_close(struct net_device *dev);
 static struct net_device_stats *lec_get_stats(struct net_device *dev);
 static void lec_init(struct net_device *dev);
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
-					  unsigned char *mac_addr);
+					  const unsigned char *mac_addr);
 static int lec_arp_remove(struct lec_priv *priv,
 			  struct lec_arp_table *to_remove);
 /* LANE2 functions */
-static void lane2_associate_ind(struct net_device *dev, u8 *mac_address,
-				u8 *tlvs, u32 sizeoftlvs);
-static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
+static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address,
+				const u8 *tlvs, u32 sizeoftlvs);
+static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
 			 u8 **tlvs, u32 *sizeoftlvs);
-static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
-			       u8 *tlvs, u32 sizeoftlvs);
+static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
+			       const u8 *tlvs, u32 sizeoftlvs);
 
-static int lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
+static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
 			   unsigned long permanent);
 static void lec_arp_check_empties(struct lec_priv *priv,
 				  struct atm_vcc *vcc, struct sk_buff *skb);
 static void lec_arp_destroy(struct lec_priv *priv);
 static void lec_arp_init(struct lec_priv *priv);
 static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
-				       unsigned char *mac_to_find,
+				       const unsigned char *mac_to_find,
 				       int is_rdesc,
 				       struct lec_arp_table **ret_entry);
-static void lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr,
-			   unsigned char *atm_addr, unsigned long remoteflag,
+static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
+			   const unsigned char *atm_addr, unsigned long remoteflag,
 			   unsigned int targetless_le_arp);
 static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
 static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
 static void lec_set_flush_tran_id(struct lec_priv *priv,
-				  unsigned char *atm_addr,
+				  const unsigned char *atm_addr,
 				  unsigned long tran_id);
-static void lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data,
+static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
 			  struct atm_vcc *vcc,
 			  void (*old_push) (struct atm_vcc *vcc,
 					    struct sk_buff *skb));
@@ -634,7 +634,7 @@ static struct atm_dev lecatm_dev = {
  */
 static int
 send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
-	     unsigned char *mac_addr, unsigned char *atm_addr,
+	     const unsigned char *mac_addr, const unsigned char *atm_addr,
 	     struct sk_buff *data)
 {
 	struct sock *sk;
@@ -705,10 +705,9 @@ static void lec_init(struct net_device *dev)
 	dev->set_multicast_list = lec_set_multicast_list;
 	dev->do_ioctl = NULL;
 	printk("%s: Initialized!\n", dev->name);
-	return;
 }
 
-static unsigned char lec_ctrl_magic[] = {
+static const unsigned char lec_ctrl_magic[] = {
 	0xff,
 	0x00,
 	0x01,
@@ -1276,7 +1275,7 @@ module_exit(lane_module_cleanup);
1276 * lec will be used. 1275 * lec will be used.
1277 * If dst_mac == NULL, targetless LE_ARP will be sent 1276 * If dst_mac == NULL, targetless LE_ARP will be sent
1278 */ 1277 */
1279static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force, 1278static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force,
1280 u8 **tlvs, u32 *sizeoftlvs) 1279 u8 **tlvs, u32 *sizeoftlvs)
1281{ 1280{
1282 unsigned long flags; 1281 unsigned long flags;
@@ -1322,8 +1321,8 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
1322 * Returns 1 for success, 0 for failure (out of memory) 1321 * Returns 1 for success, 0 for failure (out of memory)
1323 * 1322 *
1324 */ 1323 */
1325static int lane2_associate_req(struct net_device *dev, u8 *lan_dst, 1324static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1326 u8 *tlvs, u32 sizeoftlvs) 1325 const u8 *tlvs, u32 sizeoftlvs)
1327{ 1326{
1328 int retval; 1327 int retval;
1329 struct sk_buff *skb; 1328 struct sk_buff *skb;
@@ -1358,8 +1357,8 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
1358 * LANE2: 3.1.5, LE_ASSOCIATE.indication 1357 * LANE2: 3.1.5, LE_ASSOCIATE.indication
1359 * 1358 *
1360 */ 1359 */
1361static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, 1360static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1362 u8 *tlvs, u32 sizeoftlvs) 1361 const u8 *tlvs, u32 sizeoftlvs)
1363{ 1362{
1364#if 0 1363#if 0
1365 int i = 0; 1364 int i = 0;
@@ -1744,7 +1743,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
1744 * Find entry by mac_address 1743 * Find entry by mac_address
1745 */ 1744 */
1746static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, 1745static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1747 unsigned char *mac_addr) 1746 const unsigned char *mac_addr)
1748{ 1747{
1749 struct hlist_node *node; 1748 struct hlist_node *node;
1750 struct hlist_head *head; 1749 struct hlist_head *head;
@@ -1764,7 +1763,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1764} 1763}
1765 1764
1766static struct lec_arp_table *make_entry(struct lec_priv *priv, 1765static struct lec_arp_table *make_entry(struct lec_priv *priv,
1767 unsigned char *mac_addr) 1766 const unsigned char *mac_addr)
1768{ 1767{
1769 struct lec_arp_table *to_return; 1768 struct lec_arp_table *to_return;
1770 1769
@@ -1921,7 +1920,7 @@ restart:
1921 * 1920 *
1922 */ 1921 */
1923static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, 1922static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1924 unsigned char *mac_to_find, int is_rdesc, 1923 const unsigned char *mac_to_find, int is_rdesc,
1925 struct lec_arp_table **ret_entry) 1924 struct lec_arp_table **ret_entry)
1926{ 1925{
1927 unsigned long flags; 1926 unsigned long flags;
@@ -2017,7 +2016,7 @@ out:
2017} 2016}
2018 2017
2019static int 2018static int
2020lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, 2019lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
2021 unsigned long permanent) 2020 unsigned long permanent)
2022{ 2021{
2023 unsigned long flags; 2022 unsigned long flags;
@@ -2047,8 +2046,8 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr,
2047 * Notifies: Response to arp_request (atm_addr != NULL) 2046 * Notifies: Response to arp_request (atm_addr != NULL)
2048 */ 2047 */
2049static void 2048static void
2050lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, 2049lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2051 unsigned char *atm_addr, unsigned long remoteflag, 2050 const unsigned char *atm_addr, unsigned long remoteflag,
2052 unsigned int targetless_le_arp) 2051 unsigned int targetless_le_arp)
2053{ 2052{
2054 unsigned long flags; 2053 unsigned long flags;
@@ -2148,7 +2147,7 @@ out:
2148 * Notifies: Vcc setup ready 2147 * Notifies: Vcc setup ready
2149 */ 2148 */
2150static void 2149static void
2151lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, 2150lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2152 struct atm_vcc *vcc, 2151 struct atm_vcc *vcc,
2153 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) 2152 void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
2154{ 2153{
@@ -2336,7 +2335,7 @@ restart:
2336 2335
2337static void 2336static void
2338lec_set_flush_tran_id(struct lec_priv *priv, 2337lec_set_flush_tran_id(struct lec_priv *priv,
2339 unsigned char *atm_addr, unsigned long tran_id) 2338 const unsigned char *atm_addr, unsigned long tran_id)
2340{ 2339{
2341 unsigned long flags; 2340 unsigned long flags;
2342 struct hlist_node *node; 2341 struct hlist_node *node;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index b41cda7ea1e1..0d376682c1a3 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -42,12 +42,12 @@ struct lecdatahdr_8025 {
42 * 42 *
43 */ 43 */
44struct lane2_ops { 44struct lane2_ops {
45 int (*resolve) (struct net_device *dev, u8 *dst_mac, int force, 45 int (*resolve) (struct net_device *dev, const u8 *dst_mac, int force,
46 u8 **tlvs, u32 *sizeoftlvs); 46 u8 **tlvs, u32 *sizeoftlvs);
47 int (*associate_req) (struct net_device *dev, u8 *lan_dst, 47 int (*associate_req) (struct net_device *dev, const u8 *lan_dst,
48 u8 *tlvs, u32 sizeoftlvs); 48 const u8 *tlvs, u32 sizeoftlvs);
49 void (*associate_indicator) (struct net_device *dev, u8 *mac_addr, 49 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
50 u8 *tlvs, u32 sizeoftlvs); 50 const u8 *tlvs, u32 sizeoftlvs);
51}; 51};
52 52
53/* 53/*
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2712544cf0ca..97eaa23ad9ea 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -893,13 +893,11 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
893 893
894 sk->sk_destruct = ax25_free_sock; 894 sk->sk_destruct = ax25_free_sock;
895 sk->sk_type = osk->sk_type; 895 sk->sk_type = osk->sk_type;
896 sk->sk_socket = osk->sk_socket;
897 sk->sk_priority = osk->sk_priority; 896 sk->sk_priority = osk->sk_priority;
898 sk->sk_protocol = osk->sk_protocol; 897 sk->sk_protocol = osk->sk_protocol;
899 sk->sk_rcvbuf = osk->sk_rcvbuf; 898 sk->sk_rcvbuf = osk->sk_rcvbuf;
900 sk->sk_sndbuf = osk->sk_sndbuf; 899 sk->sk_sndbuf = osk->sk_sndbuf;
901 sk->sk_state = TCP_ESTABLISHED; 900 sk->sk_state = TCP_ESTABLISHED;
902 sk->sk_sleep = osk->sk_sleep;
903 sock_copy_flags(sk, osk); 901 sock_copy_flags(sk, osk);
904 902
905 oax25 = ax25_sk(osk); 903 oax25 = ax25_sk(osk);
@@ -1361,13 +1359,11 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
1361 goto out; 1359 goto out;
1362 1360
1363 newsk = skb->sk; 1361 newsk = skb->sk;
1364 newsk->sk_socket = newsock; 1362 sock_graft(newsk, newsock);
1365 newsk->sk_sleep = &newsock->wait;
1366 1363
1367 /* Now attach up the new socket */ 1364 /* Now attach up the new socket */
1368 kfree_skb(skb); 1365 kfree_skb(skb);
1369 sk->sk_ack_backlog--; 1366 sk->sk_ack_backlog--;
1370 newsock->sk = newsk;
1371 newsock->state = SS_CONNECTED; 1367 newsock->state = SS_CONNECTED;
1372 1368
1373out: 1369out:
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 96e4b9273250..cdc7e751ef36 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -39,11 +39,9 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
39 39
40 switch (ax25->state) { 40 switch (ax25->state) {
41 case AX25_STATE_0: 41 case AX25_STATE_0:
42 /* Magic here: If we listen() and a new link dies before it 42 if (!sk ||
43 is accepted() it isn't 'dead' so doesn't get removed. */ 43 sock_flag(sk, SOCK_DESTROY) ||
44 if (!sk || sock_flag(sk, SOCK_DESTROY) || 44 sock_flag(sk, SOCK_DEAD)) {
45 (sk->sk_state == TCP_LISTEN &&
46 sock_flag(sk, SOCK_DEAD))) {
47 if (sk) { 45 if (sk) {
48 sock_hold(sk); 46 sock_hold(sk);
49 ax25_destroy_socket(ax25); 47 ax25_destroy_socket(ax25);
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index e69244dd8de8..b69bf4e7c48b 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -16,10 +16,6 @@
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18 18
19/*
20 * $Id: bnep.h,v 1.5 2002/08/04 21:23:58 maxk Exp $
21 */
22
23#ifndef _BNEP_H 19#ifndef _BNEP_H
24#define _BNEP_H 20#define _BNEP_H
25 21
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f85d94643aaf..1d98a1b80da7 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -25,10 +25,6 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/*
29 * $Id: core.c,v 1.20 2002/08/04 21:23:58 maxk Exp $
30 */
31
32#include <linux/module.h> 28#include <linux/module.h>
33 29
34#include <linux/kernel.h> 30#include <linux/kernel.h>
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 95e3837e4312..d9fa0ab2c87f 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,10 +25,6 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28/*
29 * $Id: netdev.c,v 1.8 2002/08/04 21:23:58 maxk Exp $
30 */
31
32#include <linux/module.h> 28#include <linux/module.h>
33 29
34#include <linux/socket.h> 30#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 201e5b1ce473..8ffb57f2303a 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,10 +24,6 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/*
28 * $Id: sock.c,v 1.4 2002/08/04 21:23:58 maxk Exp $
29 */
30
31#include <linux/module.h> 27#include <linux/module.h>
32 28
33#include <linux/types.h> 29#include <linux/types.h>
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 0c2c93735e93..b4fb84e398e5 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * Bluetooth RFCOMM core. 25 * Bluetooth RFCOMM core.
26 *
27 * $Id: core.c,v 1.42 2002/10/01 23:26:25 maxk Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5083adcbfae5..c9054487670a 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 *
27 * $Id: sock.c,v 1.24 2002/10/03 01:00:34 maxk Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c9191871c1e0..be84f4fc1477 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -23,8 +23,6 @@
23 23
24/* 24/*
25 * RFCOMM TTY. 25 * RFCOMM TTY.
26 *
27 * $Id: tty.c,v 1.24 2002/10/03 01:54:38 holtmann Exp $
28 */ 26 */
29 27
30#include <linux/module.h> 28#include <linux/module.h>
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 12265aff7099..e143ca678881 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -5,6 +5,7 @@
5config BRIDGE 5config BRIDGE
6 tristate "802.1d Ethernet Bridging" 6 tristate "802.1d Ethernet Bridging"
7 select LLC 7 select LLC
8 select STP
8 ---help--- 9 ---help---
9 If you say Y here, then your Linux box will be able to act as an 10 If you say Y here, then your Linux box will be able to act as an
10 Ethernet bridge, which means that the different Ethernet segments it 11 Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 8f3c58e5f7a5..573acdf6f9ff 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br.c,v 1.47 2001/12/24 00:56:41 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -20,21 +18,24 @@
20#include <linux/init.h> 18#include <linux/init.h>
21#include <linux/llc.h> 19#include <linux/llc.h>
22#include <net/llc.h> 20#include <net/llc.h>
21#include <net/stp.h>
23 22
24#include "br_private.h" 23#include "br_private.h"
25 24
26int (*br_should_route_hook)(struct sk_buff *skb); 25int (*br_should_route_hook)(struct sk_buff *skb);
27 26
28static struct llc_sap *br_stp_sap; 27static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv,
29};
29 30
30static int __init br_init(void) 31static int __init br_init(void)
31{ 32{
32 int err; 33 int err;
33 34
34 br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv); 35 err = stp_proto_register(&br_stp_proto);
35 if (!br_stp_sap) { 36 if (err < 0) {
36 printk(KERN_ERR "bridge: can't register sap for STP\n"); 37 printk(KERN_ERR "bridge: can't register sap for STP\n");
37 return -EADDRINUSE; 38 return err;
38 } 39 }
39 40
40 err = br_fdb_init(); 41 err = br_fdb_init();
@@ -67,13 +68,13 @@ err_out2:
67err_out1: 68err_out1:
68 br_fdb_fini(); 69 br_fdb_fini();
69err_out: 70err_out:
70 llc_sap_put(br_stp_sap); 71 stp_proto_unregister(&br_stp_proto);
71 return err; 72 return err;
72} 73}
73 74
74static void __exit br_deinit(void) 75static void __exit br_deinit(void)
75{ 76{
76 rcu_assign_pointer(br_stp_sap->rcv_func, NULL); 77 stp_proto_unregister(&br_stp_proto);
77 78
78 br_netlink_fini(); 79 br_netlink_fini();
79 unregister_netdevice_notifier(&br_device_notifier); 80 unregister_netdevice_notifier(&br_device_notifier);
@@ -84,7 +85,6 @@ static void __exit br_deinit(void)
84 synchronize_net(); 85 synchronize_net();
85 86
86 br_netfilter_fini(); 87 br_netfilter_fini();
87 llc_sap_put(br_stp_sap);
88 br_fdb_get_hook = NULL; 88 br_fdb_get_hook = NULL;
89 br_fdb_put_hook = NULL; 89 br_fdb_put_hook = NULL;
90 90
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index bf7787395fe0..d9449df7cad5 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_device.c,v 1.6 2001/12/24 00:59:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -21,12 +19,6 @@
21#include <asm/uaccess.h> 19#include <asm/uaccess.h>
22#include "br_private.h" 20#include "br_private.h"
23 21
24static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
25{
26 struct net_bridge *br = netdev_priv(dev);
27 return &br->statistics;
28}
29
30/* net device transmit always called with no BH (preempt_disabled) */ 22/* net device transmit always called with no BH (preempt_disabled) */
31int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 23int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
32{ 24{
@@ -34,8 +26,8 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
34 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
35 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
36 28
37 br->statistics.tx_packets++; 29 dev->stats.tx_packets++;
38 br->statistics.tx_bytes += skb->len; 30 dev->stats.tx_bytes += skb->len;
39 31
40 skb_reset_mac_header(skb); 32 skb_reset_mac_header(skb);
41 skb_pull(skb, ETH_HLEN); 33 skb_pull(skb, ETH_HLEN);
@@ -95,6 +87,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
95 spin_lock_bh(&br->lock); 87 spin_lock_bh(&br->lock);
96 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 88 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
97 br_stp_change_bridge_id(br, addr->sa_data); 89 br_stp_change_bridge_id(br, addr->sa_data);
90 br->flags |= BR_SET_MAC_ADDR;
98 spin_unlock_bh(&br->lock); 91 spin_unlock_bh(&br->lock);
99 92
100 return 0; 93 return 0;
@@ -161,7 +154,6 @@ void br_dev_setup(struct net_device *dev)
161 ether_setup(dev); 154 ether_setup(dev);
162 155
163 dev->do_ioctl = br_dev_ioctl; 156 dev->do_ioctl = br_dev_ioctl;
164 dev->get_stats = br_dev_get_stats;
165 dev->hard_start_xmit = br_dev_xmit; 157 dev->hard_start_xmit = br_dev_xmit;
166 dev->open = br_dev_open; 158 dev->open = br_dev_open;
167 dev->set_multicast_list = br_dev_set_multicast_list; 159 dev->set_multicast_list = br_dev_set_multicast_list;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 72c5976a5ce3..4de74cdd091d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_fdb.c,v 1.6 2002/01/17 00:57:07 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd7c35c3c7b..bdd9ccea17ce 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -91,7 +89,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
91/* called with rcu_read_lock */ 89/* called with rcu_read_lock */
92void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 90void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
93{ 91{
94 if (should_deliver(to, skb)) { 92 if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
95 __br_forward(to, skb); 93 __br_forward(to, skb);
96 return; 94 return;
97 } 95 }
@@ -115,7 +113,7 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
115 struct sk_buff *skb2; 113 struct sk_buff *skb2;
116 114
117 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) { 115 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
118 br->statistics.tx_dropped++; 116 br->dev->stats.tx_dropped++;
119 kfree_skb(skb); 117 kfree_skb(skb);
120 return; 118 return;
121 } 119 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f38cc5317b88..497df086141a 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_if.c,v 1.7 2001/12/24 00:59:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -389,6 +387,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
389 goto err2; 387 goto err2;
390 388
391 rcu_assign_pointer(dev->br_port, p); 389 rcu_assign_pointer(dev->br_port, p);
390 dev_disable_lro(dev);
392 dev_set_promiscuity(dev, 1); 391 dev_set_promiscuity(dev, 1);
393 392
394 list_add_rcu(&p->list, &br->port_list); 393 list_add_rcu(&p->list, &br->port_list);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 255c00f60ce7..30b88777c3df 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_input.c,v 1.10 2001/12/24 04:50:20 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -24,13 +22,13 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
24 22
25static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
26{ 24{
27 struct net_device *indev; 25 struct net_device *indev, *brdev = br->dev;
28 26
29 br->statistics.rx_packets++; 27 brdev->stats.rx_packets++;
30 br->statistics.rx_bytes += skb->len; 28 brdev->stats.rx_bytes += skb->len;
31 29
32 indev = skb->dev; 30 indev = skb->dev;
33 skb->dev = br->dev; 31 skb->dev = brdev;
34 32
35 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
36 netif_receive_skb); 34 netif_receive_skb);
@@ -64,7 +62,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
64 dst = NULL; 62 dst = NULL;
65 63
66 if (is_multicast_ether_addr(dest)) { 64 if (is_multicast_ether_addr(dest)) {
67 br->statistics.multicast++; 65 br->dev->stats.multicast++;
68 skb2 = skb; 66 skb2 = skb;
69 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
70 skb2 = skb; 68 skb2 = skb;
@@ -136,14 +134,11 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
136 if (skb->protocol == htons(ETH_P_PAUSE)) 134 if (skb->protocol == htons(ETH_P_PAUSE))
137 goto drop; 135 goto drop;
138 136
139 /* Process STP BPDU's through normal netif_receive_skb() path */ 137 if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
140 if (p->br->stp_enabled != BR_NO_STP) { 138 NULL, br_handle_local_finish))
141 if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, 139 return NULL; /* frame consumed by filter */
142 NULL, br_handle_local_finish)) 140 else
143 return NULL; 141 return skb; /* continue processing */
144 else
145 return skb;
146 }
147 } 142 }
148 143
149 switch (p->state) { 144 switch (p->state) {
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 0655a5f07f58..eeee218eed80 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_ioctl.c,v 1.4 2000/11/08 05:16:40 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 00644a544e3c..88d8ec7b3142 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_notify.c,v 1.2 2000/02/21 15:51:34 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c11b554fd109..815ed38925b2 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private.h,v 1.7 2001/12/24 00:59:55 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
@@ -90,11 +88,12 @@ struct net_bridge
90 spinlock_t lock; 88 spinlock_t lock;
91 struct list_head port_list; 89 struct list_head port_list;
92 struct net_device *dev; 90 struct net_device *dev;
93 struct net_device_stats statistics;
94 spinlock_t hash_lock; 91 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 92 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list; 93 struct list_head age_list;
97 unsigned long feature_mask; 94 unsigned long feature_mask;
95 unsigned long flags;
96#define BR_SET_MAC_ADDR 0x00000001
98 97
99 /* STP */ 98 /* STP */
100 bridge_id designated_root; 99 bridge_id designated_root;
@@ -227,8 +226,9 @@ extern void br_stp_set_path_cost(struct net_bridge_port *p,
227extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 226extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
228 227
229/* br_stp_bpdu.c */ 228/* br_stp_bpdu.c */
230extern int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, 229struct stp_proto;
231 struct packet_type *pt, struct net_device *orig_dev); 230extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
231 struct net_device *dev);
232 232
233/* br_stp_timer.c */ 233/* br_stp_timer.c */
234extern void br_stp_timer_init(struct net_bridge *br); 234extern void br_stp_timer_init(struct net_bridge *br);
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index e29f01ac1adf..8b650f7fbfa0 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -4,8 +4,6 @@
4 * Authors: 4 * Authors:
5 * Lennert Buytenhek <buytenh@gnu.org> 5 * Lennert Buytenhek <buytenh@gnu.org>
6 * 6 *
7 * $Id: br_private_stp.h,v 1.3 2001/02/05 06:03:47 davem Exp $
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e38034aa56f5..284d1b2fa1ff 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp.c,v 1.4 2000/06/19 10:13:35 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index ddeb6e5d45d6..996476174517 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_bpdu.c,v 1.3 2001/11/10 02:35:25 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -20,6 +18,7 @@
20#include <net/net_namespace.h> 18#include <net/net_namespace.h>
21#include <net/llc.h> 19#include <net/llc.h>
22#include <net/llc_pdu.h> 20#include <net/llc_pdu.h>
21#include <net/stp.h>
23#include <asm/unaligned.h> 22#include <asm/unaligned.h>
24 23
25#include "br_private.h" 24#include "br_private.h"
@@ -133,10 +132,9 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
133 * 132 *
134 * NO locks, but rcu_read_lock (preempt_disabled) 133 * NO locks, but rcu_read_lock (preempt_disabled)
135 */ 134 */
136int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, 135void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
137 struct packet_type *pt, struct net_device *orig_dev) 136 struct net_device *dev)
138{ 137{
139 const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
140 const unsigned char *dest = eth_hdr(skb)->h_dest; 138 const unsigned char *dest = eth_hdr(skb)->h_dest;
141 struct net_bridge_port *p = rcu_dereference(dev->br_port); 139 struct net_bridge_port *p = rcu_dereference(dev->br_port);
142 struct net_bridge *br; 140 struct net_bridge *br;
@@ -148,11 +146,6 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
148 if (!p) 146 if (!p)
149 goto err; 147 goto err;
150 148
151 if (pdu->ssap != LLC_SAP_BSPAN
152 || pdu->dsap != LLC_SAP_BSPAN
153 || pdu->ctrl_1 != LLC_PDU_TYPE_U)
154 goto err;
155
156 if (!pskb_may_pull(skb, 4)) 149 if (!pskb_may_pull(skb, 4))
157 goto err; 150 goto err;
158 151
@@ -226,5 +219,4 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
226 spin_unlock(&br->lock); 219 spin_unlock(&br->lock);
227 err: 220 err:
228 kfree_skb(skb); 221 kfree_skb(skb);
229 return 0;
230} 222}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 1a430eccec9b..9a52ac5b4525 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_if.c,v 1.4 2001/04/14 21:14:39 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -216,6 +214,10 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br)
216 const unsigned char *addr = br_mac_zero; 214 const unsigned char *addr = br_mac_zero;
217 struct net_bridge_port *p; 215 struct net_bridge_port *p;
218 216
217 /* user has chosen a value so keep it */
218 if (br->flags & BR_SET_MAC_ADDR)
219 return;
220
219 list_for_each_entry(p, &br->port_list, list) { 221 list_for_each_entry(p, &br->port_list, list) {
220 if (addr == br_mac_zero || 222 if (addr == br_mac_zero ||
221 memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) 223 memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 77f5255e6915..772a140bfdf0 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org> 6 * Lennert Buytenhek <buytenh@gnu.org>
7 * 7 *
8 * $Id: br_stp_timer.c,v 1.3 2000/05/05 02:17:17 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7beeefa0f9c0..540df4106bec 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -83,6 +83,15 @@ config BRIDGE_EBT_IP
83 83
84 To compile it as a module, choose M here. If unsure, say N. 84 To compile it as a module, choose M here. If unsure, say N.
85 85
86config BRIDGE_EBT_IP6
87 tristate "ebt: IP6 filter support"
88 depends on BRIDGE_NF_EBTABLES && IPV6
89 help
 90 This option adds the IP6 match, which allows basic IPv6 header field
91 filtering.
92
93 To compile it as a module, choose M here. If unsure, say N.
94
86config BRIDGE_EBT_LIMIT 95config BRIDGE_EBT_LIMIT
87 tristate "ebt: limit match support" 96 tristate "ebt: limit match support"
88 depends on BRIDGE_NF_EBTABLES 97 depends on BRIDGE_NF_EBTABLES
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 83715d73a503..0718699540b0 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o
14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o 14obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o
15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o 15obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o
16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o 16obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o
17obj-$(CONFIG_BRIDGE_EBT_IP6) += ebt_ip6.o
17obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o 18obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o
18obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o 19obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o
19obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o 20obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
new file mode 100644
index 000000000000..36efb3a75249
--- /dev/null
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -0,0 +1,144 @@
1/*
2 * ebt_ip6
3 *
4 * Authors:
5 * Manohar Castelino <manohar.r.castelino@intel.com>
6 * Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
7 * Jan Engelhardt <jengelh@computergmbh.de>
8 *
9 * Summary:
10 * This is just a modification of the IPv4 code written by
11 * Bart De Schuymer <bdschuym@pandora.be>
12 * with the changes required to support IPv6
13 *
14 * Jan, 2008
15 */
16
17#include <linux/netfilter_bridge/ebtables.h>
18#include <linux/netfilter_bridge/ebt_ip6.h>
19#include <linux/ipv6.h>
20#include <net/ipv6.h>
21#include <linux/in.h>
22#include <linux/module.h>
23#include <net/dsfield.h>
24
25struct tcpudphdr {
26 __be16 src;
27 __be16 dst;
28};
29
30static int ebt_filter_ip6(const struct sk_buff *skb,
31 const struct net_device *in,
32 const struct net_device *out, const void *data,
33 unsigned int datalen)
34{
35 const struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
36 const struct ipv6hdr *ih6;
37 struct ipv6hdr _ip6h;
38 const struct tcpudphdr *pptr;
39 struct tcpudphdr _ports;
40 struct in6_addr tmp_addr;
41 int i;
42
43 ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
44 if (ih6 == NULL)
45 return EBT_NOMATCH;
46 if (info->bitmask & EBT_IP6_TCLASS &&
47 FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
48 return EBT_NOMATCH;
49 for (i = 0; i < 4; i++)
50 tmp_addr.in6_u.u6_addr32[i] = ih6->saddr.in6_u.u6_addr32[i] &
51 info->smsk.in6_u.u6_addr32[i];
52 if (info->bitmask & EBT_IP6_SOURCE &&
53 FWINV((ipv6_addr_cmp(&tmp_addr, &info->saddr) != 0),
54 EBT_IP6_SOURCE))
55 return EBT_NOMATCH;
56 for (i = 0; i < 4; i++)
57 tmp_addr.in6_u.u6_addr32[i] = ih6->daddr.in6_u.u6_addr32[i] &
58 info->dmsk.in6_u.u6_addr32[i];
59 if (info->bitmask & EBT_IP6_DEST &&
60 FWINV((ipv6_addr_cmp(&tmp_addr, &info->daddr) != 0), EBT_IP6_DEST))
61 return EBT_NOMATCH;
62 if (info->bitmask & EBT_IP6_PROTO) {
63 uint8_t nexthdr = ih6->nexthdr;
64 int offset_ph;
65
66 offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
67 if (offset_ph == -1)
68 return EBT_NOMATCH;
69 if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
70 return EBT_NOMATCH;
71 if (!(info->bitmask & EBT_IP6_DPORT) &&
72 !(info->bitmask & EBT_IP6_SPORT))
73 return EBT_MATCH;
74 pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
75 &_ports);
76 if (pptr == NULL)
77 return EBT_NOMATCH;
78 if (info->bitmask & EBT_IP6_DPORT) {
79 u32 dst = ntohs(pptr->dst);
80 if (FWINV(dst < info->dport[0] ||
81 dst > info->dport[1], EBT_IP6_DPORT))
82 return EBT_NOMATCH;
83 }
84 if (info->bitmask & EBT_IP6_SPORT) {
85 u32 src = ntohs(pptr->src);
86 if (FWINV(src < info->sport[0] ||
87 src > info->sport[1], EBT_IP6_SPORT))
88 return EBT_NOMATCH;
89 }
90 return EBT_MATCH;
91 }
92 return EBT_MATCH;
93}
94
95static int ebt_ip6_check(const char *tablename, unsigned int hookmask,
96 const struct ebt_entry *e, void *data, unsigned int datalen)
97{
98 struct ebt_ip6_info *info = (struct ebt_ip6_info *)data;
99
100 if (datalen != EBT_ALIGN(sizeof(struct ebt_ip6_info)))
101 return -EINVAL;
102 if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
103 return -EINVAL;
104 if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
105 return -EINVAL;
106 if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
107 if (info->invflags & EBT_IP6_PROTO)
108 return -EINVAL;
109 if (info->protocol != IPPROTO_TCP &&
110 info->protocol != IPPROTO_UDP &&
111 info->protocol != IPPROTO_UDPLITE &&
112 info->protocol != IPPROTO_SCTP &&
113 info->protocol != IPPROTO_DCCP)
114 return -EINVAL;
115 }
116 if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
117 return -EINVAL;
118 if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
119 return -EINVAL;
120 return 0;
121}
122
123static struct ebt_match filter_ip6 =
124{
125 .name = EBT_IP6_MATCH,
126 .match = ebt_filter_ip6,
127 .check = ebt_ip6_check,
128 .me = THIS_MODULE,
129};
130
131static int __init ebt_ip6_init(void)
132{
133 return ebt_register_match(&filter_ip6);
134}
135
136static void __exit ebt_ip6_fini(void)
137{
138 ebt_unregister_match(&filter_ip6);
139}
140
141module_init(ebt_ip6_init);
142module_exit(ebt_ip6_fini);
143MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
144MODULE_LICENSE("GPL");
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 0b209e4aad0a..2f430d4ae911 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -18,6 +18,9 @@
18#include <linux/if_arp.h> 18#include <linux/if_arp.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <net/netfilter/nf_log.h> 20#include <net/netfilter/nf_log.h>
21#include <linux/ipv6.h>
22#include <net/ipv6.h>
23#include <linux/in6.h>
21 24
22static DEFINE_SPINLOCK(ebt_log_lock); 25static DEFINE_SPINLOCK(ebt_log_lock);
23 26
@@ -58,6 +61,27 @@ static void print_MAC(const unsigned char *p)
58 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); 61 printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':');
59} 62}
60 63
64static void
65print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
66{
67 if (protocol == IPPROTO_TCP ||
68 protocol == IPPROTO_UDP ||
69 protocol == IPPROTO_UDPLITE ||
70 protocol == IPPROTO_SCTP ||
71 protocol == IPPROTO_DCCP) {
72 const struct tcpudphdr *pptr;
73 struct tcpudphdr _ports;
74
75 pptr = skb_header_pointer(skb, offset,
76 sizeof(_ports), &_ports);
77 if (pptr == NULL) {
78 printk(" INCOMPLETE TCP/UDP header");
79 return;
80 }
81 printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
82 }
83}
84
61#define myNIPQUAD(a) a[0], a[1], a[2], a[3] 85#define myNIPQUAD(a) a[0], a[1], a[2], a[3]
62static void 86static void
63ebt_log_packet(unsigned int pf, unsigned int hooknum, 87ebt_log_packet(unsigned int pf, unsigned int hooknum,
@@ -95,25 +119,35 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
95 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP " 119 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u, IP "
96 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), 120 "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
97 NIPQUAD(ih->daddr), ih->tos, ih->protocol); 121 NIPQUAD(ih->daddr), ih->tos, ih->protocol);
98 if (ih->protocol == IPPROTO_TCP || 122 print_ports(skb, ih->protocol, ih->ihl*4);
99 ih->protocol == IPPROTO_UDP || 123 goto out;
100 ih->protocol == IPPROTO_UDPLITE || 124 }
101 ih->protocol == IPPROTO_SCTP || 125
102 ih->protocol == IPPROTO_DCCP) { 126#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
103 const struct tcpudphdr *pptr; 127 if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
104 struct tcpudphdr _ports; 128 htons(ETH_P_IPV6)) {
105 129 const struct ipv6hdr *ih;
106 pptr = skb_header_pointer(skb, ih->ihl*4, 130 struct ipv6hdr _iph;
107 sizeof(_ports), &_ports); 131 uint8_t nexthdr;
108 if (pptr == NULL) { 132 int offset_ph;
109 printk(" INCOMPLETE TCP/UDP header"); 133
110 goto out; 134 ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
111 } 135 if (ih == NULL) {
112 printk(" SPT=%u DPT=%u", ntohs(pptr->src), 136 printk(" INCOMPLETE IPv6 header");
113 ntohs(pptr->dst)); 137 goto out;
114 } 138 }
139 printk(" IPv6 SRC=%x:%x:%x:%x:%x:%x:%x:%x "
140 "IPv6 DST=%x:%x:%x:%x:%x:%x:%x:%x, IPv6 "
141 "priority=0x%01X, Next Header=%d", NIP6(ih->saddr),
142 NIP6(ih->daddr), ih->priority, ih->nexthdr);
143 nexthdr = ih->nexthdr;
144 offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
145 if (offset_ph == -1)
146 goto out;
147 print_ports(skb, nexthdr, offset_ph);
115 goto out; 148 goto out;
116 } 149 }
150#endif
117 151
118 if ((bitmask & EBT_LOG_ARP) && 152 if ((bitmask & EBT_LOG_ARP) &&
119 ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) || 153 ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
diff --git a/net/core/dev.c b/net/core/dev.c
index fca23a3bf12c..bfa9a6a951dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -90,6 +90,7 @@
90#include <linux/if_ether.h> 90#include <linux/if_ether.h>
91#include <linux/netdevice.h> 91#include <linux/netdevice.h>
92#include <linux/etherdevice.h> 92#include <linux/etherdevice.h>
93#include <linux/ethtool.h>
93#include <linux/notifier.h> 94#include <linux/notifier.h>
94#include <linux/skbuff.h> 95#include <linux/skbuff.h>
95#include <net/net_namespace.h> 96#include <net/net_namespace.h>
@@ -961,6 +962,12 @@ void netdev_state_change(struct net_device *dev)
961 } 962 }
962} 963}
963 964
965void netdev_bonding_change(struct net_device *dev)
966{
967 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
968}
969EXPORT_SYMBOL(netdev_bonding_change);
970
964/** 971/**
965 * dev_load - load a network module 972 * dev_load - load a network module
966 * @net: the applicable net namespace 973 * @net: the applicable net namespace
@@ -1117,6 +1124,29 @@ int dev_close(struct net_device *dev)
1117} 1124}
1118 1125
1119 1126
1127/**
1128 * dev_disable_lro - disable Large Receive Offload on a device
1129 * @dev: device
1130 *
1131 * Disable Large Receive Offload (LRO) on a net device. Must be
1132 * called under RTNL. This is needed if received packets may be
1133 * forwarded to another interface.
1134 */
1135void dev_disable_lro(struct net_device *dev)
1136{
1137 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1138 dev->ethtool_ops->set_flags) {
1139 u32 flags = dev->ethtool_ops->get_flags(dev);
1140 if (flags & ETH_FLAG_LRO) {
1141 flags &= ~ETH_FLAG_LRO;
1142 dev->ethtool_ops->set_flags(dev, flags);
1143 }
1144 }
1145 WARN_ON(dev->features & NETIF_F_LRO);
1146}
1147EXPORT_SYMBOL(dev_disable_lro);
1148
1149
1120static int dev_boot_phase = 1; 1150static int dev_boot_phase = 1;
1121 1151
1122/* 1152/*
@@ -2769,16 +2799,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2769 return 0; 2799 return 0;
2770} 2800}
2771 2801
2772static void __dev_set_promiscuity(struct net_device *dev, int inc) 2802static int __dev_set_promiscuity(struct net_device *dev, int inc)
2773{ 2803{
2774 unsigned short old_flags = dev->flags; 2804 unsigned short old_flags = dev->flags;
2775 2805
2776 ASSERT_RTNL(); 2806 ASSERT_RTNL();
2777 2807
2778 if ((dev->promiscuity += inc) == 0) 2808 dev->flags |= IFF_PROMISC;
2779 dev->flags &= ~IFF_PROMISC; 2809 dev->promiscuity += inc;
2780 else 2810 if (dev->promiscuity == 0) {
2781 dev->flags |= IFF_PROMISC; 2811 /*
2812 * Avoid overflow.
2813 * If inc causes overflow, untouch promisc and return error.
2814 */
2815 if (inc < 0)
2816 dev->flags &= ~IFF_PROMISC;
2817 else {
2818 dev->promiscuity -= inc;
2819 printk(KERN_WARNING "%s: promiscuity touches roof, "
2820 "set promiscuity failed, promiscuity feature "
2821 "of device might be broken.\n", dev->name);
2822 return -EOVERFLOW;
2823 }
2824 }
2782 if (dev->flags != old_flags) { 2825 if (dev->flags != old_flags) {
2783 printk(KERN_INFO "device %s %s promiscuous mode\n", 2826 printk(KERN_INFO "device %s %s promiscuous mode\n",
2784 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 2827 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
@@ -2796,6 +2839,7 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
2796 if (dev->change_rx_flags) 2839 if (dev->change_rx_flags)
2797 dev->change_rx_flags(dev, IFF_PROMISC); 2840 dev->change_rx_flags(dev, IFF_PROMISC);
2798 } 2841 }
2842 return 0;
2799} 2843}
2800 2844
2801/** 2845/**
@@ -2807,14 +2851,19 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
2807 * remains above zero the interface remains promiscuous. Once it hits zero 2851 * remains above zero the interface remains promiscuous. Once it hits zero
2808 * the device reverts back to normal filtering operation. A negative inc 2852 * the device reverts back to normal filtering operation. A negative inc
2809 * value is used to drop promiscuity on the device. 2853 * value is used to drop promiscuity on the device.
2854 * Return 0 if successful or a negative errno code on error.
2810 */ 2855 */
2811void dev_set_promiscuity(struct net_device *dev, int inc) 2856int dev_set_promiscuity(struct net_device *dev, int inc)
2812{ 2857{
2813 unsigned short old_flags = dev->flags; 2858 unsigned short old_flags = dev->flags;
2859 int err;
2814 2860
2815 __dev_set_promiscuity(dev, inc); 2861 err = __dev_set_promiscuity(dev, inc);
 2862 if (err < 0)
2863 return err;
2816 if (dev->flags != old_flags) 2864 if (dev->flags != old_flags)
2817 dev_set_rx_mode(dev); 2865 dev_set_rx_mode(dev);
2866 return err;
2818} 2867}
2819 2868
2820/** 2869/**
@@ -2827,22 +2876,38 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2827 * to all interfaces. Once it hits zero the device reverts back to normal 2876 * to all interfaces. Once it hits zero the device reverts back to normal
2828 * filtering operation. A negative @inc value is used to drop the counter 2877 * filtering operation. A negative @inc value is used to drop the counter
2829 * when releasing a resource needing all multicasts. 2878 * when releasing a resource needing all multicasts.
2879 * Return 0 if successful or a negative errno code on error.
2830 */ 2880 */
2831 2881
2832void dev_set_allmulti(struct net_device *dev, int inc) 2882int dev_set_allmulti(struct net_device *dev, int inc)
2833{ 2883{
2834 unsigned short old_flags = dev->flags; 2884 unsigned short old_flags = dev->flags;
2835 2885
2836 ASSERT_RTNL(); 2886 ASSERT_RTNL();
2837 2887
2838 dev->flags |= IFF_ALLMULTI; 2888 dev->flags |= IFF_ALLMULTI;
2839 if ((dev->allmulti += inc) == 0) 2889 dev->allmulti += inc;
2840 dev->flags &= ~IFF_ALLMULTI; 2890 if (dev->allmulti == 0) {
2891 /*
2892 * Avoid overflow.
2893 * If inc causes overflow, untouch allmulti and return error.
2894 */
2895 if (inc < 0)
2896 dev->flags &= ~IFF_ALLMULTI;
2897 else {
2898 dev->allmulti -= inc;
2899 printk(KERN_WARNING "%s: allmulti touches roof, "
2900 "set allmulti failed, allmulti feature of "
2901 "device might be broken.\n", dev->name);
2902 return -EOVERFLOW;
2903 }
2904 }
2841 if (dev->flags ^ old_flags) { 2905 if (dev->flags ^ old_flags) {
2842 if (dev->change_rx_flags) 2906 if (dev->change_rx_flags)
2843 dev->change_rx_flags(dev, IFF_ALLMULTI); 2907 dev->change_rx_flags(dev, IFF_ALLMULTI);
2844 dev_set_rx_mode(dev); 2908 dev_set_rx_mode(dev);
2845 } 2909 }
2910 return 0;
2846} 2911}
2847 2912
2848/* 2913/*
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0133b5ebd545..14ada537f895 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -209,6 +209,36 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
209 return 0; 209 return 0;
210} 210}
211 211
212static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr)
213{
214 struct ethtool_rxnfc cmd;
215
216 if (!dev->ethtool_ops->set_rxhash)
217 return -EOPNOTSUPP;
218
219 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
220 return -EFAULT;
221
222 return dev->ethtool_ops->set_rxhash(dev, &cmd);
223}
224
225static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr)
226{
227 struct ethtool_rxnfc info;
228
229 if (!dev->ethtool_ops->get_rxhash)
230 return -EOPNOTSUPP;
231
232 if (copy_from_user(&info, useraddr, sizeof(info)))
233 return -EFAULT;
234
235 dev->ethtool_ops->get_rxhash(dev, &info);
236
237 if (copy_to_user(useraddr, &info, sizeof(info)))
238 return -EFAULT;
239 return 0;
240}
241
212static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 242static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
213{ 243{
214 struct ethtool_regs regs; 244 struct ethtool_regs regs;
@@ -826,6 +856,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
826 case ETHTOOL_GGSO: 856 case ETHTOOL_GGSO:
827 case ETHTOOL_GFLAGS: 857 case ETHTOOL_GFLAGS:
828 case ETHTOOL_GPFLAGS: 858 case ETHTOOL_GPFLAGS:
859 case ETHTOOL_GRXFH:
829 break; 860 break;
830 default: 861 default:
831 if (!capable(CAP_NET_ADMIN)) 862 if (!capable(CAP_NET_ADMIN))
@@ -977,6 +1008,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
977 rc = ethtool_set_value(dev, useraddr, 1008 rc = ethtool_set_value(dev, useraddr,
978 dev->ethtool_ops->set_priv_flags); 1009 dev->ethtool_ops->set_priv_flags);
979 break; 1010 break;
1011 case ETHTOOL_GRXFH:
1012 rc = ethtool_get_rxhash(dev, useraddr);
1013 break;
1014 case ETHTOOL_SRXFH:
1015 rc = ethtool_set_rxhash(dev, useraddr);
1016 break;
980 default: 1017 default:
981 rc = -EOPNOTSUPP; 1018 rc = -EOPNOTSUPP;
982 } 1019 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 277a2302eb3a..79de3b14a8d1 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -69,7 +69,7 @@ static void rules_ops_put(struct fib_rules_ops *ops)
69static void flush_route_cache(struct fib_rules_ops *ops) 69static void flush_route_cache(struct fib_rules_ops *ops)
70{ 70{
71 if (ops->flush_cache) 71 if (ops->flush_cache)
72 ops->flush_cache(); 72 ops->flush_cache(ops);
73} 73}
74 74
75int fib_rules_register(struct fib_rules_ops *ops) 75int fib_rules_register(struct fib_rules_ops *ops)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 90e2177af081..3f7941319217 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -242,11 +242,11 @@ static ssize_t netstat_show(const struct device *d,
242 offset % sizeof(unsigned long) != 0); 242 offset % sizeof(unsigned long) != 0);
243 243
244 read_lock(&dev_base_lock); 244 read_lock(&dev_base_lock);
245 if (dev_isalive(dev) && dev->get_stats && 245 if (dev_isalive(dev)) {
246 (stats = (*dev->get_stats)(dev))) 246 stats = dev->get_stats(dev);
247 ret = sprintf(buf, fmt_ulong, 247 ret = sprintf(buf, fmt_ulong,
248 *(unsigned long *)(((u8 *) stats) + offset)); 248 *(unsigned long *)(((u8 *) stats) + offset));
249 249 }
250 read_unlock(&dev_base_lock); 250 read_unlock(&dev_base_lock);
251 return ret; 251 return ret;
252} 252}
@@ -457,8 +457,7 @@ int netdev_register_kobject(struct net_device *net)
457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); 457 strlcpy(dev->bus_id, net->name, BUS_ID_SIZE);
458 458
459#ifdef CONFIG_SYSFS 459#ifdef CONFIG_SYSFS
460 if (net->get_stats) 460 *groups++ = &netstat_group;
461 *groups++ = &netstat_group;
462 461
463#ifdef CONFIG_WIRELESS_EXT 462#ifdef CONFIG_WIRELESS_EXT
464 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) 463 if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
@@ -469,6 +468,19 @@ int netdev_register_kobject(struct net_device *net)
469 return device_add(dev); 468 return device_add(dev);
470} 469}
471 470
471int netdev_class_create_file(struct class_attribute *class_attr)
472{
473 return class_create_file(&net_class, class_attr);
474}
475
476void netdev_class_remove_file(struct class_attribute *class_attr)
477{
478 class_remove_file(&net_class, class_attr);
479}
480
481EXPORT_SYMBOL(netdev_class_create_file);
482EXPORT_SYMBOL(netdev_class_remove_file);
483
472void netdev_initialize_kobject(struct net_device *net) 484void netdev_initialize_kobject(struct net_device *net)
473{ 485{
474 struct device *device = &(net->dev); 486 struct device *device = &(net->dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a9a77216310e..6c8d7f0ea01a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -607,6 +607,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
607{ 607{
608 struct ifinfomsg *ifm; 608 struct ifinfomsg *ifm;
609 struct nlmsghdr *nlh; 609 struct nlmsghdr *nlh;
610 struct net_device_stats *stats;
611 struct nlattr *attr;
610 612
611 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 613 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
612 if (nlh == NULL) 614 if (nlh == NULL)
@@ -653,19 +655,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
653 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); 655 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
654 } 656 }
655 657
656 if (dev->get_stats) { 658 attr = nla_reserve(skb, IFLA_STATS,
657 struct net_device_stats *stats = dev->get_stats(dev); 659 sizeof(struct rtnl_link_stats));
658 if (stats) { 660 if (attr == NULL)
659 struct nlattr *attr; 661 goto nla_put_failure;
660 662
661 attr = nla_reserve(skb, IFLA_STATS, 663 stats = dev->get_stats(dev);
662 sizeof(struct rtnl_link_stats)); 664 copy_rtnl_link_stats(nla_data(attr), stats);
663 if (attr == NULL)
664 goto nla_put_failure;
665
666 copy_rtnl_link_stats(nla_data(attr), stats);
667 }
668 }
669 665
670 if (dev->rtnl_link_ops) { 666 if (dev->rtnl_link_ops) {
671 if (rtnl_link_fill(skb, dev) < 0) 667 if (rtnl_link_fill(skb, dev) < 0)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 366621610e76..7c571560e9d2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,8 +4,6 @@
4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk> 4 * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
5 * Florian La Roche <rzsfl@rz.uni-sb.de> 5 * Florian La Roche <rzsfl@rz.uni-sb.de>
6 * 6 *
7 * Version: $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
8 *
9 * Fixes: 7 * Fixes:
10 * Alan Cox : Fixed the worst of the load 8 * Alan Cox : Fixed the worst of the load
11 * balancer bugs. 9 * balancer bugs.
@@ -2592,6 +2590,13 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
2592 return true; 2590 return true;
2593} 2591}
2594 2592
2593void __skb_warn_lro_forwarding(const struct sk_buff *skb)
2594{
2595 if (net_ratelimit())
2596 pr_warning("%s: received packets cannot be forwarded"
2597 " while LRO is enabled\n", skb->dev->name);
2598}
2599
2595EXPORT_SYMBOL(___pskb_trim); 2600EXPORT_SYMBOL(___pskb_trim);
2596EXPORT_SYMBOL(__kfree_skb); 2601EXPORT_SYMBOL(__kfree_skb);
2597EXPORT_SYMBOL(kfree_skb); 2602EXPORT_SYMBOL(kfree_skb);
@@ -2625,6 +2630,7 @@ EXPORT_SYMBOL(skb_seq_read);
2625EXPORT_SYMBOL(skb_abort_seq_read); 2630EXPORT_SYMBOL(skb_abort_seq_read);
2626EXPORT_SYMBOL(skb_find_text); 2631EXPORT_SYMBOL(skb_find_text);
2627EXPORT_SYMBOL(skb_append_datato_frags); 2632EXPORT_SYMBOL(skb_append_datato_frags);
2633EXPORT_SYMBOL(__skb_warn_lro_forwarding);
2628 2634
2629EXPORT_SYMBOL_GPL(skb_to_sgvec); 2635EXPORT_SYMBOL_GPL(skb_to_sgvec);
2630EXPORT_SYMBOL_GPL(skb_cow_data); 2636EXPORT_SYMBOL_GPL(skb_cow_data);
diff --git a/net/core/sock.c b/net/core/sock.c
index 88094cb09c06..2c0ba52e5303 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,8 +7,6 @@
7 * handler for protocols to use and generic option handler. 7 * handler for protocols to use and generic option handler.
8 * 8 *
9 * 9 *
10 * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
11 *
12 * Authors: Ross Biro 10 * Authors: Ross Biro
13 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
14 * Florian La Roche, <flla@stud.uni-sb.de> 12 * Florian La Roche, <flla@stud.uni-sb.de>
@@ -1068,7 +1066,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1068 * to be taken into account in all callers. -acme 1066 * to be taken into account in all callers. -acme
1069 */ 1067 */
1070 sk_refcnt_debug_inc(newsk); 1068 sk_refcnt_debug_inc(newsk);
1071 newsk->sk_socket = NULL; 1069 sk_set_socket(newsk, NULL);
1072 newsk->sk_sleep = NULL; 1070 newsk->sk_sleep = NULL;
1073 1071
1074 if (newsk->sk_prot->sockets_allocated) 1072 if (newsk->sk_prot->sockets_allocated)
@@ -1704,7 +1702,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1704 sk->sk_rcvbuf = sysctl_rmem_default; 1702 sk->sk_rcvbuf = sysctl_rmem_default;
1705 sk->sk_sndbuf = sysctl_wmem_default; 1703 sk->sk_sndbuf = sysctl_wmem_default;
1706 sk->sk_state = TCP_CLOSE; 1704 sk->sk_state = TCP_CLOSE;
1707 sk->sk_socket = sock; 1705 sk_set_socket(sk, sock);
1708 1706
1709 sock_set_flag(sk, SOCK_ZAPPED); 1707 sock_set_flag(sk, SOCK_ZAPPED);
1710 1708
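
sk->sk_socket is no longer written directly; assignments go through sk_set_socket(). On this kernel the helper is most likely nothing more than a thin inline wrapper along these lines (the name comes from the hunk, the body is an assumption):

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	/* Single point of assignment, so later patches can hook extra
	 * bookkeeping here without touching every caller. */
	sk->sk_socket = sock;
}
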
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5fc801057244..a570e2af22cb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -125,14 +125,6 @@ static struct ctl_table net_core_table[] = {
125#endif /* CONFIG_XFRM */ 125#endif /* CONFIG_XFRM */
126#endif /* CONFIG_NET */ 126#endif /* CONFIG_NET */
127 { 127 {
128 .ctl_name = NET_CORE_SOMAXCONN,
129 .procname = "somaxconn",
130 .data = &init_net.core.sysctl_somaxconn,
131 .maxlen = sizeof(int),
132 .mode = 0644,
133 .proc_handler = &proc_dointvec
134 },
135 {
136 .ctl_name = NET_CORE_BUDGET, 128 .ctl_name = NET_CORE_BUDGET,
137 .procname = "netdev_budget", 129 .procname = "netdev_budget",
138 .data = &netdev_budget, 130 .data = &netdev_budget,
@@ -151,6 +143,18 @@ static struct ctl_table net_core_table[] = {
151 { .ctl_name = 0 } 143 { .ctl_name = 0 }
152}; 144};
153 145
146static struct ctl_table netns_core_table[] = {
147 {
148 .ctl_name = NET_CORE_SOMAXCONN,
149 .procname = "somaxconn",
150 .data = &init_net.core.sysctl_somaxconn,
151 .maxlen = sizeof(int),
152 .mode = 0644,
153 .proc_handler = &proc_dointvec
154 },
155 { .ctl_name = 0 }
156};
157
154static __net_initdata struct ctl_path net_core_path[] = { 158static __net_initdata struct ctl_path net_core_path[] = {
155 { .procname = "net", .ctl_name = CTL_NET, }, 159 { .procname = "net", .ctl_name = CTL_NET, },
156 { .procname = "core", .ctl_name = NET_CORE, }, 160 { .procname = "core", .ctl_name = NET_CORE, },
@@ -159,23 +163,17 @@ static __net_initdata struct ctl_path net_core_path[] = {
159 163
160static __net_init int sysctl_core_net_init(struct net *net) 164static __net_init int sysctl_core_net_init(struct net *net)
161{ 165{
162 struct ctl_table *tbl, *tmp; 166 struct ctl_table *tbl;
163 167
164 net->core.sysctl_somaxconn = SOMAXCONN; 168 net->core.sysctl_somaxconn = SOMAXCONN;
165 169
166 tbl = net_core_table; 170 tbl = netns_core_table;
167 if (net != &init_net) { 171 if (net != &init_net) {
168 tbl = kmemdup(tbl, sizeof(net_core_table), GFP_KERNEL); 172 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
169 if (tbl == NULL) 173 if (tbl == NULL)
170 goto err_dup; 174 goto err_dup;
171 175
172 for (tmp = tbl; tmp->procname; tmp++) { 176 tbl[0].data = &net->core.sysctl_somaxconn;
173 if (tmp->data >= (void *)&init_net &&
174 tmp->data < (void *)(&init_net + 1))
175 tmp->data += (char *)net - (char *)&init_net;
176 else
177 tmp->mode &= ~0222;
178 }
179 } 177 }
180 178
181 net->core.sysctl_hdr = register_net_sysctl_table(net, 179 net->core.sysctl_hdr = register_net_sysctl_table(net,
@@ -186,7 +184,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
186 return 0; 184 return 0;
187 185
188err_reg: 186err_reg:
189 if (tbl != net_core_table) 187 if (tbl != netns_core_table)
190 kfree(tbl); 188 kfree(tbl);
191err_dup: 189err_dup:
192 return -ENOMEM; 190 return -ENOMEM;
@@ -198,7 +196,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
198 196
199 tbl = net->core.sysctl_hdr->ctl_table_arg; 197 tbl = net->core.sysctl_hdr->ctl_table_arg;
200 unregister_net_sysctl_table(net->core.sysctl_hdr); 198 unregister_net_sysctl_table(net->core.sysctl_hdr);
201 BUG_ON(tbl == net_core_table); 199 BUG_ON(tbl == netns_core_table);
202 kfree(tbl); 200 kfree(tbl);
203} 201}
204 202
@@ -209,6 +207,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
209 207
210static __init int sysctl_core_init(void) 208static __init int sysctl_core_init(void)
211{ 209{
210 register_net_sysctl_rotable(net_core_path, net_core_table);
212 return register_pernet_subsys(&sysctl_core_ops); 211 return register_pernet_subsys(&sysctl_core_ops);
213} 212}
214 213
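
The somaxconn entry moves out of the global net_core_table into a per-namespace netns_core_table; each non-initial namespace gets a kmemdup()'d copy whose .data pointer is re-aimed at its own field, while the remaining global knobs are registered once via register_net_sysctl_rotable(). A condensed sketch of the per-net side, using the names from the hunk:

static __net_init int sysctl_core_net_init(struct net *net)
{
	struct ctl_table *tbl = netns_core_table;

	net->core.sysctl_somaxconn = SOMAXCONN;

	if (net != &init_net) {
		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
		if (tbl == NULL)
			return -ENOMEM;
		/* Point the lone entry at this namespace's copy. */
		tbl[0].data = &net->core.sysctl_somaxconn;
	}

	net->core.sysctl_hdr = register_net_sysctl_table(net,
							 net_core_path, tbl);
	if (net->core.sysctl_hdr == NULL) {
		if (tbl != netns_core_table)
			kfree(tbl);
		return -ENOMEM;
	}
	return 0;
}
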
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index f44d492d3b74..1b2cea244e12 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -262,7 +262,7 @@ extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
262 const struct dccp_hdr *dh, const unsigned len); 262 const struct dccp_hdr *dh, const unsigned len);
263 263
264extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); 264extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
265extern int dccp_destroy_sock(struct sock *sk); 265extern void dccp_destroy_sock(struct sock *sk);
266 266
267extern void dccp_close(struct sock *sk, long timeout); 267extern void dccp_close(struct sock *sk, long timeout);
268extern struct sk_buff *dccp_make_response(struct sock *sk, 268extern struct sk_buff *dccp_make_response(struct sock *sk,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index f7fe2a572d7b..eec3c4717890 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1091,10 +1091,10 @@ static int dccp_v6_init_sock(struct sock *sk)
1091 return err; 1091 return err;
1092} 1092}
1093 1093
1094static int dccp_v6_destroy_sock(struct sock *sk) 1094static void dccp_v6_destroy_sock(struct sock *sk)
1095{ 1095{
1096 dccp_destroy_sock(sk); 1096 dccp_destroy_sock(sk);
1097 return inet6_destroy_sock(sk); 1097 inet6_destroy_sock(sk);
1098} 1098}
1099 1099
1100static struct timewait_sock_ops dccp6_timewait_sock_ops = { 1100static struct timewait_sock_ops dccp6_timewait_sock_ops = {
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9dfe2470962c..a0b56009611f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -237,7 +237,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
237 237
238EXPORT_SYMBOL_GPL(dccp_init_sock); 238EXPORT_SYMBOL_GPL(dccp_init_sock);
239 239
240int dccp_destroy_sock(struct sock *sk) 240void dccp_destroy_sock(struct sock *sk)
241{ 241{
242 struct dccp_sock *dp = dccp_sk(sk); 242 struct dccp_sock *dp = dccp_sk(sk);
243 struct dccp_minisock *dmsk = dccp_msk(sk); 243 struct dccp_minisock *dmsk = dccp_msk(sk);
@@ -268,8 +268,6 @@ int dccp_destroy_sock(struct sock *sk)
268 268
269 /* clean up feature negotiation state */ 269 /* clean up feature negotiation state */
270 dccp_feat_clean(dmsk); 270 dccp_feat_clean(dmsk);
271
272 return 0;
273} 271}
274 272
275EXPORT_SYMBOL_GPL(dccp_destroy_sock); 273EXPORT_SYMBOL_GPL(dccp_destroy_sock);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index fc2efe899e91..931bdf9cb756 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1719,6 +1719,8 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1719 * See if there is data ready to read, sleep if there isn't 1719 * See if there is data ready to read, sleep if there isn't
1720 */ 1720 */
1721 for(;;) { 1721 for(;;) {
1722 DEFINE_WAIT(wait);
1723
1722 if (sk->sk_err) 1724 if (sk->sk_err)
1723 goto out; 1725 goto out;
1724 1726
@@ -1748,14 +1750,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1748 goto out; 1750 goto out;
1749 } 1751 }
1750 1752
1751 set_bit(SOCK_ASYNC_WAITDATA, &sock->flags); 1753 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1752 SOCK_SLEEP_PRE(sk) 1754 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 1755 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1754 if (!dn_data_ready(sk, queue, flags, target)) 1756 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1755 schedule(); 1757 finish_wait(sk->sk_sleep, &wait);
1756
1757 SOCK_SLEEP_POST(sk)
1758 clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1759 } 1758 }
1760 1759
1761 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { 1760 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
@@ -2002,18 +2001,19 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
2002 * size. 2001 * size.
2003 */ 2002 */
2004 if (dn_queue_too_long(scp, queue, flags)) { 2003 if (dn_queue_too_long(scp, queue, flags)) {
2004 DEFINE_WAIT(wait);
2005
2005 if (flags & MSG_DONTWAIT) { 2006 if (flags & MSG_DONTWAIT) {
2006 err = -EWOULDBLOCK; 2007 err = -EWOULDBLOCK;
2007 goto out; 2008 goto out;
2008 } 2009 }
2009 2010
2010 SOCK_SLEEP_PRE(sk) 2011 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
2011 2012 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2012 if (dn_queue_too_long(scp, queue, flags)) 2013 sk_wait_event(sk, &timeo,
2013 schedule(); 2014 !dn_queue_too_long(scp, queue, flags));
2014 2015 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2015 SOCK_SLEEP_POST(sk) 2016 finish_wait(sk->sk_sleep, &wait);
2016
2017 continue; 2017 continue;
2018 } 2018 }
2019 2019
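
Both DECnet wait loops drop the old SOCK_SLEEP_PRE()/SOCK_SLEEP_POST() macros in favour of the standard prepare_to_wait()/sk_wait_event()/finish_wait() sequence. The general shape, with dn_data_ready() standing in for any wake-up condition:

	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	/* sk_wait_event() drops the socket lock, sleeps until the
	 * condition holds or the timeout expires, then relocks. */
	sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
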
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 5b7539b7fe0c..14fbca55e908 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -229,7 +229,7 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
229 return 0; 229 return 0;
230} 230}
231 231
232static void dn_fib_rule_flush_cache(void) 232static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
233{ 233{
234 dn_rt_cache_flush(-1); 234 dn_rt_cache_flush(-1);
235} 235}
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 7c9bb13b1539..d35127bb84e1 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -573,9 +573,7 @@ static int econet_release(struct socket *sock)
573 573
574 sk->sk_state_change(sk); /* It is useless. Just for sanity. */ 574 sk->sk_state_change(sk); /* It is useless. Just for sanity. */
575 575
576 sock->sk = NULL; 576 sock_orphan(sk);
577 sk->sk_socket = NULL;
578 sock_set_flag(sk, SOCK_DEAD);
579 577
580 /* Purge queues */ 578 /* Purge queues */
581 579
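
The open-coded teardown (clearing sock->sk and sk->sk_socket, then setting SOCK_DEAD) becomes a single sock_orphan() call. That helper is expected to do roughly the following under the callback lock; the exact body is paraphrased from memory, not taken from this patch:

static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);	/* same accessor as in sock.c above */
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
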
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 200ee1e63728..69dbc342a464 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -391,7 +391,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
391 391
392 wstats.updated = 0; 392 wstats.updated = 0;
393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { 393 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
394 wstats.level = rx_stats->rssi; 394 wstats.level = rx_stats->signal;
395 wstats.updated |= IW_QUAL_LEVEL_UPDATED; 395 wstats.updated |= IW_QUAL_LEVEL_UPDATED;
396 } else 396 } else
397 wstats.updated |= IW_QUAL_LEVEL_INVALID; 397 wstats.updated |= IW_QUAL_LEVEL_INVALID;
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index d8b02603cbe5..d996547f7a62 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -542,90 +542,4 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
542 return 1; 542 return 1;
543} 543}
544 544
545/* Incoming 802.11 strucure is converted to a TXB
546 * a block of 802.11 fragment packets (stored as skbs) */
547int ieee80211_tx_frame(struct ieee80211_device *ieee,
548 struct ieee80211_hdr *frame, int hdr_len, int total_len,
549 int encrypt_mpdu)
550{
551 struct ieee80211_txb *txb = NULL;
552 unsigned long flags;
553 struct net_device_stats *stats = &ieee->stats;
554 struct sk_buff *skb_frag;
555 int priority = -1;
556 int fraglen = total_len;
557 int headroom = ieee->tx_headroom;
558 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
559
560 spin_lock_irqsave(&ieee->lock, flags);
561
562 if (encrypt_mpdu && (!ieee->sec.encrypt || !crypt))
563 encrypt_mpdu = 0;
564
565 /* If there is no driver handler to take the TXB, dont' bother
566 * creating it... */
567 if (!ieee->hard_start_xmit) {
568 printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
569 goto success;
570 }
571
572 if (unlikely(total_len < 24)) {
573 printk(KERN_WARNING "%s: skb too small (%d).\n",
574 ieee->dev->name, total_len);
575 goto success;
576 }
577
578 if (encrypt_mpdu) {
579 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
580 fraglen += crypt->ops->extra_mpdu_prefix_len +
581 crypt->ops->extra_mpdu_postfix_len;
582 headroom += crypt->ops->extra_mpdu_prefix_len;
583 }
584
585 /* When we allocate the TXB we allocate enough space for the reserve
586 * and full fragment bytes (bytes_per_frag doesn't include prefix,
587 * postfix, header, FCS, etc.) */
588 txb = ieee80211_alloc_txb(1, fraglen, headroom, GFP_ATOMIC);
589 if (unlikely(!txb)) {
590 printk(KERN_WARNING "%s: Could not allocate TXB\n",
591 ieee->dev->name);
592 goto failed;
593 }
594 txb->encrypted = 0;
595 txb->payload_size = fraglen;
596
597 skb_frag = txb->fragments[0];
598
599 memcpy(skb_put(skb_frag, total_len), frame, total_len);
600
601 if (ieee->config &
602 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
603 skb_put(skb_frag, 4);
604
605 /* To avoid overcomplicating things, we do the corner-case frame
606 * encryption in software. The only real situation where encryption is
607 * needed here is during software-based shared key authentication. */
608 if (encrypt_mpdu)
609 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
610
611 success:
612 spin_unlock_irqrestore(&ieee->lock, flags);
613
614 if (txb) {
615 if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
616 stats->tx_packets++;
617 stats->tx_bytes += txb->payload_size;
618 return 0;
619 }
620 ieee80211_txb_free(txb);
621 }
622 return 0;
623
624 failed:
625 spin_unlock_irqrestore(&ieee->lock, flags);
626 stats->tx_errors++;
627 return 1;
628}
629
630EXPORT_SYMBOL(ieee80211_tx_frame);
631EXPORT_SYMBOL(ieee80211_txb_free); 545EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 623489afa62c..973832dd7faf 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -43,8 +43,9 @@ static const char *ieee80211_modes[] = {
43 43
44#define MAX_CUSTOM_LEN 64 44#define MAX_CUSTOM_LEN 64
45static char *ieee80211_translate_scan(struct ieee80211_device *ieee, 45static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
46 char *start, char *stop, 46 char *start, char *stop,
47 struct ieee80211_network *network) 47 struct ieee80211_network *network,
48 struct iw_request_info *info)
48{ 49{
49 char custom[MAX_CUSTOM_LEN]; 50 char custom[MAX_CUSTOM_LEN];
50 char *p; 51 char *p;
@@ -57,7 +58,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
57 iwe.cmd = SIOCGIWAP; 58 iwe.cmd = SIOCGIWAP;
58 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 59 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
59 memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); 60 memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
60 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_ADDR_LEN); 61 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
61 62
62 /* Remaining entries will be displayed in the order we provide them */ 63 /* Remaining entries will be displayed in the order we provide them */
63 64
@@ -66,17 +67,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
66 iwe.u.data.flags = 1; 67 iwe.u.data.flags = 1;
67 if (network->flags & NETWORK_EMPTY_ESSID) { 68 if (network->flags & NETWORK_EMPTY_ESSID) {
68 iwe.u.data.length = sizeof("<hidden>"); 69 iwe.u.data.length = sizeof("<hidden>");
69 start = iwe_stream_add_point(start, stop, &iwe, "<hidden>"); 70 start = iwe_stream_add_point(info, start, stop,
71 &iwe, "<hidden>");
70 } else { 72 } else {
71 iwe.u.data.length = min(network->ssid_len, (u8) 32); 73 iwe.u.data.length = min(network->ssid_len, (u8) 32);
72 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 74 start = iwe_stream_add_point(info, start, stop,
75 &iwe, network->ssid);
73 } 76 }
74 77
75 /* Add the protocol name */ 78 /* Add the protocol name */
76 iwe.cmd = SIOCGIWNAME; 79 iwe.cmd = SIOCGIWNAME;
77 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", 80 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
78 ieee80211_modes[network->mode]); 81 ieee80211_modes[network->mode]);
79 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN); 82 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
80 83
81 /* Add mode */ 84 /* Add mode */
82 iwe.cmd = SIOCGIWMODE; 85 iwe.cmd = SIOCGIWMODE;
@@ -86,7 +89,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
86 else 89 else
87 iwe.u.mode = IW_MODE_ADHOC; 90 iwe.u.mode = IW_MODE_ADHOC;
88 91
89 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_UINT_LEN); 92 start = iwe_stream_add_event(info, start, stop,
93 &iwe, IW_EV_UINT_LEN);
90 } 94 }
91 95
92 /* Add channel and frequency */ 96 /* Add channel and frequency */
@@ -95,7 +99,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
95 iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel); 99 iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel);
96 iwe.u.freq.e = 6; 100 iwe.u.freq.e = 6;
97 iwe.u.freq.i = 0; 101 iwe.u.freq.i = 0;
98 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN); 102 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
99 103
100 /* Add encryption capability */ 104 /* Add encryption capability */
101 iwe.cmd = SIOCGIWENCODE; 105 iwe.cmd = SIOCGIWENCODE;
@@ -104,12 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
104 else 108 else
105 iwe.u.data.flags = IW_ENCODE_DISABLED; 109 iwe.u.data.flags = IW_ENCODE_DISABLED;
106 iwe.u.data.length = 0; 110 iwe.u.data.length = 0;
107 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 111 start = iwe_stream_add_point(info, start, stop,
112 &iwe, network->ssid);
108 113
109 /* Add basic and extended rates */ 114 /* Add basic and extended rates */
110 /* Rate : stuffing multiple values in a single event require a bit 115 /* Rate : stuffing multiple values in a single event require a bit
111 * more of magic - Jean II */ 116 * more of magic - Jean II */
112 current_val = start + IW_EV_LCP_LEN; 117 current_val = start + iwe_stream_lcp_len(info);
113 iwe.cmd = SIOCGIWRATE; 118 iwe.cmd = SIOCGIWRATE;
114 /* Those two flags are ignored... */ 119 /* Those two flags are ignored... */
115 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; 120 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
@@ -124,17 +129,19 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
124 /* Bit rate given in 500 kb/s units (+ 0x80) */ 129 /* Bit rate given in 500 kb/s units (+ 0x80) */
125 iwe.u.bitrate.value = ((rate & 0x7f) * 500000); 130 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
126 /* Add new value to event */ 131 /* Add new value to event */
127 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); 132 current_val = iwe_stream_add_value(info, start, current_val,
133 stop, &iwe, IW_EV_PARAM_LEN);
128 } 134 }
129 for (; j < network->rates_ex_len; j++) { 135 for (; j < network->rates_ex_len; j++) {
130 rate = network->rates_ex[j] & 0x7F; 136 rate = network->rates_ex[j] & 0x7F;
131 /* Bit rate given in 500 kb/s units (+ 0x80) */ 137 /* Bit rate given in 500 kb/s units (+ 0x80) */
132 iwe.u.bitrate.value = ((rate & 0x7f) * 500000); 138 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
133 /* Add new value to event */ 139 /* Add new value to event */
134 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); 140 current_val = iwe_stream_add_value(info, start, current_val,
141 stop, &iwe, IW_EV_PARAM_LEN);
135 } 142 }
136 /* Check if we added any rate */ 143 /* Check if we added any rate */
137 if((current_val - start) > IW_EV_LCP_LEN) 144 if ((current_val - start) > iwe_stream_lcp_len(info))
138 start = current_val; 145 start = current_val;
139 146
140 /* Add quality statistics */ 147 /* Add quality statistics */
@@ -181,14 +188,14 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
181 iwe.u.qual.level = network->stats.signal; 188 iwe.u.qual.level = network->stats.signal;
182 } 189 }
183 190
184 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); 191 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
185 192
186 iwe.cmd = IWEVCUSTOM; 193 iwe.cmd = IWEVCUSTOM;
187 p = custom; 194 p = custom;
188 195
189 iwe.u.data.length = p - custom; 196 iwe.u.data.length = p - custom;
190 if (iwe.u.data.length) 197 if (iwe.u.data.length)
191 start = iwe_stream_add_point(start, stop, &iwe, custom); 198 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
192 199
193 memset(&iwe, 0, sizeof(iwe)); 200 memset(&iwe, 0, sizeof(iwe));
194 if (network->wpa_ie_len) { 201 if (network->wpa_ie_len) {
@@ -196,7 +203,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
196 memcpy(buf, network->wpa_ie, network->wpa_ie_len); 203 memcpy(buf, network->wpa_ie, network->wpa_ie_len);
197 iwe.cmd = IWEVGENIE; 204 iwe.cmd = IWEVGENIE;
198 iwe.u.data.length = network->wpa_ie_len; 205 iwe.u.data.length = network->wpa_ie_len;
199 start = iwe_stream_add_point(start, stop, &iwe, buf); 206 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
200 } 207 }
201 208
202 memset(&iwe, 0, sizeof(iwe)); 209 memset(&iwe, 0, sizeof(iwe));
@@ -205,7 +212,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
205 memcpy(buf, network->rsn_ie, network->rsn_ie_len); 212 memcpy(buf, network->rsn_ie, network->rsn_ie_len);
206 iwe.cmd = IWEVGENIE; 213 iwe.cmd = IWEVGENIE;
207 iwe.u.data.length = network->rsn_ie_len; 214 iwe.u.data.length = network->rsn_ie_len;
208 start = iwe_stream_add_point(start, stop, &iwe, buf); 215 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
209 } 216 }
210 217
211 /* Add EXTRA: Age to display seconds since last beacon/probe response 218 /* Add EXTRA: Age to display seconds since last beacon/probe response
@@ -217,7 +224,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
217 jiffies_to_msecs(jiffies - network->last_scanned)); 224 jiffies_to_msecs(jiffies - network->last_scanned));
218 iwe.u.data.length = p - custom; 225 iwe.u.data.length = p - custom;
219 if (iwe.u.data.length) 226 if (iwe.u.data.length)
220 start = iwe_stream_add_point(start, stop, &iwe, custom); 227 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
221 228
222 /* Add spectrum management information */ 229 /* Add spectrum management information */
223 iwe.cmd = -1; 230 iwe.cmd = -1;
@@ -238,7 +245,7 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
238 245
239 if (iwe.cmd == IWEVCUSTOM) { 246 if (iwe.cmd == IWEVCUSTOM) {
240 iwe.u.data.length = p - custom; 247 iwe.u.data.length = p - custom;
241 start = iwe_stream_add_point(start, stop, &iwe, custom); 248 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
242 } 249 }
243 250
244 return start; 251 return start;
@@ -272,7 +279,8 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
272 279
273 if (ieee->scan_age == 0 || 280 if (ieee->scan_age == 0 ||
274 time_after(network->last_scanned + ieee->scan_age, jiffies)) 281 time_after(network->last_scanned + ieee->scan_age, jiffies))
275 ev = ieee80211_translate_scan(ieee, ev, stop, network); 282 ev = ieee80211_translate_scan(ieee, ev, stop, network,
283 info);
276 else 284 else
277 IEEE80211_DEBUG_SCAN("Not showing network '%s (" 285 IEEE80211_DEBUG_SCAN("Not showing network '%s ("
278 "%s)' due to age (%dms).\n", 286 "%s)' due to age (%dms).\n",
@@ -744,98 +752,9 @@ int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
744 return 0; 752 return 0;
745} 753}
746 754
747int ieee80211_wx_set_auth(struct net_device *dev,
748 struct iw_request_info *info,
749 union iwreq_data *wrqu,
750 char *extra)
751{
752 struct ieee80211_device *ieee = netdev_priv(dev);
753 unsigned long flags;
754 int err = 0;
755
756 spin_lock_irqsave(&ieee->lock, flags);
757
758 switch (wrqu->param.flags & IW_AUTH_INDEX) {
759 case IW_AUTH_WPA_VERSION:
760 case IW_AUTH_CIPHER_PAIRWISE:
761 case IW_AUTH_CIPHER_GROUP:
762 case IW_AUTH_KEY_MGMT:
763 /*
764 * Host AP driver does not use these parameters and allows
765 * wpa_supplicant to control them internally.
766 */
767 break;
768 case IW_AUTH_TKIP_COUNTERMEASURES:
769 break; /* FIXME */
770 case IW_AUTH_DROP_UNENCRYPTED:
771 ieee->drop_unencrypted = !!wrqu->param.value;
772 break;
773 case IW_AUTH_80211_AUTH_ALG:
774 break; /* FIXME */
775 case IW_AUTH_WPA_ENABLED:
776 ieee->privacy_invoked = ieee->wpa_enabled = !!wrqu->param.value;
777 break;
778 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
779 ieee->ieee802_1x = !!wrqu->param.value;
780 break;
781 case IW_AUTH_PRIVACY_INVOKED:
782 ieee->privacy_invoked = !!wrqu->param.value;
783 break;
784 default:
785 err = -EOPNOTSUPP;
786 break;
787 }
788 spin_unlock_irqrestore(&ieee->lock, flags);
789 return err;
790}
791
792int ieee80211_wx_get_auth(struct net_device *dev,
793 struct iw_request_info *info,
794 union iwreq_data *wrqu,
795 char *extra)
796{
797 struct ieee80211_device *ieee = netdev_priv(dev);
798 unsigned long flags;
799 int err = 0;
800
801 spin_lock_irqsave(&ieee->lock, flags);
802
803 switch (wrqu->param.flags & IW_AUTH_INDEX) {
804 case IW_AUTH_WPA_VERSION:
805 case IW_AUTH_CIPHER_PAIRWISE:
806 case IW_AUTH_CIPHER_GROUP:
807 case IW_AUTH_KEY_MGMT:
808 case IW_AUTH_TKIP_COUNTERMEASURES: /* FIXME */
809 case IW_AUTH_80211_AUTH_ALG: /* FIXME */
810 /*
811 * Host AP driver does not use these parameters and allows
812 * wpa_supplicant to control them internally.
813 */
814 err = -EOPNOTSUPP;
815 break;
816 case IW_AUTH_DROP_UNENCRYPTED:
817 wrqu->param.value = ieee->drop_unencrypted;
818 break;
819 case IW_AUTH_WPA_ENABLED:
820 wrqu->param.value = ieee->wpa_enabled;
821 break;
822 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
823 wrqu->param.value = ieee->ieee802_1x;
824 break;
825 default:
826 err = -EOPNOTSUPP;
827 break;
828 }
829 spin_unlock_irqrestore(&ieee->lock, flags);
830 return err;
831}
832
833EXPORT_SYMBOL(ieee80211_wx_set_encodeext); 755EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
834EXPORT_SYMBOL(ieee80211_wx_get_encodeext); 756EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
835 757
836EXPORT_SYMBOL(ieee80211_wx_get_scan); 758EXPORT_SYMBOL(ieee80211_wx_get_scan);
837EXPORT_SYMBOL(ieee80211_wx_set_encode); 759EXPORT_SYMBOL(ieee80211_wx_set_encode);
838EXPORT_SYMBOL(ieee80211_wx_get_encode); 760EXPORT_SYMBOL(ieee80211_wx_get_encode);
839
840EXPORT_SYMBOL_GPL(ieee80211_wx_set_auth);
841EXPORT_SYMBOL_GPL(ieee80211_wx_get_auth);
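
Every iwe_stream_add_event()/add_point()/add_value() call in the scan translator now takes the struct iw_request_info that arrived with the SIOCGIWSCAN request, and the fixed IW_EV_LCP_LEN constant becomes iwe_stream_lcp_len(info), so event sizes can be computed per request (for example for compat handling) rather than hard-coded. A minimal event emission under the new API, mirroring the first hunk above:

	struct iw_event iwe;

	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
	/* 'info' is the iw_request_info handed down from the wext ioctl. */
	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
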
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24eca23c2db3..dc411335c14f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET protocol family socket handler. 6 * PF_INET protocol family socket handler.
7 * 7 *
8 * Version: $Id: af_inet.c,v 1.137 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de> 10 * Florian La Roche, <flla@stud.uni-sb.de>
@@ -1481,14 +1479,15 @@ static int __init inet_init(void)
1481 * Initialise the multicast router 1479 * Initialise the multicast router
1482 */ 1480 */
1483#if defined(CONFIG_IP_MROUTE) 1481#if defined(CONFIG_IP_MROUTE)
1484 ip_mr_init(); 1482 if (ip_mr_init())
1483 printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n");
1485#endif 1484#endif
1486 /* 1485 /*
1487 * Initialise per-cpu ipv4 mibs 1486 * Initialise per-cpu ipv4 mibs
1488 */ 1487 */
1489 1488
1490 if (init_ipv4_mibs()) 1489 if (init_ipv4_mibs())
1491 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; 1490 printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
1492 1491
1493 ipv4_proc_init(); 1492 ipv4_proc_init();
1494 1493
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9b539fa9fe18..29df75a6bcc7 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,7 +1,5 @@
1/* linux/net/ipv4/arp.c 1/* linux/net/ipv4/arp.c
2 * 2 *
3 * Version: $Id: arp.c,v 1.99 2001/08/30 22:55:42 davem Exp $
4 *
5 * Copyright (C) 1994 by Florian La Roche 3 * Copyright (C) 1994 by Florian La Roche
6 * 4 *
7 * This module implements the Address Resolution Protocol ARP (RFC 826), 5 * This module implements the Address Resolution Protocol ARP (RFC 826),
@@ -1199,7 +1197,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo
1199 switch (event) { 1197 switch (event) {
1200 case NETDEV_CHANGEADDR: 1198 case NETDEV_CHANGEADDR:
1201 neigh_changeaddr(&arp_tbl, dev); 1199 neigh_changeaddr(&arp_tbl, dev);
1202 rt_cache_flush(0); 1200 rt_cache_flush(dev_net(dev), 0);
1203 break; 1201 break;
1204 default: 1202 default:
1205 break; 1203 break;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 79a7ef6209ff..2e667e2f90df 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * NET3 IP device support routines. 2 * NET3 IP device support routines.
3 * 3 *
4 * Version: $Id: devinet.c,v 1.44 2001/10/31 21:55:54 davem Exp $
5 *
6 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
@@ -170,6 +168,8 @@ static struct in_device *inetdev_init(struct net_device *dev)
170 in_dev->dev = dev; 168 in_dev->dev = dev;
171 if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) 169 if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
172 goto out_kfree; 170 goto out_kfree;
171 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
172 dev_disable_lro(dev);
173 /* Reference in_dev->dev */ 173 /* Reference in_dev->dev */
174 dev_hold(dev); 174 dev_hold(dev);
175 /* Account for reference dev->ip_ptr (below) */ 175 /* Account for reference dev->ip_ptr (below) */
@@ -1013,7 +1013,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1013 memcpy(old, ifa->ifa_label, IFNAMSIZ); 1013 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1014 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 1014 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1015 if (named++ == 0) 1015 if (named++ == 0)
1016 continue; 1016 goto skip;
1017 dot = strchr(old, ':'); 1017 dot = strchr(old, ':');
1018 if (dot == NULL) { 1018 if (dot == NULL) {
1019 sprintf(old, ":%d", named); 1019 sprintf(old, ":%d", named);
@@ -1024,6 +1024,8 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1024 } else { 1024 } else {
1025 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); 1025 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1026 } 1026 }
1027skip:
1028 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
1027 } 1029 }
1028} 1030}
1029 1031
@@ -1241,6 +1243,8 @@ static void inet_forward_change(struct net *net)
1241 read_lock(&dev_base_lock); 1243 read_lock(&dev_base_lock);
1242 for_each_netdev(net, dev) { 1244 for_each_netdev(net, dev) {
1243 struct in_device *in_dev; 1245 struct in_device *in_dev;
1246 if (on)
1247 dev_disable_lro(dev);
1244 rcu_read_lock(); 1248 rcu_read_lock();
1245 in_dev = __in_dev_get_rcu(dev); 1249 in_dev = __in_dev_get_rcu(dev);
1246 if (in_dev) 1250 if (in_dev)
@@ -1248,8 +1252,6 @@ static void inet_forward_change(struct net *net)
1248 rcu_read_unlock(); 1252 rcu_read_unlock();
1249 } 1253 }
1250 read_unlock(&dev_base_lock); 1254 read_unlock(&dev_base_lock);
1251
1252 rt_cache_flush(0);
1253} 1255}
1254 1256
1255static int devinet_conf_proc(ctl_table *ctl, int write, 1257static int devinet_conf_proc(ctl_table *ctl, int write,
@@ -1335,10 +1337,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1335 if (write && *valp != val) { 1337 if (write && *valp != val) {
1336 struct net *net = ctl->extra2; 1338 struct net *net = ctl->extra2;
1337 1339
1338 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) 1340 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1339 inet_forward_change(net); 1341 rtnl_lock();
1340 else if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) 1342 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1341 rt_cache_flush(0); 1343 inet_forward_change(net);
1344 } else if (*valp) {
1345 struct ipv4_devconf *cnf = ctl->extra1;
1346 struct in_device *idev =
1347 container_of(cnf, struct in_device, cnf);
1348 dev_disable_lro(idev->dev);
1349 }
1350 rtnl_unlock();
1351 rt_cache_flush(net, 0);
1352 }
1342 } 1353 }
1343 1354
1344 return ret; 1355 return ret;
@@ -1351,9 +1362,10 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1351 int *valp = ctl->data; 1362 int *valp = ctl->data;
1352 int val = *valp; 1363 int val = *valp;
1353 int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 1364 int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
1365 struct net *net = ctl->extra2;
1354 1366
1355 if (write && *valp != val) 1367 if (write && *valp != val)
1356 rt_cache_flush(0); 1368 rt_cache_flush(net, 0);
1357 1369
1358 return ret; 1370 return ret;
1359} 1371}
@@ -1364,9 +1376,10 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
1364{ 1376{
1365 int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp, 1377 int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp,
1366 newval, newlen); 1378 newval, newlen);
1379 struct net *net = table->extra2;
1367 1380
1368 if (ret == 1) 1381 if (ret == 1)
1369 rt_cache_flush(0); 1382 rt_cache_flush(net, 0);
1370 1383
1371 return ret; 1384 return ret;
1372} 1385}
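
devinet now enforces the rule that a device with IPv4 forwarding enabled must not do LRO, and every route-cache flush becomes namespace-aware. The new core of devinet_sysctl_forward(), slightly condensed and commented:

	if (write && *valp != val) {
		struct net *net = ctl->extra2;

		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
			rtnl_lock();
			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
				/* "all" toggled: walk every device and
				 * disable LRO when turning forwarding on. */
				inet_forward_change(net);
			} else if (*valp) {
				/* A single device turned forwarding on. */
				struct ipv4_devconf *cnf = ctl->extra1;
				struct in_device *idev =
					container_of(cnf, struct in_device, cnf);
				dev_disable_lro(idev->dev);
			}
			rtnl_unlock();
			rt_cache_flush(net, 0);
		}
	}
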
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0b2ac6a3d903..65c1503f8cc8 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: FIB frontend. 6 * IPv4 Forwarding Information Base: FIB frontend.
7 * 7 *
8 * Version: $Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -146,7 +144,7 @@ static void fib_flush(struct net *net)
146 } 144 }
147 145
148 if (flushed) 146 if (flushed)
149 rt_cache_flush(-1); 147 rt_cache_flush(net, -1);
150} 148}
151 149
152/* 150/*
@@ -899,21 +897,22 @@ static void fib_disable_ip(struct net_device *dev, int force)
899{ 897{
900 if (fib_sync_down_dev(dev, force)) 898 if (fib_sync_down_dev(dev, force))
901 fib_flush(dev_net(dev)); 899 fib_flush(dev_net(dev));
902 rt_cache_flush(0); 900 rt_cache_flush(dev_net(dev), 0);
903 arp_ifdown(dev); 901 arp_ifdown(dev);
904} 902}
905 903
906static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) 904static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
907{ 905{
908 struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; 906 struct in_ifaddr *ifa = (struct in_ifaddr*)ptr;
907 struct net_device *dev = ifa->ifa_dev->dev;
909 908
910 switch (event) { 909 switch (event) {
911 case NETDEV_UP: 910 case NETDEV_UP:
912 fib_add_ifaddr(ifa); 911 fib_add_ifaddr(ifa);
913#ifdef CONFIG_IP_ROUTE_MULTIPATH 912#ifdef CONFIG_IP_ROUTE_MULTIPATH
914 fib_sync_up(ifa->ifa_dev->dev); 913 fib_sync_up(dev);
915#endif 914#endif
916 rt_cache_flush(-1); 915 rt_cache_flush(dev_net(dev), -1);
917 break; 916 break;
918 case NETDEV_DOWN: 917 case NETDEV_DOWN:
919 fib_del_ifaddr(ifa); 918 fib_del_ifaddr(ifa);
@@ -921,9 +920,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
921 /* Last address was deleted from this interface. 920 /* Last address was deleted from this interface.
922 Disable IP. 921 Disable IP.
923 */ 922 */
924 fib_disable_ip(ifa->ifa_dev->dev, 1); 923 fib_disable_ip(dev, 1);
925 } else { 924 } else {
926 rt_cache_flush(-1); 925 rt_cache_flush(dev_net(dev), -1);
927 } 926 }
928 break; 927 break;
929 } 928 }
@@ -951,14 +950,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
951#ifdef CONFIG_IP_ROUTE_MULTIPATH 950#ifdef CONFIG_IP_ROUTE_MULTIPATH
952 fib_sync_up(dev); 951 fib_sync_up(dev);
953#endif 952#endif
954 rt_cache_flush(-1); 953 rt_cache_flush(dev_net(dev), -1);
955 break; 954 break;
956 case NETDEV_DOWN: 955 case NETDEV_DOWN:
957 fib_disable_ip(dev, 0); 956 fib_disable_ip(dev, 0);
958 break; 957 break;
959 case NETDEV_CHANGEMTU: 958 case NETDEV_CHANGEMTU:
960 case NETDEV_CHANGE: 959 case NETDEV_CHANGE:
961 rt_cache_flush(0); 960 rt_cache_flush(dev_net(dev), 0);
962 break; 961 break;
963 } 962 }
964 return NOTIFY_DONE; 963 return NOTIFY_DONE;
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 2e2fc3376ac9..c8cac6c7f881 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 FIB: lookup engine and maintenance routines. 6 * IPv4 FIB: lookup engine and maintenance routines.
7 * 7 *
8 * Version: $Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -474,7 +472,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
474 472
475 fib_release_info(fi_drop); 473 fib_release_info(fi_drop);
476 if (state & FA_S_ACCESSED) 474 if (state & FA_S_ACCESSED)
477 rt_cache_flush(-1); 475 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
478 rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id, 476 rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id,
479 &cfg->fc_nlinfo, NLM_F_REPLACE); 477 &cfg->fc_nlinfo, NLM_F_REPLACE);
480 return 0; 478 return 0;
@@ -534,7 +532,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
534 532
535 if (new_f) 533 if (new_f)
536 fz->fz_nent++; 534 fz->fz_nent++;
537 rt_cache_flush(-1); 535 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
538 536
539 rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id, 537 rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
540 &cfg->fc_nlinfo, 0); 538 &cfg->fc_nlinfo, 0);
@@ -616,7 +614,7 @@ static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg)
616 write_unlock_bh(&fib_hash_lock); 614 write_unlock_bh(&fib_hash_lock);
617 615
618 if (fa->fa_state & FA_S_ACCESSED) 616 if (fa->fa_state & FA_S_ACCESSED)
619 rt_cache_flush(-1); 617 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
620 fn_free_alias(fa, f); 618 fn_free_alias(fa, f);
621 if (kill_fn) { 619 if (kill_fn) {
622 fn_free_node(f); 620 fn_free_node(f);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 1fb56876be54..6080d7120821 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -258,9 +258,9 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
258 + nla_total_size(4); /* flow */ 258 + nla_total_size(4); /* flow */
259} 259}
260 260
261static void fib4_rule_flush_cache(void) 261static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
262{ 262{
263 rt_cache_flush(-1); 263 rt_cache_flush(ops->fro_net, -1);
264} 264}
265 265
266static struct fib_rules_ops fib4_rules_ops_template = { 266static struct fib_rules_ops fib4_rules_ops_template = {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0d4d72827e4b..ded2ae34eab1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * IPv4 Forwarding Information Base: semantics. 6 * IPv4 Forwarding Information Base: semantics.
7 * 7 *
8 * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4b02d14e7ab9..d16ae4623be6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -22,8 +22,6 @@
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson 22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 * 24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 * 25 *
28 * Code from fib_hash has been reused which includes the following header: 26 * Code from fib_hash has been reused which includes the following header:
29 * 27 *
@@ -1273,7 +1271,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1273 1271
1274 fib_release_info(fi_drop); 1272 fib_release_info(fi_drop);
1275 if (state & FA_S_ACCESSED) 1273 if (state & FA_S_ACCESSED)
1276 rt_cache_flush(-1); 1274 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1277 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, 1275 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1278 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); 1276 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1279 1277
@@ -1318,7 +1316,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1318 list_add_tail_rcu(&new_fa->fa_list, 1316 list_add_tail_rcu(&new_fa->fa_list,
1319 (fa ? &fa->fa_list : fa_head)); 1317 (fa ? &fa->fa_list : fa_head));
1320 1318
1321 rt_cache_flush(-1); 1319 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1322 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, 1320 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1323 &cfg->fc_nlinfo, 0); 1321 &cfg->fc_nlinfo, 0);
1324succeeded: 1322succeeded:
@@ -1666,7 +1664,7 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1666 trie_leaf_remove(t, l); 1664 trie_leaf_remove(t, l);
1667 1665
1668 if (fa->fa_state & FA_S_ACCESSED) 1666 if (fa->fa_state & FA_S_ACCESSED)
1669 rt_cache_flush(-1); 1667 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1670 1668
1671 fib_release_info(fa->fa_info); 1669 fib_release_info(fa->fa_info);
1672 alias_free_mem_rcu(fa); 1670 alias_free_mem_rcu(fa);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 87397351ddac..aa7cf46853b7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Alan Cox, <alan@redhat.com> 4 * Alan Cox, <alan@redhat.com>
5 * 5 *
6 * Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2769dc4a4c84..68e84a933e90 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,8 +8,6 @@
8 * the older version didn't come out right using gcc 2.5.8, the newer one 8 * the older version didn't come out right using gcc 2.5.8, the newer one
9 * seems to fall out with gcc 2.6.2. 9 * seems to fall out with gcc 2.6.2.
10 * 10 *
11 * Version: $Id: igmp.c,v 1.47 2002/02/01 22:01:03 davem Exp $
12 *
13 * Authors: 11 * Authors:
14 * Alan Cox <Alan.Cox@linux.org> 12 * Alan Cox <Alan.Cox@linux.org>
15 * 13 *
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ec834480abe7..5bbf00051512 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -103,7 +103,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
103 rover = net_random() % remaining + low; 103 rover = net_random() % remaining + low;
104 104
105 do { 105 do {
106 head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; 106 head = &hashinfo->bhash[inet_bhashfn(net, rover,
107 hashinfo->bhash_size)];
107 spin_lock(&head->lock); 108 spin_lock(&head->lock);
108 inet_bind_bucket_for_each(tb, node, &head->chain) 109 inet_bind_bucket_for_each(tb, node, &head->chain)
109 if (tb->ib_net == net && tb->port == rover) 110 if (tb->ib_net == net && tb->port == rover)
@@ -130,7 +131,8 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
130 */ 131 */
131 snum = rover; 132 snum = rover;
132 } else { 133 } else {
133 head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; 134 head = &hashinfo->bhash[inet_bhashfn(net, snum,
135 hashinfo->bhash_size)];
134 spin_lock(&head->lock); 136 spin_lock(&head->lock);
135 inet_bind_bucket_for_each(tb, node, &head->chain) 137 inet_bind_bucket_for_each(tb, node, &head->chain)
136 if (tb->ib_net == net && tb->port == snum) 138 if (tb->ib_net == net && tb->port == snum)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index da97695e7096..c10036e7a463 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * inet_diag.c Module for monitoring INET transport protocols sockets. 2 * inet_diag.c Module for monitoring INET transport protocols sockets.
3 * 3 *
4 * Version: $Id: inet_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 2023d37b2708..eca5899729e3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -70,7 +70,8 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
70static void __inet_put_port(struct sock *sk) 70static void __inet_put_port(struct sock *sk)
71{ 71{
72 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; 72 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
73 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); 73 const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num,
74 hashinfo->bhash_size);
74 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 75 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
75 struct inet_bind_bucket *tb; 76 struct inet_bind_bucket *tb;
76 77
@@ -95,7 +96,8 @@ EXPORT_SYMBOL(inet_put_port);
95void __inet_inherit_port(struct sock *sk, struct sock *child) 96void __inet_inherit_port(struct sock *sk, struct sock *child)
96{ 97{
97 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; 98 struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
98 const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); 99 const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num,
100 table->bhash_size);
99 struct inet_bind_hashbucket *head = &table->bhash[bhash]; 101 struct inet_bind_hashbucket *head = &table->bhash[bhash];
100 struct inet_bind_bucket *tb; 102 struct inet_bind_bucket *tb;
101 103
@@ -192,7 +194,7 @@ struct sock *__inet_lookup_listener(struct net *net,
192 const struct hlist_head *head; 194 const struct hlist_head *head;
193 195
194 read_lock(&hashinfo->lhash_lock); 196 read_lock(&hashinfo->lhash_lock);
195 head = &hashinfo->listening_hash[inet_lhashfn(hnum)]; 197 head = &hashinfo->listening_hash[inet_lhashfn(net, hnum)];
196 if (!hlist_empty(head)) { 198 if (!hlist_empty(head)) {
197 const struct inet_sock *inet = inet_sk((sk = __sk_head(head))); 199 const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
198 200
@@ -225,7 +227,7 @@ struct sock * __inet_lookup_established(struct net *net,
225 /* Optimize here for direct hit, only listening connections can 227 /* Optimize here for direct hit, only listening connections can
226 * have wildcards anyways. 228 * have wildcards anyways.
227 */ 229 */
228 unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); 230 unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
229 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); 231 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
230 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); 232 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
231 233
@@ -265,13 +267,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
265 int dif = sk->sk_bound_dev_if; 267 int dif = sk->sk_bound_dev_if;
266 INET_ADDR_COOKIE(acookie, saddr, daddr) 268 INET_ADDR_COOKIE(acookie, saddr, daddr)
267 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 269 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
268 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); 270 struct net *net = sock_net(sk);
271 unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->dport);
269 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 272 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
270 rwlock_t *lock = inet_ehash_lockp(hinfo, hash); 273 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
271 struct sock *sk2; 274 struct sock *sk2;
272 const struct hlist_node *node; 275 const struct hlist_node *node;
273 struct inet_timewait_sock *tw; 276 struct inet_timewait_sock *tw;
274 struct net *net = sock_net(sk);
275 277
276 prefetch(head->chain.first); 278 prefetch(head->chain.first);
277 write_lock(lock); 279 write_lock(lock);
@@ -438,7 +440,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
438 local_bh_disable(); 440 local_bh_disable();
439 for (i = 1; i <= remaining; i++) { 441 for (i = 1; i <= remaining; i++) {
440 port = low + (i + offset) % remaining; 442 port = low + (i + offset) % remaining;
441 head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; 443 head = &hinfo->bhash[inet_bhashfn(net, port,
444 hinfo->bhash_size)];
442 spin_lock(&head->lock); 445 spin_lock(&head->lock);
443 446
444 /* Does not bother with rcv_saddr checks, 447 /* Does not bother with rcv_saddr checks,
@@ -493,7 +496,7 @@ ok:
493 goto out; 496 goto out;
494 } 497 }
495 498
496 head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; 499 head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
497 tb = inet_csk(sk)->icsk_bind_hash; 500 tb = inet_csk(sk)->icsk_bind_hash;
498 spin_lock_bh(&head->lock); 501 spin_lock_bh(&head->lock);
499 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 502 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ce16e9ac24c1..06006a5ac8b9 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -32,7 +32,8 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
32 write_unlock(lock); 32 write_unlock(lock);
33 33
34 /* Disassociate with bind bucket. */ 34 /* Disassociate with bind bucket. */
35 bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; 35 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
36 hashinfo->bhash_size)];
36 spin_lock(&bhead->lock); 37 spin_lock(&bhead->lock);
37 tb = tw->tw_tb; 38 tb = tw->tw_tb;
38 __hlist_del(&tw->tw_bind_node); 39 __hlist_del(&tw->tw_bind_node);
@@ -81,7 +82,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
81 Note, that any socket with inet->num != 0 MUST be bound in 82 Note, that any socket with inet->num != 0 MUST be bound in
82 binding cache, even if it is closed. 83 binding cache, even if it is closed.
83 */ 84 */
84 bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)]; 85 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
86 hashinfo->bhash_size)];
85 spin_lock(&bhead->lock); 87 spin_lock(&bhead->lock);
86 tw->tw_tb = icsk->icsk_bind_hash; 88 tw->tw_tb = icsk->icsk_bind_hash;
87 BUG_TRAP(icsk->icsk_bind_hash); 89 BUG_TRAP(icsk->icsk_bind_hash);
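
inet_bhashfn(), inet_lhashfn() and inet_ehashfn() all gain a struct net argument so that sockets from different namespaces bound to the same port or address land on different chains. The bind-hash variant presumably just folds a per-namespace value into the hash; the net_hash_mix() helper used in this sketch is an assumption, it does not appear in these hunks:

static inline int inet_bhashfn(struct net *net, const __u16 lport,
			       const int bhash_size)
{
	/* net_hash_mix(net) is assumed to yield a per-namespace constant
	 * (0 when network namespaces are compiled out). */
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
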
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index af995198f643..a456ceeac3f2 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * This source is covered by the GNU GPL, the same as all kernel sources. 4 * This source is covered by the GNU GPL, the same as all kernel sources.
5 * 5 *
6 * Version: $Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
7 *
8 * Authors: Andrey V. Savochkin <saw@msu.ru> 6 * Authors: Andrey V. Savochkin <saw@msu.ru>
9 */ 7 */
10 8
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 4813c39b438b..da14725916d3 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP forwarding functionality. 6 * The IP forwarding functionality.
7 * 7 *
8 * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
@@ -58,6 +56,9 @@ int ip_forward(struct sk_buff *skb)
58 struct rtable *rt; /* Route we use */ 56 struct rtable *rt; /* Route we use */
59 struct ip_options * opt = &(IPCB(skb)->opt); 57 struct ip_options * opt = &(IPCB(skb)->opt);
60 58
59 if (skb_warn_if_lro(skb))
60 goto drop;
61
61 if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) 62 if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
62 goto drop; 63 goto drop;
63 64
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 37221f659159..fbd5804b5d83 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP fragmentation functionality. 6 * The IP fragmentation functionality.
7 * 7 *
8 * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
9 *
10 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> 8 * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox <Alan.Cox@linux.org> 9 * Alan Cox <Alan.Cox@linux.org>
12 * 10 *
@@ -600,7 +598,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
600#ifdef CONFIG_SYSCTL 598#ifdef CONFIG_SYSCTL
601static int zero; 599static int zero;
602 600
603static struct ctl_table ip4_frags_ctl_table[] = { 601static struct ctl_table ip4_frags_ns_ctl_table[] = {
604 { 602 {
605 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH, 603 .ctl_name = NET_IPV4_IPFRAG_HIGH_THRESH,
606 .procname = "ipfrag_high_thresh", 604 .procname = "ipfrag_high_thresh",
@@ -626,6 +624,10 @@ static struct ctl_table ip4_frags_ctl_table[] = {
626 .proc_handler = &proc_dointvec_jiffies, 624 .proc_handler = &proc_dointvec_jiffies,
627 .strategy = &sysctl_jiffies 625 .strategy = &sysctl_jiffies
628 }, 626 },
627 { }
628};
629
630static struct ctl_table ip4_frags_ctl_table[] = {
629 { 631 {
630 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL, 632 .ctl_name = NET_IPV4_IPFRAG_SECRET_INTERVAL,
631 .procname = "ipfrag_secret_interval", 633 .procname = "ipfrag_secret_interval",
@@ -646,22 +648,20 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 648 { }
647}; 649};
648 650
649static int ip4_frags_ctl_register(struct net *net) 651static int ip4_frags_ns_ctl_register(struct net *net)
650{ 652{
651 struct ctl_table *table; 653 struct ctl_table *table;
652 struct ctl_table_header *hdr; 654 struct ctl_table_header *hdr;
653 655
654 table = ip4_frags_ctl_table; 656 table = ip4_frags_ns_ctl_table;
655 if (net != &init_net) { 657 if (net != &init_net) {
656 table = kmemdup(table, sizeof(ip4_frags_ctl_table), GFP_KERNEL); 658 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
657 if (table == NULL) 659 if (table == NULL)
658 goto err_alloc; 660 goto err_alloc;
659 661
660 table[0].data = &net->ipv4.frags.high_thresh; 662 table[0].data = &net->ipv4.frags.high_thresh;
661 table[1].data = &net->ipv4.frags.low_thresh; 663 table[1].data = &net->ipv4.frags.low_thresh;
662 table[2].data = &net->ipv4.frags.timeout; 664 table[2].data = &net->ipv4.frags.timeout;
663 table[3].mode &= ~0222;
664 table[4].mode &= ~0222;
665 } 665 }
666 666
667 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); 667 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
@@ -678,7 +678,7 @@ err_alloc:
678 return -ENOMEM; 678 return -ENOMEM;
679} 679}
680 680
681static void ip4_frags_ctl_unregister(struct net *net) 681static void ip4_frags_ns_ctl_unregister(struct net *net)
682{ 682{
683 struct ctl_table *table; 683 struct ctl_table *table;
684 684
@@ -686,13 +686,22 @@ static void ip4_frags_ctl_unregister(struct net *net)
686 unregister_net_sysctl_table(net->ipv4.frags_hdr); 686 unregister_net_sysctl_table(net->ipv4.frags_hdr);
687 kfree(table); 687 kfree(table);
688} 688}
689
690static void ip4_frags_ctl_register(void)
691{
692 register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
693}
689#else 694#else
690static inline int ip4_frags_ctl_register(struct net *net) 695static inline int ip4_frags_ns_ctl_register(struct net *net)
691{ 696{
692 return 0; 697 return 0;
693} 698}
694 699
695static inline void ip4_frags_ctl_unregister(struct net *net) 700static inline void ip4_frags_ns_ctl_unregister(struct net *net)
701{
702}
703
704static inline void ip4_frags_ctl_register(void)
696{ 705{
697} 706}
698#endif 707#endif
@@ -716,12 +725,12 @@ static int ipv4_frags_init_net(struct net *net)
716 725
717 inet_frags_init_net(&net->ipv4.frags); 726 inet_frags_init_net(&net->ipv4.frags);
718 727
719 return ip4_frags_ctl_register(net); 728 return ip4_frags_ns_ctl_register(net);
720} 729}
721 730
722static void ipv4_frags_exit_net(struct net *net) 731static void ipv4_frags_exit_net(struct net *net)
723{ 732{
724 ip4_frags_ctl_unregister(net); 733 ip4_frags_ns_ctl_unregister(net);
725 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 734 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
726} 735}
727 736
@@ -732,6 +741,7 @@ static struct pernet_operations ip4_frags_ops = {
732 741
733void __init ipfrag_init(void) 742void __init ipfrag_init(void)
734{ 743{
744 ip4_frags_ctl_register();
735 register_pernet_subsys(&ip4_frags_ops); 745 register_pernet_subsys(&ip4_frags_ops);
736 ip4_frags.hashfn = ip4_hashfn; 746 ip4_frags.hashfn = ip4_hashfn;
737 ip4_frags.constructor = ip4_frag_init; 747 ip4_frags.constructor = ip4_frag_init;
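
The reshuffle above splits the fragment sysctls in two: ipfrag_high_thresh, ipfrag_low_thresh and ipfrag_time move into the *_ns_* table, which is cloned for each network namespace and pointed at that namespace's own counters, while entries such as ipfrag_secret_interval stay in a table registered once, read-only outside the initial namespace, via register_net_sysctl_rotable(). A condensed sketch of the cloning half is shown below; the function name is invented, but the fields and calls come from the hunk itself.

    #include <linux/slab.h>
    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    /* Sketch: duplicate the template table for a non-initial namespace and
     * repoint each entry's ->data at that namespace's fragment state. */
    static struct ctl_table *frags_clone_ns_table(struct net *net,
                                                  struct ctl_table *tmpl,
                                                  size_t size)
    {
            struct ctl_table *table = tmpl;

            if (net != &init_net) {
                    table = kmemdup(tmpl, size, GFP_KERNEL);
                    if (table == NULL)
                            return NULL;

                    table[0].data = &net->ipv4.frags.high_thresh;
                    table[1].data = &net->ipv4.frags.low_thresh;
                    table[2].data = &net->ipv4.frags.timeout;
            }
            return table;
    }
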
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4342cba4ff82..2a61158ea722 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -473,6 +473,8 @@ static int ipgre_rcv(struct sk_buff *skb)
473 read_lock(&ipgre_lock); 473 read_lock(&ipgre_lock);
474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev), 474 if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
475 iph->saddr, iph->daddr, key)) != NULL) { 475 iph->saddr, iph->daddr, key)) != NULL) {
476 struct net_device_stats *stats = &tunnel->dev->stats;
477
476 secpath_reset(skb); 478 secpath_reset(skb);
477 479
478 skb->protocol = *(__be16*)(h + 2); 480 skb->protocol = *(__be16*)(h + 2);
@@ -497,28 +499,28 @@ static int ipgre_rcv(struct sk_buff *skb)
497 /* Looped back packet, drop it! */ 499 /* Looped back packet, drop it! */
498 if (skb->rtable->fl.iif == 0) 500 if (skb->rtable->fl.iif == 0)
499 goto drop; 501 goto drop;
500 tunnel->stat.multicast++; 502 stats->multicast++;
501 skb->pkt_type = PACKET_BROADCAST; 503 skb->pkt_type = PACKET_BROADCAST;
502 } 504 }
503#endif 505#endif
504 506
505 if (((flags&GRE_CSUM) && csum) || 507 if (((flags&GRE_CSUM) && csum) ||
506 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { 508 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
507 tunnel->stat.rx_crc_errors++; 509 stats->rx_crc_errors++;
508 tunnel->stat.rx_errors++; 510 stats->rx_errors++;
509 goto drop; 511 goto drop;
510 } 512 }
511 if (tunnel->parms.i_flags&GRE_SEQ) { 513 if (tunnel->parms.i_flags&GRE_SEQ) {
512 if (!(flags&GRE_SEQ) || 514 if (!(flags&GRE_SEQ) ||
513 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { 515 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
514 tunnel->stat.rx_fifo_errors++; 516 stats->rx_fifo_errors++;
515 tunnel->stat.rx_errors++; 517 stats->rx_errors++;
516 goto drop; 518 goto drop;
517 } 519 }
518 tunnel->i_seqno = seqno + 1; 520 tunnel->i_seqno = seqno + 1;
519 } 521 }
520 tunnel->stat.rx_packets++; 522 stats->rx_packets++;
521 tunnel->stat.rx_bytes += skb->len; 523 stats->rx_bytes += skb->len;
522 skb->dev = tunnel->dev; 524 skb->dev = tunnel->dev;
523 dst_release(skb->dst); 525 dst_release(skb->dst);
524 skb->dst = NULL; 526 skb->dst = NULL;
@@ -540,7 +542,7 @@ drop_nolock:
540static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 542static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
541{ 543{
542 struct ip_tunnel *tunnel = netdev_priv(dev); 544 struct ip_tunnel *tunnel = netdev_priv(dev);
543 struct net_device_stats *stats = &tunnel->stat; 545 struct net_device_stats *stats = &tunnel->dev->stats;
544 struct iphdr *old_iph = ip_hdr(skb); 546 struct iphdr *old_iph = ip_hdr(skb);
545 struct iphdr *tiph; 547 struct iphdr *tiph;
546 u8 tos; 548 u8 tos;
@@ -554,7 +556,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
554 int mtu; 556 int mtu;
555 557
556 if (tunnel->recursion++) { 558 if (tunnel->recursion++) {
557 tunnel->stat.collisions++; 559 stats->collisions++;
558 goto tx_error; 560 goto tx_error;
559 } 561 }
560 562
@@ -570,7 +572,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
570 /* NBMA tunnel */ 572 /* NBMA tunnel */
571 573
572 if (skb->dst == NULL) { 574 if (skb->dst == NULL) {
573 tunnel->stat.tx_fifo_errors++; 575 stats->tx_fifo_errors++;
574 goto tx_error; 576 goto tx_error;
575 } 577 }
576 578
@@ -621,7 +623,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
621 .tos = RT_TOS(tos) } }, 623 .tos = RT_TOS(tos) } },
622 .proto = IPPROTO_GRE }; 624 .proto = IPPROTO_GRE };
623 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 625 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
624 tunnel->stat.tx_carrier_errors++; 626 stats->tx_carrier_errors++;
625 goto tx_error; 627 goto tx_error;
626 } 628 }
627 } 629 }
@@ -629,7 +631,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
629 631
630 if (tdev == dev) { 632 if (tdev == dev) {
631 ip_rt_put(rt); 633 ip_rt_put(rt);
632 tunnel->stat.collisions++; 634 stats->collisions++;
633 goto tx_error; 635 goto tx_error;
634 } 636 }
635 637
@@ -954,11 +956,6 @@ done:
954 return err; 956 return err;
955} 957}
956 958
957static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
958{
959 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
960}
961
962static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 959static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
963{ 960{
964 struct ip_tunnel *tunnel = netdev_priv(dev); 961 struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1084,7 +1081,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
1084 dev->uninit = ipgre_tunnel_uninit; 1081 dev->uninit = ipgre_tunnel_uninit;
1085 dev->destructor = free_netdev; 1082 dev->destructor = free_netdev;
1086 dev->hard_start_xmit = ipgre_tunnel_xmit; 1083 dev->hard_start_xmit = ipgre_tunnel_xmit;
1087 dev->get_stats = ipgre_tunnel_get_stats;
1088 dev->do_ioctl = ipgre_tunnel_ioctl; 1084 dev->do_ioctl = ipgre_tunnel_ioctl;
1089 dev->change_mtu = ipgre_tunnel_change_mtu; 1085 dev->change_mtu = ipgre_tunnel_change_mtu;
1090 1086
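
The theme of this hunk, repeated in the ipip.c and ipmr.c changes further down, is to stop keeping a private struct net_device_stats inside the tunnel and to count directly into the dev->stats block that every net_device already embeds; the per-driver ->get_stats callback then has nothing left to return and is deleted, since the core's default handler hands back dev->stats. Schematically (example_count_rx is a made-up name):

    #include <linux/netdevice.h>

    /* Before (sketch): a private stats copy plus a get_stats hook returning it.
     * After (sketch): update the device's own counters in place. */
    static void example_count_rx(struct net_device *dev, unsigned int len)
    {
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
    }
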
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ff77a4a7f9ec..7c26428ea67b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) module. 6 * The Internet Protocol (IP) module.
7 * 7 *
8 * Version: $Id: ip_input.c,v 1.55 2002/01/12 07:39:45 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 33126ad2cfdc..be3f18a7a40e 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The options processing module for ip.c 6 * The options processing module for ip.c
7 * 7 *
8 * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $
9 *
10 * Authors: A.N.Kuznetsov 8 * Authors: A.N.Kuznetsov
11 * 9 *
12 */ 10 */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e527628f56cf..f1278eecf56d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The Internet Protocol (IP) output module. 6 * The Internet Protocol (IP) output module.
7 * 7 *
8 * Version: $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org> 10 * Donald Becker, <becker@super.org>
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e0514e82308e..105d92a039b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The IP to API glue. 6 * The IP to API glue.
7 * 7 *
8 * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: see ip.c 8 * Authors: see ip.c
11 * 9 *
12 * Fixes: 10 * Fixes:
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ed45037ce9be..b88aa9afa42e 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ipconfig.c,v 1.46 2002/02/01 22:01:04 davem Exp $
3 *
4 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or 2 * Automatic Configuration of IP -- use DHCP, BOOTP, RARP, or
5 * user-supplied information to configure own IP address and routes. 3 * user-supplied information to configure own IP address and routes.
6 * 4 *
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index af5cb53da5cc..4c6d2caf9203 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Linux NET3: IP/IP protocol decoder. 2 * Linux NET3: IP/IP protocol decoder.
3 * 3 *
4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
5 *
6 * Authors: 4 * Authors:
7 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 5 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
8 * 6 *
@@ -368,8 +366,8 @@ static int ipip_rcv(struct sk_buff *skb)
368 skb->protocol = htons(ETH_P_IP); 366 skb->protocol = htons(ETH_P_IP);
369 skb->pkt_type = PACKET_HOST; 367 skb->pkt_type = PACKET_HOST;
370 368
371 tunnel->stat.rx_packets++; 369 tunnel->dev->stats.rx_packets++;
372 tunnel->stat.rx_bytes += skb->len; 370 tunnel->dev->stats.rx_bytes += skb->len;
373 skb->dev = tunnel->dev; 371 skb->dev = tunnel->dev;
374 dst_release(skb->dst); 372 dst_release(skb->dst);
375 skb->dst = NULL; 373 skb->dst = NULL;
@@ -392,7 +390,7 @@ static int ipip_rcv(struct sk_buff *skb)
392static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 390static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
393{ 391{
394 struct ip_tunnel *tunnel = netdev_priv(dev); 392 struct ip_tunnel *tunnel = netdev_priv(dev);
395 struct net_device_stats *stats = &tunnel->stat; 393 struct net_device_stats *stats = &tunnel->dev->stats;
396 struct iphdr *tiph = &tunnel->parms.iph; 394 struct iphdr *tiph = &tunnel->parms.iph;
397 u8 tos = tunnel->parms.iph.tos; 395 u8 tos = tunnel->parms.iph.tos;
398 __be16 df = tiph->frag_off; 396 __be16 df = tiph->frag_off;
@@ -405,7 +403,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
405 int mtu; 403 int mtu;
406 404
407 if (tunnel->recursion++) { 405 if (tunnel->recursion++) {
408 tunnel->stat.collisions++; 406 stats->collisions++;
409 goto tx_error; 407 goto tx_error;
410 } 408 }
411 409
@@ -418,7 +416,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
418 if (!dst) { 416 if (!dst) {
419 /* NBMA tunnel */ 417 /* NBMA tunnel */
420 if ((rt = skb->rtable) == NULL) { 418 if ((rt = skb->rtable) == NULL) {
421 tunnel->stat.tx_fifo_errors++; 419 stats->tx_fifo_errors++;
422 goto tx_error; 420 goto tx_error;
423 } 421 }
424 if ((dst = rt->rt_gateway) == 0) 422 if ((dst = rt->rt_gateway) == 0)
@@ -433,7 +431,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433 .tos = RT_TOS(tos) } }, 431 .tos = RT_TOS(tos) } },
434 .proto = IPPROTO_IPIP }; 432 .proto = IPPROTO_IPIP };
435 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 433 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
436 tunnel->stat.tx_carrier_errors++; 434 stats->tx_carrier_errors++;
437 goto tx_error_icmp; 435 goto tx_error_icmp;
438 } 436 }
439 } 437 }
@@ -441,7 +439,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
441 439
442 if (tdev == dev) { 440 if (tdev == dev) {
443 ip_rt_put(rt); 441 ip_rt_put(rt);
444 tunnel->stat.collisions++; 442 stats->collisions++;
445 goto tx_error; 443 goto tx_error;
446 } 444 }
447 445
@@ -451,7 +449,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
451 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 449 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
452 450
453 if (mtu < 68) { 451 if (mtu < 68) {
454 tunnel->stat.collisions++; 452 stats->collisions++;
455 ip_rt_put(rt); 453 ip_rt_put(rt);
456 goto tx_error; 454 goto tx_error;
457 } 455 }
@@ -685,11 +683,6 @@ done:
685 return err; 683 return err;
686} 684}
687 685
688static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
689{
690 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
691}
692
693static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) 686static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
694{ 687{
695 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 688 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -702,7 +695,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
702{ 695{
703 dev->uninit = ipip_tunnel_uninit; 696 dev->uninit = ipip_tunnel_uninit;
704 dev->hard_start_xmit = ipip_tunnel_xmit; 697 dev->hard_start_xmit = ipip_tunnel_xmit;
705 dev->get_stats = ipip_tunnel_get_stats;
706 dev->do_ioctl = ipip_tunnel_ioctl; 698 dev->do_ioctl = ipip_tunnel_ioctl;
707 dev->change_mtu = ipip_tunnel_change_mtu; 699 dev->change_mtu = ipip_tunnel_change_mtu;
708 dev->destructor = free_netdev; 700 dev->destructor = free_netdev;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 11700a4dcd95..438fab9c62a0 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -9,8 +9,6 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
13 *
14 * Fixes: 12 * Fixes:
15 * Michael Chastain : Incorrect size of copying. 13 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code 14 * Alan Cox : Added the cache manager code
@@ -181,26 +179,20 @@ static int reg_vif_num = -1;
181static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 179static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
182{ 180{
183 read_lock(&mrt_lock); 181 read_lock(&mrt_lock);
184 ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; 182 dev->stats.tx_bytes += skb->len;
185 ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; 183 dev->stats.tx_packets++;
186 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 184 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
187 read_unlock(&mrt_lock); 185 read_unlock(&mrt_lock);
188 kfree_skb(skb); 186 kfree_skb(skb);
189 return 0; 187 return 0;
190} 188}
191 189
192static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
193{
194 return (struct net_device_stats*)netdev_priv(dev);
195}
196
197static void reg_vif_setup(struct net_device *dev) 190static void reg_vif_setup(struct net_device *dev)
198{ 191{
199 dev->type = ARPHRD_PIMREG; 192 dev->type = ARPHRD_PIMREG;
200 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 193 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
201 dev->flags = IFF_NOARP; 194 dev->flags = IFF_NOARP;
202 dev->hard_start_xmit = reg_vif_xmit; 195 dev->hard_start_xmit = reg_vif_xmit;
203 dev->get_stats = reg_vif_get_stats;
204 dev->destructor = free_netdev; 196 dev->destructor = free_netdev;
205} 197}
206 198
@@ -209,8 +201,7 @@ static struct net_device *ipmr_reg_vif(void)
209 struct net_device *dev; 201 struct net_device *dev;
210 struct in_device *in_dev; 202 struct in_device *in_dev;
211 203
212 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg", 204 dev = alloc_netdev(0, "pimreg", reg_vif_setup);
213 reg_vif_setup);
214 205
215 if (dev == NULL) 206 if (dev == NULL)
216 return NULL; 207 return NULL;
@@ -1170,8 +1161,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1170 if (vif->flags & VIFF_REGISTER) { 1161 if (vif->flags & VIFF_REGISTER) {
1171 vif->pkt_out++; 1162 vif->pkt_out++;
1172 vif->bytes_out+=skb->len; 1163 vif->bytes_out+=skb->len;
1173 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; 1164 vif->dev->stats.tx_bytes += skb->len;
1174 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; 1165 vif->dev->stats.tx_packets++;
1175 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1166 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1176 kfree_skb(skb); 1167 kfree_skb(skb);
1177 return; 1168 return;
@@ -1230,8 +1221,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1230 if (vif->flags & VIFF_TUNNEL) { 1221 if (vif->flags & VIFF_TUNNEL) {
1231 ip_encap(skb, vif->local, vif->remote); 1222 ip_encap(skb, vif->local, vif->remote);
1232 /* FIXME: extra output firewall step used to be here. --RR */ 1223 /* FIXME: extra output firewall step used to be here. --RR */
1233 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; 1224 vif->dev->stats.tx_packets++;
1234 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; 1225 vif->dev->stats.tx_bytes += skb->len;
1235 } 1226 }
1236 1227
1237 IPCB(skb)->flags |= IPSKB_FORWARDED; 1228 IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -1487,8 +1478,8 @@ int pim_rcv_v1(struct sk_buff * skb)
1487 skb->pkt_type = PACKET_HOST; 1478 skb->pkt_type = PACKET_HOST;
1488 dst_release(skb->dst); 1479 dst_release(skb->dst);
1489 skb->dst = NULL; 1480 skb->dst = NULL;
1490 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1481 reg_dev->stats.rx_bytes += skb->len;
1491 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1482 reg_dev->stats.rx_packets++;
1492 nf_reset(skb); 1483 nf_reset(skb);
1493 netif_rx(skb); 1484 netif_rx(skb);
1494 dev_put(reg_dev); 1485 dev_put(reg_dev);
@@ -1542,8 +1533,8 @@ static int pim_rcv(struct sk_buff * skb)
1542 skb->ip_summed = 0; 1533 skb->ip_summed = 0;
1543 skb->pkt_type = PACKET_HOST; 1534 skb->pkt_type = PACKET_HOST;
1544 dst_release(skb->dst); 1535 dst_release(skb->dst);
1545 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1536 reg_dev->stats.rx_bytes += skb->len;
1546 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1537 reg_dev->stats.rx_packets++;
1547 skb->dst = NULL; 1538 skb->dst = NULL;
1548 nf_reset(skb); 1539 nf_reset(skb);
1549 netif_rx(skb); 1540 netif_rx(skb);
@@ -1887,16 +1878,36 @@ static struct net_protocol pim_protocol = {
1887 * Setup for IP multicast routing 1878 * Setup for IP multicast routing
1888 */ 1879 */
1889 1880
1890void __init ip_mr_init(void) 1881int __init ip_mr_init(void)
1891{ 1882{
1883 int err;
1884
1892 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1885 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1893 sizeof(struct mfc_cache), 1886 sizeof(struct mfc_cache),
1894 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1887 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1895 NULL); 1888 NULL);
1889 if (!mrt_cachep)
1890 return -ENOMEM;
1891
1896 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); 1892 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
1897 register_netdevice_notifier(&ip_mr_notifier); 1893 err = register_netdevice_notifier(&ip_mr_notifier);
1894 if (err)
1895 goto reg_notif_fail;
1896#ifdef CONFIG_PROC_FS
1897 err = -ENOMEM;
1898 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1899 goto proc_vif_fail;
1900 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1901 goto proc_cache_fail;
1902#endif
1903 return 0;
1904reg_notif_fail:
1905 kmem_cache_destroy(mrt_cachep);
1898#ifdef CONFIG_PROC_FS 1906#ifdef CONFIG_PROC_FS
1899 proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops); 1907proc_vif_fail:
1900 proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops); 1908 unregister_netdevice_notifier(&ip_mr_notifier);
1909proc_cache_fail:
1910 proc_net_remove(&init_net, "ip_mr_vif");
1901#endif 1911#endif
1912 return err;
1902} 1913}
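
ip_mr_init() now reports failures instead of returning void, so a failed setup can be propagated to its caller; the cleanup uses the usual goto ladder that unwinds completed steps in reverse order of setup. A stripped-down sketch of that shape follows; the example_* helpers are stand-ins for the cache, notifier and /proc steps, not real kernel functions.

    /* Sketch of the init/unwind ladder; example_* names are placeholders. */
    static int example_init(void)
    {
            int err;

            err = example_alloc_cache();            /* step 1 */
            if (err)
                    return err;

            err = example_register_notifier();      /* step 2 */
            if (err)
                    goto undo_cache;

            err = example_create_proc();            /* step 3 */
            if (err)
                    goto undo_notifier;

            return 0;

    undo_notifier:
            example_unregister_notifier();
    undo_cache:
            example_free_cache();
            return err;
    }
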
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 535abe0c45e7..1f1897a1a702 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_app.c: Application module support for IPVS 2 * ip_vs_app.c: Application module support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 65f1ba112752..f8bdae47a77f 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_conn.c,v 1.31 2003/04/18 09:03:16 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 963981a9d501..bcf6276ba4b2 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_core.c,v 1.34 2003/05/10 03:05:23 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 94c5767c8e01..9a5ace0b4dd6 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_ctl.c,v 1.36 2003/06/08 09:31:19 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * Julian Anastasov <ja@ssi.bg> 10 * Julian Anastasov <ja@ssi.bg>
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index dcf5d46aaa5e..8afc1503ed20 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Destination Hashing scheduling module 2 * IPVS: Destination Hashing scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_dh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * Inspired by the consistent hashing scheduler patch from 6 * Inspired by the consistent hashing scheduler patch from
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index dfa0d713c801..bc04eedd6dbb 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_est.c: simple rate estimator for IPVS 2 * ip_vs_est.c: simple rate estimator for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 59aa166b7678..c1c758e4f733 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_ftp.c: IPVS ftp application module 2 * ip_vs_ftp.c: IPVS ftp application module
3 * 3 *
4 * Version: $Id: ip_vs_ftp.c,v 1.13 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 3888642706ad..0efa3db4b180 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection scheduling module 2 * IPVS: Locality-Based Least-Connection scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_lblc.c,v 1.10 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index daa260eb21cf..8e3bbeb45138 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Locality-Based Least-Connection with Replication scheduler 2 * IPVS: Locality-Based Least-Connection with Replication scheduler
3 * 3 *
4 * Version: $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index d88fef90a641..ac9f08e065d5 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Least-Connection Scheduling module 2 * IPVS: Least-Connection Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_lc.c,v 1.10 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index bc2a9e5f2a7b..a46bf258d420 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Never Queue scheduling module 2 * IPVS: Never Queue scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 4b1c16cbb16b..876714f23d65 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto.c: transport protocol load balancing support for IPVS 2 * ip_vs_proto.c: transport protocol load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto.c,v 1.2 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
index 4bf835e1d86d..73e0ea87c1f5 100644
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ b/net/ipv4/ipvs/ip_vs_proto_ah.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS 2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_ah.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
5 *
6 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
7 * Wensong Zhang <wensong@linuxvirtualserver.org> 5 * Wensong Zhang <wensong@linuxvirtualserver.org>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
index db6a6b7b1a0b..21d70c8ffa54 100644
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_esp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS 2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_esp.c,v 1.1 2003/07/04 15:04:37 wensong Exp $
5 *
6 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
7 * Wensong Zhang <wensong@linuxvirtualserver.org> 5 * Wensong Zhang <wensong@linuxvirtualserver.org>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index b83dc14b0a4d..d0ea467986a0 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS 2 * ip_vs_proto_tcp.c: TCP load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_tcp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 75771cb3cd6f..c6be5d56823f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS 2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_proto_udp.c,v 1.3 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 433f8a947924..c8db12d39e61 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Round-Robin Scheduling module 2 * IPVS: Round-Robin Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_rr.c,v 1.9 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Peter Kese <peter.kese@ijs.si> 5 * Peter Kese <peter.kese@ijs.si>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index 121a32b1b756..b64767309855 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_sched.c,v 1.13 2003/05/10 03:05:23 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * Peter Kese <peter.kese@ijs.si> 9 * Peter Kese <peter.kese@ijs.si>
12 * 10 *
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index dd7c128f9db3..2a7d31358181 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Shortest Expected Delay scheduling module 2 * IPVS: Shortest Expected Delay scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_sed.c,v 1.1 2003/05/10 03:06:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 1b25b00ef1e1..b8fdfac65001 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Source Hashing scheduling module 2 * IPVS: Source Hashing scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_sh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@gnuchina.org> 4 * Authors: Wensong Zhang <wensong@gnuchina.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index eff54efe0351..2d4a86f73325 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -5,8 +5,6 @@
5 * high-performance and highly available server based on a 5 * high-performance and highly available server based on a
6 * cluster of servers. 6 * cluster of servers.
7 * 7 *
8 * Version: $Id: ip_vs_sync.c,v 1.13 2003/06/08 09:31:19 wensong Exp $
9 *
10 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
11 * 9 *
12 * ip_vs_sync: sync connection info from master load balancer to backups 10 * ip_vs_sync: sync connection info from master load balancer to backups
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 8a9d913261d8..772c3cb4eca1 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Weighted Least-Connection Scheduling module 2 * IPVS: Weighted Least-Connection Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Peter Kese <peter.kese@ijs.si> 5 * Peter Kese <peter.kese@ijs.si>
8 * 6 *
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 85c680add6df..1d6932d7dc97 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * IPVS: Weighted Round-Robin Scheduling module 2 * IPVS: Weighted Round-Robin Scheduling module
3 * 3 *
4 * Version: $Id: ip_vs_wrr.c,v 1.12 2002/09/15 08:14:08 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index f63006caea03..9892d4aca42e 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * ip_vs_xmit.c: various packet transmitters for IPVS 2 * ip_vs_xmit.c: various packet transmitters for IPVS
3 * 3 *
4 * Version: $Id: ip_vs_xmit.c,v 1.2 2002/11/30 01:50:35 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> 4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 * Julian Anastasov <ja@ssi.bg> 5 * Julian Anastasov <ja@ssi.bg>
8 * 6 *
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2767841a8cef..6e251402506e 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -365,6 +365,18 @@ config IP_NF_RAW
365 If you want to compile it as a module, say M here and read 365 If you want to compile it as a module, say M here and read
366 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 366 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
367 367
368# security table for MAC policy
369config IP_NF_SECURITY
370 tristate "Security table"
371 depends on IP_NF_IPTABLES
372 depends on SECURITY
373 default m if NETFILTER_ADVANCED=n
374 help
375 This option adds a `security' table to iptables, for use
376 with Mandatory Access Control (MAC) policy.
377
378 If unsure, say N.
379
368# ARP tables 380# ARP tables
369config IP_NF_ARPTABLES 381config IP_NF_ARPTABLES
370 tristate "ARP tables support" 382 tristate "ARP tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index d9b92fbf5579..3f31291f37ce 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
42obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o 42obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
43obj-$(CONFIG_NF_NAT) += iptable_nat.o 43obj-$(CONFIG_NF_NAT) += iptable_nat.o
44obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 44obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
45obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
45 46
46# matches 47# matches
47obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o 48obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 26a37cedcf2e..aa33a4a7a715 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -156,7 +156,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
156 case IPQ_COPY_META: 156 case IPQ_COPY_META:
157 case IPQ_COPY_NONE: 157 case IPQ_COPY_NONE:
158 size = NLMSG_SPACE(sizeof(*pmsg)); 158 size = NLMSG_SPACE(sizeof(*pmsg));
159 data_len = 0;
160 break; 159 break;
161 160
162 case IPQ_COPY_PACKET: 161 case IPQ_COPY_PACKET:
@@ -224,8 +223,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
224 return skb; 223 return skb;
225 224
226nlmsg_failure: 225nlmsg_failure:
227 if (skb)
228 kfree_skb(skb);
229 *errp = -EINVAL; 226 *errp = -EINVAL;
230 printk(KERN_ERR "ip_queue: error creating packet message\n"); 227 printk(KERN_ERR "ip_queue: error creating packet message\n");
231 return NULL; 228 return NULL;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
new file mode 100644
index 000000000000..2b472ac2263a
--- /dev/null
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -0,0 +1,180 @@
1/*
2 * "security" table
3 *
4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context
6 * to DAC.
7 *
8 * Based on iptable_mangle.c
9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18#include <linux/module.h>
19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <net/ip.h>
21
22MODULE_LICENSE("GPL");
23MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
24MODULE_DESCRIPTION("iptables security table, for MAC rules");
25
26#define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
27 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT)
29
30static struct
31{
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35} initial_table __initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58};
59
60static struct xt_table security_table = {
61 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS,
63 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
64 .me = THIS_MODULE,
65 .af = AF_INET,
66};
67
68static unsigned int
69ipt_local_in_hook(unsigned int hook,
70 struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{
75 return ipt_do_table(skb, hook, in, out,
76 nf_local_in_net(in, out)->ipv4.iptable_security);
77}
78
79static unsigned int
80ipt_forward_hook(unsigned int hook,
81 struct sk_buff *skb,
82 const struct net_device *in,
83 const struct net_device *out,
84 int (*okfn)(struct sk_buff *))
85{
86 return ipt_do_table(skb, hook, in, out,
87 nf_forward_net(in, out)->ipv4.iptable_security);
88}
89
90static unsigned int
91ipt_local_out_hook(unsigned int hook,
92 struct sk_buff *skb,
93 const struct net_device *in,
94 const struct net_device *out,
95 int (*okfn)(struct sk_buff *))
96{
97 /* Somebody is playing with raw sockets. */
98 if (skb->len < sizeof(struct iphdr)
99 || ip_hdrlen(skb) < sizeof(struct iphdr)) {
100 if (net_ratelimit())
101 printk(KERN_INFO "iptable_security: ignoring short "
102 "SOCK_RAW packet.\n");
103 return NF_ACCEPT;
104 }
105 return ipt_do_table(skb, hook, in, out,
106 nf_local_out_net(in, out)->ipv4.iptable_security);
107}
108
109static struct nf_hook_ops ipt_ops[] __read_mostly = {
110 {
111 .hook = ipt_local_in_hook,
112 .owner = THIS_MODULE,
113 .pf = PF_INET,
114 .hooknum = NF_INET_LOCAL_IN,
115 .priority = NF_IP_PRI_SECURITY,
116 },
117 {
118 .hook = ipt_forward_hook,
119 .owner = THIS_MODULE,
120 .pf = PF_INET,
121 .hooknum = NF_INET_FORWARD,
122 .priority = NF_IP_PRI_SECURITY,
123 },
124 {
125 .hook = ipt_local_out_hook,
126 .owner = THIS_MODULE,
127 .pf = PF_INET,
128 .hooknum = NF_INET_LOCAL_OUT,
129 .priority = NF_IP_PRI_SECURITY,
130 },
131};
132
133static int __net_init iptable_security_net_init(struct net *net)
134{
135 net->ipv4.iptable_security =
136 ipt_register_table(net, &security_table, &initial_table.repl);
137
138 if (IS_ERR(net->ipv4.iptable_security))
139 return PTR_ERR(net->ipv4.iptable_security);
140
141 return 0;
142}
143
144static void __net_exit iptable_security_net_exit(struct net *net)
145{
146 ipt_unregister_table(net->ipv4.iptable_security);
147}
148
149static struct pernet_operations iptable_security_net_ops = {
150 .init = iptable_security_net_init,
151 .exit = iptable_security_net_exit,
152};
153
154static int __init iptable_security_init(void)
155{
156 int ret;
157
158 ret = register_pernet_subsys(&iptable_security_net_ops);
159 if (ret < 0)
160 return ret;
161
162 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
163 if (ret < 0)
164 goto cleanup_table;
165
166 return ret;
167
168cleanup_table:
169 unregister_pernet_subsys(&iptable_security_net_ops);
170 return ret;
171}
172
173static void __exit iptable_security_fini(void)
174{
175 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
176 unregister_pernet_subsys(&iptable_security_net_ops);
177}
178
179module_init(iptable_security_init);
180module_exit(iptable_security_fini);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 78ab19accace..97791048fa9b 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -87,9 +87,8 @@ static int icmp_packet(struct nf_conn *ct,
87 means this will only run once even if count hits zero twice 87 means this will only run once even if count hits zero twice
88 (theoretically possible with SMP) */ 88 (theoretically possible with SMP) */
89 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 89 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
90 if (atomic_dec_and_test(&ct->proto.icmp.count) 90 if (atomic_dec_and_test(&ct->proto.icmp.count))
91 && del_timer(&ct->timeout)) 91 nf_ct_kill_acct(ct, ctinfo, skb);
92 ct->timeout.function((unsigned long)ct);
93 } else { 92 } else {
94 atomic_inc(&ct->proto.icmp.count); 93 atomic_inc(&ct->proto.icmp.count);
95 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); 94 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
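
For comparison, the removed lines open-coded the reply-direction teardown: drop the ICMP count and, if the timeout timer could still be deleted, invoke its handler by hand to destroy the connection. nf_ct_kill_acct() wraps that sequence, plus the final accounting, in one call. The fragment below only restates the two forms as they appear in the hunk; it is not the helper's actual body.

    /* Old, open-coded form (from the removed lines above). */
    if (atomic_dec_and_test(&ct->proto.icmp.count) &&
        del_timer(&ct->timeout))
            ct->timeout.function((unsigned long)ct);

    /* New form, as added by the hunk. */
    if (atomic_dec_and_test(&ct->proto.icmp.count))
            nf_ct_kill_acct(ct, ctinfo, skb);
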
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 552169b41b16..eb5cee279c5f 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,8 +7,6 @@
7 * PROC file system. It is mainly used for debugging and 7 * PROC file system. It is mainly used for debugging and
8 * statistics. 8 * statistics.
9 * 9 *
10 * Version: $Id: proc.c,v 1.45 2001/05/16 16:45:35 davem Exp $
11 *
12 * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> 11 * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
14 * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> 12 * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de>
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 971ab9356e51..ea50da0649fd 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * INET protocol dispatch tables. 6 * INET protocol dispatch tables.
7 * 7 *
8 * Version: $Id: protocol.c,v 1.14 2001/05/18 02:25:49 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * 10 *
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 37a1ecd9d600..925fdf18cf92 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * RAW - implementation of IP "raw" sockets. 6 * RAW - implementation of IP "raw" sockets.
7 * 7 *
8 * Version: $Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * 10 *
@@ -608,12 +606,11 @@ static void raw_close(struct sock *sk, long timeout)
608 sk_common_release(sk); 606 sk_common_release(sk);
609} 607}
610 608
611static int raw_destroy(struct sock *sk) 609static void raw_destroy(struct sock *sk)
612{ 610{
613 lock_sock(sk); 611 lock_sock(sk);
614 ip_flush_pending_frames(sk); 612 ip_flush_pending_frames(sk);
615 release_sock(sk); 613 release_sock(sk);
616 return 0;
617} 614}
618 615
619/* This gets rid of all the nasties in af_inet. -DaveM */ 616/* This gets rid of all the nasties in af_inet. -DaveM */
@@ -947,7 +944,7 @@ static int raw_seq_show(struct seq_file *seq, void *v)
947 if (v == SEQ_START_TOKEN) 944 if (v == SEQ_START_TOKEN)
948 seq_printf(seq, " sl local_address rem_address st tx_queue " 945 seq_printf(seq, " sl local_address rem_address st tx_queue "
949 "rx_queue tr tm->when retrnsmt uid timeout " 946 "rx_queue tr tm->when retrnsmt uid timeout "
950 "inode drops\n"); 947 "inode ref pointer drops\n");
951 else 948 else
952 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); 949 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
953 return 0; 950 return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 96be336064fb..113cd2512ba7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * ROUTE - implementation of the IP router. 6 * ROUTE - implementation of the IP router.
7 * 7 *
8 * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
@@ -134,7 +132,6 @@ static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
134 132
135static void rt_worker_func(struct work_struct *work); 133static void rt_worker_func(struct work_struct *work);
136static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); 134static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
137static struct timer_list rt_secret_timer;
138 135
139/* 136/*
140 * Interface to generic destination cache. 137 * Interface to generic destination cache.
@@ -253,20 +250,25 @@ static inline void rt_hash_lock_init(void)
253static struct rt_hash_bucket *rt_hash_table __read_mostly; 250static struct rt_hash_bucket *rt_hash_table __read_mostly;
254static unsigned rt_hash_mask __read_mostly; 251static unsigned rt_hash_mask __read_mostly;
255static unsigned int rt_hash_log __read_mostly; 252static unsigned int rt_hash_log __read_mostly;
256static atomic_t rt_genid __read_mostly;
257 253
258static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 254static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
259#define RT_CACHE_STAT_INC(field) \ 255#define RT_CACHE_STAT_INC(field) \
260 (__raw_get_cpu_var(rt_cache_stat).field++) 256 (__raw_get_cpu_var(rt_cache_stat).field++)
261 257
262static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx) 258static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
259 int genid)
263{ 260{
264 return jhash_3words((__force u32)(__be32)(daddr), 261 return jhash_3words((__force u32)(__be32)(daddr),
265 (__force u32)(__be32)(saddr), 262 (__force u32)(__be32)(saddr),
266 idx, atomic_read(&rt_genid)) 263 idx, genid)
267 & rt_hash_mask; 264 & rt_hash_mask;
268} 265}
269 266
267static inline int rt_genid(struct net *net)
268{
269 return atomic_read(&net->ipv4.rt_genid);
270}
271
270#ifdef CONFIG_PROC_FS 272#ifdef CONFIG_PROC_FS
271struct rt_cache_iter_state { 273struct rt_cache_iter_state {
272 struct seq_net_private p; 274 struct seq_net_private p;
@@ -336,7 +338,7 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
336 struct rt_cache_iter_state *st = seq->private; 338 struct rt_cache_iter_state *st = seq->private;
337 if (*pos) 339 if (*pos)
338 return rt_cache_get_idx(seq, *pos - 1); 340 return rt_cache_get_idx(seq, *pos - 1);
339 st->genid = atomic_read(&rt_genid); 341 st->genid = rt_genid(seq_file_net(seq));
340 return SEQ_START_TOKEN; 342 return SEQ_START_TOKEN;
341} 343}
342 344
@@ -683,6 +685,11 @@ static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
683 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); 685 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
684} 686}
685 687
688static inline int rt_is_expired(struct rtable *rth)
689{
690 return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
691}
692
686/* 693/*
687 * Perform a full scan of hash table and free all entries. 694 * Perform a full scan of hash table and free all entries.
688 * Can be called by a softirq or a process. 695 * Can be called by a softirq or a process.
@@ -692,6 +699,7 @@ static void rt_do_flush(int process_context)
692{ 699{
693 unsigned int i; 700 unsigned int i;
694 struct rtable *rth, *next; 701 struct rtable *rth, *next;
702 struct rtable * tail;
695 703
696 for (i = 0; i <= rt_hash_mask; i++) { 704 for (i = 0; i <= rt_hash_mask; i++) {
697 if (process_context && need_resched()) 705 if (process_context && need_resched())
@@ -701,11 +709,39 @@ static void rt_do_flush(int process_context)
701 continue; 709 continue;
702 710
703 spin_lock_bh(rt_hash_lock_addr(i)); 711 spin_lock_bh(rt_hash_lock_addr(i));
712#ifdef CONFIG_NET_NS
713 {
714 struct rtable ** prev, * p;
715
716 rth = rt_hash_table[i].chain;
717
718 /* defer releasing the head of the list after spin_unlock */
719 for (tail = rth; tail; tail = tail->u.dst.rt_next)
720 if (!rt_is_expired(tail))
721 break;
722 if (rth != tail)
723 rt_hash_table[i].chain = tail;
724
725 /* call rt_free on entries after the tail requiring flush */
726 prev = &rt_hash_table[i].chain;
727 for (p = *prev; p; p = next) {
728 next = p->u.dst.rt_next;
729 if (!rt_is_expired(p)) {
730 prev = &p->u.dst.rt_next;
731 } else {
732 *prev = next;
733 rt_free(p);
734 }
735 }
736 }
737#else
704 rth = rt_hash_table[i].chain; 738 rth = rt_hash_table[i].chain;
705 rt_hash_table[i].chain = NULL; 739 rt_hash_table[i].chain = NULL;
740 tail = NULL;
741#endif
706 spin_unlock_bh(rt_hash_lock_addr(i)); 742 spin_unlock_bh(rt_hash_lock_addr(i));
707 743
708 for (; rth; rth = next) { 744 for (; rth != tail; rth = next) {
709 next = rth->u.dst.rt_next; 745 next = rth->u.dst.rt_next;
710 rt_free(rth); 746 rt_free(rth);
711 } 747 }
@@ -738,7 +774,7 @@ static void rt_check_expire(void)
738 continue; 774 continue;
739 spin_lock_bh(rt_hash_lock_addr(i)); 775 spin_lock_bh(rt_hash_lock_addr(i));
740 while ((rth = *rthp) != NULL) { 776 while ((rth = *rthp) != NULL) {
741 if (rth->rt_genid != atomic_read(&rt_genid)) { 777 if (rt_is_expired(rth)) {
742 *rthp = rth->u.dst.rt_next; 778 *rthp = rth->u.dst.rt_next;
743 rt_free(rth); 779 rt_free(rth);
744 continue; 780 continue;
@@ -781,21 +817,21 @@ static void rt_worker_func(struct work_struct *work)
781 * many times (2^24) without giving recent rt_genid. 817 * many times (2^24) without giving recent rt_genid.
782 * Jenkins hash is strong enough that litle changes of rt_genid are OK. 818 * Jenkins hash is strong enough that litle changes of rt_genid are OK.
783 */ 819 */
784static void rt_cache_invalidate(void) 820static void rt_cache_invalidate(struct net *net)
785{ 821{
786 unsigned char shuffle; 822 unsigned char shuffle;
787 823
788 get_random_bytes(&shuffle, sizeof(shuffle)); 824 get_random_bytes(&shuffle, sizeof(shuffle));
789 atomic_add(shuffle + 1U, &rt_genid); 825 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
790} 826}
791 827
792/* 828/*
793 * delay < 0 : invalidate cache (fast : entries will be deleted later) 829 * delay < 0 : invalidate cache (fast : entries will be deleted later)
794 * delay >= 0 : invalidate & flush cache (can be long) 830 * delay >= 0 : invalidate & flush cache (can be long)
795 */ 831 */
796void rt_cache_flush(int delay) 832void rt_cache_flush(struct net *net, int delay)
797{ 833{
798 rt_cache_invalidate(); 834 rt_cache_invalidate(net);
799 if (delay >= 0) 835 if (delay >= 0)
800 rt_do_flush(!in_softirq()); 836 rt_do_flush(!in_softirq());
801} 837}
@@ -803,10 +839,11 @@ void rt_cache_flush(int delay)
803/* 839/*
804 * We change rt_genid and let gc do the cleanup 840 * We change rt_genid and let gc do the cleanup
805 */ 841 */
806static void rt_secret_rebuild(unsigned long dummy) 842static void rt_secret_rebuild(unsigned long __net)
807{ 843{
808 rt_cache_invalidate(); 844 struct net *net = (struct net *)__net;
809 mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); 845 rt_cache_invalidate(net);
846 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
810} 847}
811 848
812/* 849/*
@@ -882,7 +919,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
882 rthp = &rt_hash_table[k].chain; 919 rthp = &rt_hash_table[k].chain;
883 spin_lock_bh(rt_hash_lock_addr(k)); 920 spin_lock_bh(rt_hash_lock_addr(k));
884 while ((rth = *rthp) != NULL) { 921 while ((rth = *rthp) != NULL) {
885 if (rth->rt_genid == atomic_read(&rt_genid) && 922 if (!rt_is_expired(rth) &&
886 !rt_may_expire(rth, tmo, expire)) { 923 !rt_may_expire(rth, tmo, expire)) {
887 tmo >>= 1; 924 tmo >>= 1;
888 rthp = &rth->u.dst.rt_next; 925 rthp = &rth->u.dst.rt_next;
@@ -964,7 +1001,7 @@ restart:
964 1001
965 spin_lock_bh(rt_hash_lock_addr(hash)); 1002 spin_lock_bh(rt_hash_lock_addr(hash));
966 while ((rth = *rthp) != NULL) { 1003 while ((rth = *rthp) != NULL) {
967 if (rth->rt_genid != atomic_read(&rt_genid)) { 1004 if (rt_is_expired(rth)) {
968 *rthp = rth->u.dst.rt_next; 1005 *rthp = rth->u.dst.rt_next;
969 rt_free(rth); 1006 rt_free(rth);
970 continue; 1007 continue;
@@ -1140,7 +1177,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
1140 spin_lock_bh(rt_hash_lock_addr(hash)); 1177 spin_lock_bh(rt_hash_lock_addr(hash));
1141 ip_rt_put(rt); 1178 ip_rt_put(rt);
1142 while ((aux = *rthp) != NULL) { 1179 while ((aux = *rthp) != NULL) {
1143 if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) { 1180 if (aux == rt || rt_is_expired(aux)) {
1144 *rthp = aux->u.dst.rt_next; 1181 *rthp = aux->u.dst.rt_next;
1145 rt_free(aux); 1182 rt_free(aux);
1146 continue; 1183 continue;
@@ -1182,7 +1219,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1182 1219
1183 for (i = 0; i < 2; i++) { 1220 for (i = 0; i < 2; i++) {
1184 for (k = 0; k < 2; k++) { 1221 for (k = 0; k < 2; k++) {
1185 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); 1222 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1223 rt_genid(net));
1186 1224
1187 rthp=&rt_hash_table[hash].chain; 1225 rthp=&rt_hash_table[hash].chain;
1188 1226
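
As the hunk above shows, rt_hash() now takes the namespace's generation count as a fourth input, so a genid bump also changes which bucket a given (daddr, saddr, oif) triple hashes to and stale entries are simply never found again. The kernel uses its jhash-based hashing; the toy mixer below only illustrates that folding one extra word into the hash input moves the bucket (hash function and table size are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define TOY_RT_HASH_BUCKETS 256u   /* illustrative table size */

/* toy stand-in for the kernel's jhash-based rt_hash(); any decent mixer
 * works for the illustration -- this is a simple xor/multiply scramble */
static unsigned toy_rt_hash(uint32_t daddr, uint32_t saddr, int ifindex, int genid)
{
    uint32_t h = daddr ^ saddr ^ (uint32_t)ifindex ^ (uint32_t)genid;
    h *= 0x9e3779b1u;          /* golden-ratio multiply to spread bits */
    h ^= h >> 16;
    return h & (TOY_RT_HASH_BUCKETS - 1);
}

int main(void)
{
    uint32_t daddr = 0x0a000001, saddr = 0x0a000002;
    int oif = 3;

    /* same flow, different generation => (almost always) a different bucket */
    printf("genid 41: bucket %u\n", toy_rt_hash(daddr, saddr, oif, 41));
    printf("genid 42: bucket %u\n", toy_rt_hash(daddr, saddr, oif, 42));
    return 0;
}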
@@ -1194,7 +1232,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1194 rth->fl.fl4_src != skeys[i] || 1232 rth->fl.fl4_src != skeys[i] ||
1195 rth->fl.oif != ikeys[k] || 1233 rth->fl.oif != ikeys[k] ||
1196 rth->fl.iif != 0 || 1234 rth->fl.iif != 0 ||
1197 rth->rt_genid != atomic_read(&rt_genid) || 1235 rt_is_expired(rth) ||
1198 !net_eq(dev_net(rth->u.dst.dev), net)) { 1236 !net_eq(dev_net(rth->u.dst.dev), net)) {
1199 rthp = &rth->u.dst.rt_next; 1237 rthp = &rth->u.dst.rt_next;
1200 continue; 1238 continue;
@@ -1233,7 +1271,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1233 rt->u.dst.neighbour = NULL; 1271 rt->u.dst.neighbour = NULL;
1234 rt->u.dst.hh = NULL; 1272 rt->u.dst.hh = NULL;
1235 rt->u.dst.xfrm = NULL; 1273 rt->u.dst.xfrm = NULL;
1236 rt->rt_genid = atomic_read(&rt_genid); 1274 rt->rt_genid = rt_genid(net);
1237 rt->rt_flags |= RTCF_REDIRECTED; 1275 rt->rt_flags |= RTCF_REDIRECTED;
1238 1276
1239 /* Gateway is different ... */ 1277 /* Gateway is different ... */
@@ -1297,7 +1335,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1297 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1335 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1298 rt->u.dst.expires) { 1336 rt->u.dst.expires) {
1299 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1337 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1300 rt->fl.oif); 1338 rt->fl.oif,
1339 rt_genid(dev_net(dst->dev)));
1301#if RT_CACHE_DEBUG >= 1 1340#if RT_CACHE_DEBUG >= 1
1302 printk(KERN_DEBUG "ipv4_negative_advice: redirect to " 1341 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1303 NIPQUAD_FMT "/%02x dropped\n", 1342 NIPQUAD_FMT "/%02x dropped\n",
@@ -1446,7 +1485,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1446 1485
1447 for (k = 0; k < 2; k++) { 1486 for (k = 0; k < 2; k++) {
1448 for (i = 0; i < 2; i++) { 1487 for (i = 0; i < 2; i++) {
1449 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); 1488 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1489 rt_genid(net));
1450 1490
1451 rcu_read_lock(); 1491 rcu_read_lock();
1452 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1492 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1461,7 +1501,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1461 rth->fl.iif != 0 || 1501 rth->fl.iif != 0 ||
1462 dst_metric_locked(&rth->u.dst, RTAX_MTU) || 1502 dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
1463 !net_eq(dev_net(rth->u.dst.dev), net) || 1503 !net_eq(dev_net(rth->u.dst.dev), net) ||
1464 rth->rt_genid != atomic_read(&rt_genid)) 1504 !rt_is_expired(rth))
1465 continue; 1505 continue;
1466 1506
1467 if (new_mtu < 68 || new_mtu >= old_mtu) { 1507 if (new_mtu < 68 || new_mtu >= old_mtu) {
@@ -1696,7 +1736,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1696 rth->fl.oif = 0; 1736 rth->fl.oif = 0;
1697 rth->rt_gateway = daddr; 1737 rth->rt_gateway = daddr;
1698 rth->rt_spec_dst= spec_dst; 1738 rth->rt_spec_dst= spec_dst;
1699 rth->rt_genid = atomic_read(&rt_genid); 1739 rth->rt_genid = rt_genid(dev_net(dev));
1700 rth->rt_flags = RTCF_MULTICAST; 1740 rth->rt_flags = RTCF_MULTICAST;
1701 rth->rt_type = RTN_MULTICAST; 1741 rth->rt_type = RTN_MULTICAST;
1702 if (our) { 1742 if (our) {
@@ -1711,7 +1751,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1711 RT_CACHE_STAT_INC(in_slow_mc); 1751 RT_CACHE_STAT_INC(in_slow_mc);
1712 1752
1713 in_dev_put(in_dev); 1753 in_dev_put(in_dev);
1714 hash = rt_hash(daddr, saddr, dev->ifindex); 1754 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1715 return rt_intern_hash(hash, rth, &skb->rtable); 1755 return rt_intern_hash(hash, rth, &skb->rtable);
1716 1756
1717e_nobufs: 1757e_nobufs:
@@ -1837,7 +1877,7 @@ static int __mkroute_input(struct sk_buff *skb,
1837 1877
1838 rth->u.dst.input = ip_forward; 1878 rth->u.dst.input = ip_forward;
1839 rth->u.dst.output = ip_output; 1879 rth->u.dst.output = ip_output;
1840 rth->rt_genid = atomic_read(&rt_genid); 1880 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
1841 1881
1842 rt_set_nexthop(rth, res, itag); 1882 rt_set_nexthop(rth, res, itag);
1843 1883
@@ -1872,7 +1912,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
1872 return err; 1912 return err;
1873 1913
1874 /* put it into the cache */ 1914 /* put it into the cache */
1875 hash = rt_hash(daddr, saddr, fl->iif); 1915 hash = rt_hash(daddr, saddr, fl->iif,
1916 rt_genid(dev_net(rth->u.dst.dev)));
1876 return rt_intern_hash(hash, rth, &skb->rtable); 1917 return rt_intern_hash(hash, rth, &skb->rtable);
1877} 1918}
1878 1919
@@ -1998,7 +2039,7 @@ local_input:
1998 goto e_nobufs; 2039 goto e_nobufs;
1999 2040
2000 rth->u.dst.output= ip_rt_bug; 2041 rth->u.dst.output= ip_rt_bug;
2001 rth->rt_genid = atomic_read(&rt_genid); 2042 rth->rt_genid = rt_genid(net);
2002 2043
2003 atomic_set(&rth->u.dst.__refcnt, 1); 2044 atomic_set(&rth->u.dst.__refcnt, 1);
2004 rth->u.dst.flags= DST_HOST; 2045 rth->u.dst.flags= DST_HOST;
@@ -2028,7 +2069,7 @@ local_input:
2028 rth->rt_flags &= ~RTCF_LOCAL; 2069 rth->rt_flags &= ~RTCF_LOCAL;
2029 } 2070 }
2030 rth->rt_type = res.type; 2071 rth->rt_type = res.type;
2031 hash = rt_hash(daddr, saddr, fl.iif); 2072 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2032 err = rt_intern_hash(hash, rth, &skb->rtable); 2073 err = rt_intern_hash(hash, rth, &skb->rtable);
2033 goto done; 2074 goto done;
2034 2075
@@ -2079,7 +2120,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2079 2120
2080 net = dev_net(dev); 2121 net = dev_net(dev);
2081 tos &= IPTOS_RT_MASK; 2122 tos &= IPTOS_RT_MASK;
2082 hash = rt_hash(daddr, saddr, iif); 2123 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2083 2124
2084 rcu_read_lock(); 2125 rcu_read_lock();
2085 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2126 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2091,7 +2132,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2091 (rth->fl.fl4_tos ^ tos)) == 0 && 2132 (rth->fl.fl4_tos ^ tos)) == 0 &&
2092 rth->fl.mark == skb->mark && 2133 rth->fl.mark == skb->mark &&
2093 net_eq(dev_net(rth->u.dst.dev), net) && 2134 net_eq(dev_net(rth->u.dst.dev), net) &&
2094 rth->rt_genid == atomic_read(&rt_genid)) { 2135 !rt_is_expired(rth)) {
2095 dst_use(&rth->u.dst, jiffies); 2136 dst_use(&rth->u.dst, jiffies);
2096 RT_CACHE_STAT_INC(in_hit); 2137 RT_CACHE_STAT_INC(in_hit);
2097 rcu_read_unlock(); 2138 rcu_read_unlock();
@@ -2219,7 +2260,7 @@ static int __mkroute_output(struct rtable **result,
2219 rth->rt_spec_dst= fl->fl4_src; 2260 rth->rt_spec_dst= fl->fl4_src;
2220 2261
2221 rth->u.dst.output=ip_output; 2262 rth->u.dst.output=ip_output;
2222 rth->rt_genid = atomic_read(&rt_genid); 2263 rth->rt_genid = rt_genid(dev_net(dev_out));
2223 2264
2224 RT_CACHE_STAT_INC(out_slow_tot); 2265 RT_CACHE_STAT_INC(out_slow_tot);
2225 2266
@@ -2268,7 +2309,8 @@ static int ip_mkroute_output(struct rtable **rp,
2268 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); 2309 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2269 unsigned hash; 2310 unsigned hash;
2270 if (err == 0) { 2311 if (err == 0) {
2271 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); 2312 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2313 rt_genid(dev_net(dev_out)));
2272 err = rt_intern_hash(hash, rth, rp); 2314 err = rt_intern_hash(hash, rth, rp);
2273 } 2315 }
2274 2316
@@ -2480,7 +2522,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2480 unsigned hash; 2522 unsigned hash;
2481 struct rtable *rth; 2523 struct rtable *rth;
2482 2524
2483 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif); 2525 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2484 2526
2485 rcu_read_lock_bh(); 2527 rcu_read_lock_bh();
2486 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2528 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2493,7 +2535,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2493 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2535 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2494 (IPTOS_RT_MASK | RTO_ONLINK)) && 2536 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2495 net_eq(dev_net(rth->u.dst.dev), net) && 2537 net_eq(dev_net(rth->u.dst.dev), net) &&
2496 rth->rt_genid == atomic_read(&rt_genid)) { 2538 !rt_is_expired(rth)) {
2497 dst_use(&rth->u.dst, jiffies); 2539 dst_use(&rth->u.dst, jiffies);
2498 RT_CACHE_STAT_INC(out_hit); 2540 RT_CACHE_STAT_INC(out_hit);
2499 rcu_read_unlock_bh(); 2541 rcu_read_unlock_bh();
@@ -2524,7 +2566,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2524}; 2566};
2525 2567
2526 2568
2527static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp) 2569static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2528{ 2570{
2529 struct rtable *ort = *rp; 2571 struct rtable *ort = *rp;
2530 struct rtable *rt = (struct rtable *) 2572 struct rtable *rt = (struct rtable *)
@@ -2548,7 +2590,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
2548 rt->idev = ort->idev; 2590 rt->idev = ort->idev;
2549 if (rt->idev) 2591 if (rt->idev)
2550 in_dev_hold(rt->idev); 2592 in_dev_hold(rt->idev);
2551 rt->rt_genid = atomic_read(&rt_genid); 2593 rt->rt_genid = rt_genid(net);
2552 rt->rt_flags = ort->rt_flags; 2594 rt->rt_flags = ort->rt_flags;
2553 rt->rt_type = ort->rt_type; 2595 rt->rt_type = ort->rt_type;
2554 rt->rt_dst = ort->rt_dst; 2596 rt->rt_dst = ort->rt_dst;
@@ -2584,7 +2626,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2584 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, 2626 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
2585 flags ? XFRM_LOOKUP_WAIT : 0); 2627 flags ? XFRM_LOOKUP_WAIT : 0);
2586 if (err == -EREMOTE) 2628 if (err == -EREMOTE)
2587 err = ipv4_dst_blackhole(rp, flp); 2629 err = ipv4_dst_blackhole(net, rp, flp);
2588 2630
2589 return err; 2631 return err;
2590 } 2632 }
@@ -2803,7 +2845,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2803 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2845 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2804 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 2846 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
2805 continue; 2847 continue;
2806 if (rt->rt_genid != atomic_read(&rt_genid)) 2848 if (rt_is_expired(rt))
2807 continue; 2849 continue;
2808 skb->dst = dst_clone(&rt->u.dst); 2850 skb->dst = dst_clone(&rt->u.dst);
2809 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 2851 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -2827,19 +2869,27 @@ done:
2827 2869
2828void ip_rt_multicast_event(struct in_device *in_dev) 2870void ip_rt_multicast_event(struct in_device *in_dev)
2829{ 2871{
2830 rt_cache_flush(0); 2872 rt_cache_flush(dev_net(in_dev->dev), 0);
2831} 2873}
2832 2874
2833#ifdef CONFIG_SYSCTL 2875#ifdef CONFIG_SYSCTL
2834static int flush_delay;
2835
2836static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, 2876static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2837 struct file *filp, void __user *buffer, 2877 struct file *filp, void __user *buffer,
2838 size_t *lenp, loff_t *ppos) 2878 size_t *lenp, loff_t *ppos)
2839{ 2879{
2840 if (write) { 2880 if (write) {
2881 int flush_delay;
2882 struct net *net;
2883 static DEFINE_MUTEX(flush_mutex);
2884
2885 mutex_lock(&flush_mutex);
2886 ctl->data = &flush_delay;
2841 proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 2887 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2842 rt_cache_flush(flush_delay); 2888 ctl->data = NULL;
2889 mutex_unlock(&flush_mutex);
2890
2891 net = (struct net *)ctl->extra1;
2892 rt_cache_flush(net, flush_delay);
2843 return 0; 2893 return 0;
2844 } 2894 }
2845 2895
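
After this change the "flush" handler no longer parses into a shared static flush_delay: it points ctl->data at an on-stack int under a local mutex, lets proc_dointvec() do the parsing, then flushes the namespace stashed in ctl->extra1 by sysctl_route_net_init(). Pieced together from the hunk above, the resulting function reads roughly as follows (a readability reconstruction, not a verbatim copy of the file):

static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
				     struct file *filp, void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		struct net *net;
		static DEFINE_MUTEX(flush_mutex);

		mutex_lock(&flush_mutex);
		ctl->data = &flush_delay;	/* parse the write into the local */
		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
		ctl->data = NULL;
		mutex_unlock(&flush_mutex);

		net = (struct net *)ctl->extra1;	/* set per-netns at registration */
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	/* read path unchanged by this patch (the file is mode 0200 anyway) */
	return -EINVAL;
}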
@@ -2855,25 +2905,18 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2855 size_t newlen) 2905 size_t newlen)
2856{ 2906{
2857 int delay; 2907 int delay;
2908 struct net *net;
2858 if (newlen != sizeof(int)) 2909 if (newlen != sizeof(int))
2859 return -EINVAL; 2910 return -EINVAL;
2860 if (get_user(delay, (int __user *)newval)) 2911 if (get_user(delay, (int __user *)newval))
2861 return -EFAULT; 2912 return -EFAULT;
2862 rt_cache_flush(delay); 2913 net = (struct net *)table->extra1;
2914 rt_cache_flush(net, delay);
2863 return 0; 2915 return 0;
2864} 2916}
2865 2917
2866ctl_table ipv4_route_table[] = { 2918ctl_table ipv4_route_table[] = {
2867 { 2919 {
2868 .ctl_name = NET_IPV4_ROUTE_FLUSH,
2869 .procname = "flush",
2870 .data = &flush_delay,
2871 .maxlen = sizeof(int),
2872 .mode = 0200,
2873 .proc_handler = &ipv4_sysctl_rtcache_flush,
2874 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
2875 },
2876 {
2877 .ctl_name = NET_IPV4_ROUTE_GC_THRESH, 2920 .ctl_name = NET_IPV4_ROUTE_GC_THRESH,
2878 .procname = "gc_thresh", 2921 .procname = "gc_thresh",
2879 .data = &ipv4_dst_ops.gc_thresh, 2922 .data = &ipv4_dst_ops.gc_thresh,
@@ -3011,8 +3054,97 @@ ctl_table ipv4_route_table[] = {
3011 }, 3054 },
3012 { .ctl_name = 0 } 3055 { .ctl_name = 0 }
3013}; 3056};
3057
3058static __net_initdata struct ctl_path ipv4_route_path[] = {
3059 { .procname = "net", .ctl_name = CTL_NET, },
3060 { .procname = "ipv4", .ctl_name = NET_IPV4, },
3061 { .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
3062 { },
3063};
3064
3065
3066static struct ctl_table ipv4_route_flush_table[] = {
3067 {
3068 .ctl_name = NET_IPV4_ROUTE_FLUSH,
3069 .procname = "flush",
3070 .maxlen = sizeof(int),
3071 .mode = 0200,
3072 .proc_handler = &ipv4_sysctl_rtcache_flush,
3073 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
3074 },
3075 { .ctl_name = 0 },
3076};
3077
3078static __net_init int sysctl_route_net_init(struct net *net)
3079{
3080 struct ctl_table *tbl;
3081
3082 tbl = ipv4_route_flush_table;
3083 if (net != &init_net) {
3084 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3085 if (tbl == NULL)
3086 goto err_dup;
3087 }
3088 tbl[0].extra1 = net;
3089
3090 net->ipv4.route_hdr =
3091 register_net_sysctl_table(net, ipv4_route_path, tbl);
3092 if (net->ipv4.route_hdr == NULL)
3093 goto err_reg;
3094 return 0;
3095
3096err_reg:
3097 if (tbl != ipv4_route_flush_table)
3098 kfree(tbl);
3099err_dup:
3100 return -ENOMEM;
3101}
3102
3103static __net_exit void sysctl_route_net_exit(struct net *net)
3104{
3105 struct ctl_table *tbl;
3106
3107 tbl = net->ipv4.route_hdr->ctl_table_arg;
3108 unregister_net_sysctl_table(net->ipv4.route_hdr);
3109 BUG_ON(tbl == ipv4_route_flush_table);
3110 kfree(tbl);
3111}
3112
3113static __net_initdata struct pernet_operations sysctl_route_ops = {
3114 .init = sysctl_route_net_init,
3115 .exit = sysctl_route_net_exit,
3116};
3014#endif 3117#endif
3015 3118
3119
3120static __net_init int rt_secret_timer_init(struct net *net)
3121{
3122 atomic_set(&net->ipv4.rt_genid,
3123 (int) ((num_physpages ^ (num_physpages>>8)) ^
3124 (jiffies ^ (jiffies >> 7))));
3125
3126 net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
3127 net->ipv4.rt_secret_timer.data = (unsigned long)net;
3128 init_timer_deferrable(&net->ipv4.rt_secret_timer);
3129
3130 net->ipv4.rt_secret_timer.expires =
3131 jiffies + net_random() % ip_rt_secret_interval +
3132 ip_rt_secret_interval;
3133 add_timer(&net->ipv4.rt_secret_timer);
3134 return 0;
3135}
3136
3137static __net_exit void rt_secret_timer_exit(struct net *net)
3138{
3139 del_timer_sync(&net->ipv4.rt_secret_timer);
3140}
3141
3142static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3143 .init = rt_secret_timer_init,
3144 .exit = rt_secret_timer_exit,
3145};
3146
3147
3016#ifdef CONFIG_NET_CLS_ROUTE 3148#ifdef CONFIG_NET_CLS_ROUTE
3017struct ip_rt_acct *ip_rt_acct __read_mostly; 3149struct ip_rt_acct *ip_rt_acct __read_mostly;
3018#endif /* CONFIG_NET_CLS_ROUTE */ 3150#endif /* CONFIG_NET_CLS_ROUTE */
@@ -3031,9 +3163,6 @@ int __init ip_rt_init(void)
3031{ 3163{
3032 int rc = 0; 3164 int rc = 0;
3033 3165
3034 atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
3035 (jiffies ^ (jiffies >> 7))));
3036
3037#ifdef CONFIG_NET_CLS_ROUTE 3166#ifdef CONFIG_NET_CLS_ROUTE
3038 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); 3167 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
3039 if (!ip_rt_acct) 3168 if (!ip_rt_acct)
@@ -3065,19 +3194,14 @@ int __init ip_rt_init(void)
3065 devinet_init(); 3194 devinet_init();
3066 ip_fib_init(); 3195 ip_fib_init();
3067 3196
3068 rt_secret_timer.function = rt_secret_rebuild;
3069 rt_secret_timer.data = 0;
3070 init_timer_deferrable(&rt_secret_timer);
3071
3072 /* All the timers, started at system startup tend 3197 /* All the timers, started at system startup tend
3073 to synchronize. Perturb it a bit. 3198 to synchronize. Perturb it a bit.
3074 */ 3199 */
3075 schedule_delayed_work(&expires_work, 3200 schedule_delayed_work(&expires_work,
3076 net_random() % ip_rt_gc_interval + ip_rt_gc_interval); 3201 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3077 3202
3078 rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval + 3203 if (register_pernet_subsys(&rt_secret_timer_ops))
3079 ip_rt_secret_interval; 3204 printk(KERN_ERR "Unable to setup rt_secret_timer\n");
3080 add_timer(&rt_secret_timer);
3081 3205
3082 if (ip_rt_proc_init()) 3206 if (ip_rt_proc_init())
3083 printk(KERN_ERR "Unable to create route proc files\n"); 3207 printk(KERN_ERR "Unable to create route proc files\n");
@@ -3087,6 +3211,9 @@ int __init ip_rt_init(void)
3087#endif 3211#endif
3088 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); 3212 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3089 3213
3214#ifdef CONFIG_SYSCTL
3215 register_pernet_subsys(&sysctl_route_ops);
3216#endif
3090 return rc; 3217 return rc;
3091} 3218}
3092 3219
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index d182a2a26291..fdde2ae07e24 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -8,8 +8,6 @@
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 *
12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
13 */ 11 */
14 12
15#include <linux/tcp.h> 13#include <linux/tcp.h>
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c437f804ee38..14ef202a2254 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem. 2 * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem.
3 * 3 *
4 * $Id: sysctl_net_ipv4.c,v 1.50 2001/10/20 00:00:11 davem Exp $
5 *
6 * Begun April 1, 1996, Mike Shaver. 4 * Begun April 1, 1996, Mike Shaver.
7 * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS] 5 * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS]
8 */ 6 */
@@ -795,7 +793,8 @@ static struct ctl_table ipv4_net_table[] = {
795 .data = &init_net.ipv4.sysctl_icmp_ratelimit, 793 .data = &init_net.ipv4.sysctl_icmp_ratelimit,
796 .maxlen = sizeof(int), 794 .maxlen = sizeof(int),
797 .mode = 0644, 795 .mode = 0644,
798 .proc_handler = &proc_dointvec 796 .proc_handler = &proc_dointvec_ms_jiffies,
797 .strategy = &sysctl_ms_jiffies
799 }, 798 },
800 { 799 {
801 .ctl_name = NET_IPV4_ICMP_RATEMASK, 800 .ctl_name = NET_IPV4_ICMP_RATEMASK,
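
icmp_ratelimit is switched to proc_dointvec_ms_jiffies/sysctl_ms_jiffies, which presumably means the value is stored in jiffies internally while userspace reads and writes milliseconds. A toy round-trip of that conversion (the HZ value here is assumed; the kernel helpers do the equivalent of msecs_to_jiffies()/jiffies_to_msecs()):

#include <stdio.h>

#define HZ 250  /* assumed tick rate for the illustration */

/* what the _ms_jiffies handlers conceptually do on write and read */
static long ms_to_jiffies(long ms)      { return (ms * HZ + 999) / 1000; }
static long jiffies_to_ms(long jiffies) { return jiffies * 1000 / HZ; }

int main(void)
{
    long user_ms = 1000;                    /* e.g. "echo 1000 > icmp_ratelimit" */
    long stored  = ms_to_jiffies(user_ms);  /* value the kernel keeps */
    printf("%ld ms -> %ld jiffies -> %ld ms\n",
           user_ms, stored, jiffies_to_ms(stored));
    return 0;
}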
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d723de18686..56a133c61452 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -2467,6 +2465,76 @@ static unsigned long tcp_md5sig_users;
2467static struct tcp_md5sig_pool **tcp_md5sig_pool; 2465static struct tcp_md5sig_pool **tcp_md5sig_pool;
2468static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2466static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2469 2467
2468int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
2469 int bplen,
2470 struct tcphdr *th, unsigned int tcplen,
2471 struct tcp_md5sig_pool *hp)
2472{
2473 struct scatterlist sg[4];
2474 __u16 data_len;
2475 int block = 0;
2476 __sum16 cksum;
2477 struct hash_desc *desc = &hp->md5_desc;
2478 int err;
2479 unsigned int nbytes = 0;
2480
2481 sg_init_table(sg, 4);
2482
2483 /* 1. The TCP pseudo-header */
2484 sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
2485 nbytes += bplen;
2486
2487 /* 2. The TCP header, excluding options, and assuming a
2488 * checksum of zero
2489 */
2490 cksum = th->check;
2491 th->check = 0;
2492 sg_set_buf(&sg[block++], th, sizeof(*th));
2493 nbytes += sizeof(*th);
2494
2495 /* 3. The TCP segment data (if any) */
2496 data_len = tcplen - (th->doff << 2);
2497 if (data_len > 0) {
2498 u8 *data = (u8 *)th + (th->doff << 2);
2499 sg_set_buf(&sg[block++], data, data_len);
2500 nbytes += data_len;
2501 }
2502
2503 /* 4. an independently-specified key or password, known to both
2504 * TCPs and presumably connection-specific
2505 */
2506 sg_set_buf(&sg[block++], key->key, key->keylen);
2507 nbytes += key->keylen;
2508
2509 sg_mark_end(&sg[block - 1]);
2510
2511 /* Now store the hash into the packet */
2512 err = crypto_hash_init(desc);
2513 if (err) {
2514 if (net_ratelimit())
2515 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
2516 return -1;
2517 }
2518 err = crypto_hash_update(desc, sg, nbytes);
2519 if (err) {
2520 if (net_ratelimit())
2521 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
2522 return -1;
2523 }
2524 err = crypto_hash_final(desc, md5_hash);
2525 if (err) {
2526 if (net_ratelimit())
2527 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
2528 return -1;
2529 }
2530
2531 /* Reset header */
2532 th->check = cksum;
2533
2534 return 0;
2535}
2536EXPORT_SYMBOL(tcp_calc_md5_hash);
2537
2470static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2538static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2471{ 2539{
2472 int cpu; 2540 int cpu;
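
The new common tcp_calc_md5_hash() digests, in order: (1) the address-family pseudo-header, (2) the TCP header with its checksum zeroed, (3) the segment payload if any, and (4) the shared key itself, which is the RFC 2385 input layout. Below is a user-space illustration of that ordering only, using OpenSSL's one-shot MD5() in place of the kernel's crypto_hash_* scatterlist path (link with -lcrypto; the buffers are dummies):

#include <openssl/md5.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* (1) pseudo-header, (2) TCP header with check = 0, (3) payload, (4) key */
    uint8_t pseudo[12] = {0};          /* saddr, daddr, pad, proto, length   */
    uint8_t tcphdr[20] = {0};          /* base header; checksum field zeroed */
    const uint8_t payload[] = "hello";
    const uint8_t key[]     = "secret";

    uint8_t buf[12 + 20 + sizeof(payload) - 1 + sizeof(key) - 1];
    size_t off = 0;

    memcpy(buf + off, pseudo, sizeof(pseudo));       off += sizeof(pseudo);
    memcpy(buf + off, tcphdr, sizeof(tcphdr));       off += sizeof(tcphdr);
    memcpy(buf + off, payload, sizeof(payload) - 1); off += sizeof(payload) - 1;
    memcpy(buf + off, key, sizeof(key) - 1);         off += sizeof(key) - 1;

    unsigned char digest[MD5_DIGEST_LENGTH];
    MD5(buf, off, digest);   /* the kernel hashes the same four pieces
                              * incrementally via a 4-entry scatterlist */

    for (int i = 0; i < MD5_DIGEST_LENGTH; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}

Hoisting this into tcp.c lets the IPv4 and IPv6 callers differ only in how they fill the pseudo-header.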
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 2fbcc7d1b1a0..838d491dfda7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * tcp_diag.c Module for monitoring TCP transport protocols sockets. 2 * tcp_diag.c Module for monitoring TCP transport protocols sockets.
3 * 3 *
4 * Version: $Id: tcp_diag.c,v 1.3 2002/02/01 22:01:04 davem Exp $
5 *
6 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 4 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
7 * 5 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cad73b7dfef0..d6ea970a1513 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -949,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
949{ 947{
950 struct tcp_sock *tp = tcp_sk(sk); 948 struct tcp_sock *tp = tcp_sk(sk);
951 if (metric > tp->reordering) { 949 if (metric > tp->reordering) {
950 int mib_idx;
951
952 tp->reordering = min(TCP_MAX_REORDERING, metric); 952 tp->reordering = min(TCP_MAX_REORDERING, metric);
953 953
954 /* This exciting event is worth to be remembered. 8) */ 954 /* This exciting event is worth to be remembered. 8) */
955 if (ts) 955 if (ts)
956 NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER); 956 mib_idx = LINUX_MIB_TCPTSREORDER;
957 else if (tcp_is_reno(tp)) 957 else if (tcp_is_reno(tp))
958 NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER); 958 mib_idx = LINUX_MIB_TCPRENOREORDER;
959 else if (tcp_is_fack(tp)) 959 else if (tcp_is_fack(tp))
960 NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER); 960 mib_idx = LINUX_MIB_TCPFACKREORDER;
961 else 961 else
962 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); 962 mib_idx = LINUX_MIB_TCPSACKREORDER;
963
964 NET_INC_STATS_BH(mib_idx);
963#if FASTRETRANS_DEBUG > 1 965#if FASTRETRANS_DEBUG > 1
964 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", 966 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
965 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 967 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
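
This hunk, and the similar ones later in tcp_input.c, tcp_timer.c and tcp_output.c, all apply the same refactor: pick the MIB index in the if/else ladder first, then issue a single NET_INC_STATS_BH() call. A standalone sketch of the pattern (the enum names and counter array here are illustrative stand-ins for the LINUX_MIB_* counters):

#include <stdio.h>

/* illustrative subset of the LINUX_MIB_* indices */
enum { MIB_TS_REORDER, MIB_RENO_REORDER, MIB_FACK_REORDER, MIB_SACK_REORDER, MIB_MAX };
static unsigned long mib[MIB_MAX];

/* stand-in for NET_INC_STATS_BH(): one increment site instead of four */
static void net_inc_stats(int mib_idx) { mib[mib_idx]++; }

static void update_reordering(int ts, int is_reno, int is_fack)
{
    int mib_idx;

    if (ts)
        mib_idx = MIB_TS_REORDER;
    else if (is_reno)
        mib_idx = MIB_RENO_REORDER;
    else if (is_fack)
        mib_idx = MIB_FACK_REORDER;
    else
        mib_idx = MIB_SACK_REORDER;

    net_inc_stats(mib_idx);
}

int main(void)
{
    update_reordering(0, 1, 0);
    printf("reno reorder events: %lu\n", mib[MIB_RENO_REORDER]);
    return 0;
}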
@@ -1458,18 +1460,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
1458 if (!tcp_is_sackblock_valid(tp, dup_sack, 1460 if (!tcp_is_sackblock_valid(tp, dup_sack,
1459 sp[used_sacks].start_seq, 1461 sp[used_sacks].start_seq,
1460 sp[used_sacks].end_seq)) { 1462 sp[used_sacks].end_seq)) {
1463 int mib_idx;
1464
1461 if (dup_sack) { 1465 if (dup_sack) {
1462 if (!tp->undo_marker) 1466 if (!tp->undo_marker)
1463 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO); 1467 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1464 else 1468 else
1465 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD); 1469 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1466 } else { 1470 } else {
1467 /* Don't count olds caused by ACK reordering */ 1471 /* Don't count olds caused by ACK reordering */
1468 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1472 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1469 !after(sp[used_sacks].end_seq, tp->snd_una)) 1473 !after(sp[used_sacks].end_seq, tp->snd_una))
1470 continue; 1474 continue;
1471 NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD); 1475 mib_idx = LINUX_MIB_TCPSACKDISCARD;
1472 } 1476 }
1477
1478 NET_INC_STATS_BH(mib_idx);
1473 if (i == 0) 1479 if (i == 0)
1474 first_sack_index = -1; 1480 first_sack_index = -1;
1475 continue; 1481 continue;
@@ -2382,15 +2388,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
2382 struct tcp_sock *tp = tcp_sk(sk); 2388 struct tcp_sock *tp = tcp_sk(sk);
2383 2389
2384 if (tcp_may_undo(tp)) { 2390 if (tcp_may_undo(tp)) {
2391 int mib_idx;
2392
2385 /* Happy end! We did not retransmit anything 2393 /* Happy end! We did not retransmit anything
2386 * or our original transmission succeeded. 2394 * or our original transmission succeeded.
2387 */ 2395 */
2388 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2396 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2389 tcp_undo_cwr(sk, 1); 2397 tcp_undo_cwr(sk, 1);
2390 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2398 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2391 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 2399 mib_idx = LINUX_MIB_TCPLOSSUNDO;
2392 else 2400 else
2393 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); 2401 mib_idx = LINUX_MIB_TCPFULLUNDO;
2402
2403 NET_INC_STATS_BH(mib_idx);
2394 tp->undo_marker = 0; 2404 tp->undo_marker = 0;
2395 } 2405 }
2396 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 2406 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2562,7 +2572,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2562 int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 2572 int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
2563 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 2573 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2564 (tcp_fackets_out(tp) > tp->reordering)); 2574 (tcp_fackets_out(tp) > tp->reordering));
2565 int fast_rexmit = 0; 2575 int fast_rexmit = 0, mib_idx;
2566 2576
2567 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 2577 if (WARN_ON(!tp->packets_out && tp->sacked_out))
2568 tp->sacked_out = 0; 2578 tp->sacked_out = 0;
@@ -2685,9 +2695,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2685 /* Otherwise enter Recovery state */ 2695 /* Otherwise enter Recovery state */
2686 2696
2687 if (tcp_is_reno(tp)) 2697 if (tcp_is_reno(tp))
2688 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); 2698 mib_idx = LINUX_MIB_TCPRENORECOVERY;
2689 else 2699 else
2690 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); 2700 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2701
2702 NET_INC_STATS_BH(mib_idx);
2691 2703
2692 tp->high_seq = tp->snd_nxt; 2704 tp->high_seq = tp->snd_nxt;
2693 tp->prior_ssthresh = 0; 2705 tp->prior_ssthresh = 0;
@@ -3450,6 +3462,43 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3450 return 1; 3462 return 1;
3451} 3463}
3452 3464
3465#ifdef CONFIG_TCP_MD5SIG
3466/*
3467 * Parse MD5 Signature option
3468 */
3469u8 *tcp_parse_md5sig_option(struct tcphdr *th)
3470{
3471 int length = (th->doff << 2) - sizeof (*th);
3472 u8 *ptr = (u8*)(th + 1);
3473
3474 /* If the TCP option is too short, we can short cut */
3475 if (length < TCPOLEN_MD5SIG)
3476 return NULL;
3477
3478 while (length > 0) {
3479 int opcode = *ptr++;
3480 int opsize;
3481
3482 switch(opcode) {
3483 case TCPOPT_EOL:
3484 return NULL;
3485 case TCPOPT_NOP:
3486 length--;
3487 continue;
3488 default:
3489 opsize = *ptr++;
3490 if (opsize < 2 || opsize > length)
3491 return NULL;
3492 if (opcode == TCPOPT_MD5SIG)
3493 return ptr;
3494 }
3495 ptr += opsize - 2;
3496 length -= opsize;
3497 }
3498 return NULL;
3499}
3500#endif
3501
3453static inline void tcp_store_ts_recent(struct tcp_sock *tp) 3502static inline void tcp_store_ts_recent(struct tcp_sock *tp)
3454{ 3503{
3455 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3504 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
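
tcp_parse_md5sig_option() walks the TCP option block in the standard way: EOL ends the walk, NOP advances one byte, and every other option carries a length byte that must be at least 2 and fit in what remains. The same walk, lifted into a self-contained user-space function over a raw options buffer (the kind/length constants are the standard TCP MD5 signature values):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define TCPOPT_EOL     0
#define TCPOPT_NOP     1
#define TCPOPT_MD5SIG  19
#define TCPOLEN_MD5SIG 18

/* return a pointer to the MD5 digest inside opts[], or NULL if absent */
static const uint8_t *find_md5sig(const uint8_t *opts, int length)
{
    const uint8_t *ptr = opts;

    if (length < TCPOLEN_MD5SIG)    /* option block too short to hold it */
        return NULL;

    while (length > 0) {
        int opcode = *ptr++;
        int opsize;

        switch (opcode) {
        case TCPOPT_EOL:
            return NULL;
        case TCPOPT_NOP:
            length--;
            continue;
        default:
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)   /* malformed option */
                return NULL;
            if (opcode == TCPOPT_MD5SIG)
                return ptr;                      /* points at the 16-byte digest */
        }
        ptr += opsize - 2;
        length -= opsize;
    }
    return NULL;
}

int main(void)
{
    /* NOP, NOP, MD5SIG option (kind 19, len 18, 16 bytes of digest) */
    uint8_t opts[20] = { TCPOPT_NOP, TCPOPT_NOP, TCPOPT_MD5SIG, TCPOLEN_MD5SIG };
    printf("md5 option %s\n", find_md5sig(opts, (int)sizeof(opts)) ? "found" : "absent");
    return 0;
}

Factoring the walk out lets the inbound verification path (next file hunk) drop its open-coded copy of the same loop.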
@@ -3665,10 +3714,14 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
3665static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) 3714static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
3666{ 3715{
3667 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3716 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3717 int mib_idx;
3718
3668 if (before(seq, tp->rcv_nxt)) 3719 if (before(seq, tp->rcv_nxt))
3669 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); 3720 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
3670 else 3721 else
3671 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); 3722 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
3723
3724 NET_INC_STATS_BH(mib_idx);
3672 3725
3673 tp->rx_opt.dsack = 1; 3726 tp->rx_opt.dsack = 1;
3674 tp->duplicate_sack[0].start_seq = seq; 3727 tp->duplicate_sack[0].start_seq = seq;
@@ -5422,6 +5475,9 @@ EXPORT_SYMBOL(sysctl_tcp_ecn);
5422EXPORT_SYMBOL(sysctl_tcp_reordering); 5475EXPORT_SYMBOL(sysctl_tcp_reordering);
5423EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 5476EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5424EXPORT_SYMBOL(tcp_parse_options); 5477EXPORT_SYMBOL(tcp_parse_options);
5478#ifdef CONFIG_TCP_MD5SIG
5479EXPORT_SYMBOL(tcp_parse_md5sig_option);
5480#endif
5425EXPORT_SYMBOL(tcp_rcv_established); 5481EXPORT_SYMBOL(tcp_rcv_established);
5426EXPORT_SYMBOL(tcp_rcv_state_process); 5482EXPORT_SYMBOL(tcp_rcv_state_process);
5427EXPORT_SYMBOL(tcp_initialize_rcv_mss); 5483EXPORT_SYMBOL(tcp_initialize_rcv_mss);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ffe869ac1bcf..4300bcf2ceaf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9 *
10 * IPv4 specific functions 8 * IPv4 specific functions
11 * 9 *
12 * 10 *
@@ -91,8 +89,13 @@ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
91 __be32 addr); 89 __be32 addr);
92static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 90static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
93 __be32 saddr, __be32 daddr, 91 __be32 saddr, __be32 daddr,
94 struct tcphdr *th, int protocol, 92 struct tcphdr *th, unsigned int tcplen);
95 unsigned int tcplen); 93#else
94static inline
95struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
96{
97 return NULL;
98}
96#endif 99#endif
97 100
98struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 101struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
@@ -582,8 +585,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
582 key, 585 key,
583 ip_hdr(skb)->daddr, 586 ip_hdr(skb)->daddr,
584 ip_hdr(skb)->saddr, 587 ip_hdr(skb)->saddr,
585 &rep.th, IPPROTO_TCP, 588 &rep.th, arg.iov[0].iov_len);
586 arg.iov[0].iov_len);
587 } 589 }
588#endif 590#endif
589 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 591 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -602,9 +604,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
602 outside socket context is ugly, certainly. What can I do? 604 outside socket context is ugly, certainly. What can I do?
603 */ 605 */
604 606
605static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, 607static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
606 struct sk_buff *skb, u32 seq, u32 ack, 608 u32 win, u32 ts, int oif,
607 u32 win, u32 ts) 609 struct tcp_md5sig_key *key)
608{ 610{
609 struct tcphdr *th = tcp_hdr(skb); 611 struct tcphdr *th = tcp_hdr(skb);
610 struct { 612 struct {
@@ -616,10 +618,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
616 ]; 618 ];
617 } rep; 619 } rep;
618 struct ip_reply_arg arg; 620 struct ip_reply_arg arg;
619#ifdef CONFIG_TCP_MD5SIG
620 struct tcp_md5sig_key *key;
621 struct tcp_md5sig_key tw_key;
622#endif
623 621
624 memset(&rep.th, 0, sizeof(struct tcphdr)); 622 memset(&rep.th, 0, sizeof(struct tcphdr));
625 memset(&arg, 0, sizeof(arg)); 623 memset(&arg, 0, sizeof(arg));
@@ -645,23 +643,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
645 rep.th.window = htons(win); 643 rep.th.window = htons(win);
646 644
647#ifdef CONFIG_TCP_MD5SIG 645#ifdef CONFIG_TCP_MD5SIG
648 /*
649 * The SKB holds an imcoming packet, but may not have a valid ->sk
650 * pointer. This is especially the case when we're dealing with a
651 * TIME_WAIT ack, because the sk structure is long gone, and only
652 * the tcp_timewait_sock remains. So the md5 key is stashed in that
653 * structure, and we use it in preference. I believe that (twsk ||
654 * skb->sk) holds true, but we program defensively.
655 */
656 if (!twsk && skb->sk) {
657 key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
658 } else if (twsk && twsk->tw_md5_keylen) {
659 tw_key.key = twsk->tw_md5_key;
660 tw_key.keylen = twsk->tw_md5_keylen;
661 key = &tw_key;
662 } else
663 key = NULL;
664
665 if (key) { 646 if (key) {
666 int offset = (ts) ? 3 : 0; 647 int offset = (ts) ? 3 : 0;
667 648
@@ -676,16 +657,15 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
676 key, 657 key,
677 ip_hdr(skb)->daddr, 658 ip_hdr(skb)->daddr,
678 ip_hdr(skb)->saddr, 659 ip_hdr(skb)->saddr,
679 &rep.th, IPPROTO_TCP, 660 &rep.th, arg.iov[0].iov_len);
680 arg.iov[0].iov_len);
681 } 661 }
682#endif 662#endif
683 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, 663 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
684 ip_hdr(skb)->saddr, /* XXX */ 664 ip_hdr(skb)->saddr, /* XXX */
685 arg.iov[0].iov_len, IPPROTO_TCP, 0); 665 arg.iov[0].iov_len, IPPROTO_TCP, 0);
686 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 666 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
687 if (twsk) 667 if (oif)
688 arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if; 668 arg.bound_dev_if = oif;
689 669
690 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb, 670 ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
691 &arg, arg.iov[0].iov_len); 671 &arg, arg.iov[0].iov_len);
@@ -698,9 +678,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
698 struct inet_timewait_sock *tw = inet_twsk(sk); 678 struct inet_timewait_sock *tw = inet_twsk(sk);
699 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 679 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
700 680
701 tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 681 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
702 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 682 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
703 tcptw->tw_ts_recent); 683 tcptw->tw_ts_recent,
684 tw->tw_bound_dev_if,
685 tcp_twsk_md5_key(tcptw)
686 );
704 687
705 inet_twsk_put(tw); 688 inet_twsk_put(tw);
706} 689}
@@ -708,9 +691,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
708static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, 691static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
709 struct request_sock *req) 692 struct request_sock *req)
710{ 693{
711 tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, 694 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
712 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 695 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
713 req->ts_recent); 696 req->ts_recent,
697 0,
698 tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
714} 699}
715 700
716/* 701/*
@@ -1002,18 +987,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1002 987
1003static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 988static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1004 __be32 saddr, __be32 daddr, 989 __be32 saddr, __be32 daddr,
1005 struct tcphdr *th, int protocol, 990 struct tcphdr *th,
1006 unsigned int tcplen) 991 unsigned int tcplen)
1007{ 992{
1008 struct scatterlist sg[4];
1009 __u16 data_len;
1010 int block = 0;
1011 __sum16 old_checksum;
1012 struct tcp_md5sig_pool *hp; 993 struct tcp_md5sig_pool *hp;
1013 struct tcp4_pseudohdr *bp; 994 struct tcp4_pseudohdr *bp;
1014 struct hash_desc *desc;
1015 int err; 995 int err;
1016 unsigned int nbytes = 0;
1017 996
1018 /* 997 /*
1019 * Okay, so RFC2385 is turned on for this connection, 998 * Okay, so RFC2385 is turned on for this connection,
@@ -1025,63 +1004,25 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1025 goto clear_hash_noput; 1004 goto clear_hash_noput;
1026 1005
1027 bp = &hp->md5_blk.ip4; 1006 bp = &hp->md5_blk.ip4;
1028 desc = &hp->md5_desc;
1029 1007
1030 /* 1008 /*
1031 * 1. the TCP pseudo-header (in the order: source IP address, 1009 * The TCP pseudo-header (in the order: source IP address,
1032 * destination IP address, zero-padded protocol number, and 1010 * destination IP address, zero-padded protocol number, and
1033 * segment length) 1011 * segment length)
1034 */ 1012 */
1035 bp->saddr = saddr; 1013 bp->saddr = saddr;
1036 bp->daddr = daddr; 1014 bp->daddr = daddr;
1037 bp->pad = 0; 1015 bp->pad = 0;
1038 bp->protocol = protocol; 1016 bp->protocol = IPPROTO_TCP;
1039 bp->len = htons(tcplen); 1017 bp->len = htons(tcplen);
1040 1018
1041 sg_init_table(sg, 4); 1019 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
1042 1020 th, tcplen, hp);
1043 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1044 nbytes += sizeof(*bp);
1045
1046 /* 2. the TCP header, excluding options, and assuming a
1047 * checksum of zero/
1048 */
1049 old_checksum = th->check;
1050 th->check = 0;
1051 sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
1052 nbytes += sizeof(struct tcphdr);
1053
1054 /* 3. the TCP segment data (if any) */
1055 data_len = tcplen - (th->doff << 2);
1056 if (data_len > 0) {
1057 unsigned char *data = (unsigned char *)th + (th->doff << 2);
1058 sg_set_buf(&sg[block++], data, data_len);
1059 nbytes += data_len;
1060 }
1061
1062 /* 4. an independently-specified key or password, known to both
1063 * TCPs and presumably connection-specific
1064 */
1065 sg_set_buf(&sg[block++], key->key, key->keylen);
1066 nbytes += key->keylen;
1067
1068 sg_mark_end(&sg[block - 1]);
1069
1070 /* Now store the Hash into the packet */
1071 err = crypto_hash_init(desc);
1072 if (err)
1073 goto clear_hash;
1074 err = crypto_hash_update(desc, sg, nbytes);
1075 if (err)
1076 goto clear_hash;
1077 err = crypto_hash_final(desc, md5_hash);
1078 if (err) 1021 if (err)
1079 goto clear_hash; 1022 goto clear_hash;
1080 1023
1081 /* Reset header, and free up the crypto */ 1024 /* Free up the crypto pool */
1082 tcp_put_md5sig_pool(); 1025 tcp_put_md5sig_pool();
1083 th->check = old_checksum;
1084
1085out: 1026out:
1086 return 0; 1027 return 0;
1087clear_hash: 1028clear_hash:
@@ -1095,7 +1036,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1095 struct sock *sk, 1036 struct sock *sk,
1096 struct dst_entry *dst, 1037 struct dst_entry *dst,
1097 struct request_sock *req, 1038 struct request_sock *req,
1098 struct tcphdr *th, int protocol, 1039 struct tcphdr *th,
1099 unsigned int tcplen) 1040 unsigned int tcplen)
1100{ 1041{
1101 __be32 saddr, daddr; 1042 __be32 saddr, daddr;
@@ -1111,7 +1052,7 @@ int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1111 } 1052 }
1112 return tcp_v4_do_calc_md5_hash(md5_hash, key, 1053 return tcp_v4_do_calc_md5_hash(md5_hash, key,
1113 saddr, daddr, 1054 saddr, daddr,
1114 th, protocol, tcplen); 1055 th, tcplen);
1115} 1056}
1116 1057
1117EXPORT_SYMBOL(tcp_v4_calc_md5_hash); 1058EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
@@ -1130,52 +1071,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1130 struct tcp_md5sig_key *hash_expected; 1071 struct tcp_md5sig_key *hash_expected;
1131 const struct iphdr *iph = ip_hdr(skb); 1072 const struct iphdr *iph = ip_hdr(skb);
1132 struct tcphdr *th = tcp_hdr(skb); 1073 struct tcphdr *th = tcp_hdr(skb);
1133 int length = (th->doff << 2) - sizeof(struct tcphdr);
1134 int genhash; 1074 int genhash;
1135 unsigned char *ptr;
1136 unsigned char newhash[16]; 1075 unsigned char newhash[16];
1137 1076
1138 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); 1077 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1078 hash_location = tcp_parse_md5sig_option(th);
1139 1079
1140 /*
1141 * If the TCP option length is less than the TCP_MD5SIG
1142 * option length, then we can shortcut
1143 */
1144 if (length < TCPOLEN_MD5SIG) {
1145 if (hash_expected)
1146 return 1;
1147 else
1148 return 0;
1149 }
1150
1151 /* Okay, we can't shortcut - we have to grub through the options */
1152 ptr = (unsigned char *)(th + 1);
1153 while (length > 0) {
1154 int opcode = *ptr++;
1155 int opsize;
1156
1157 switch (opcode) {
1158 case TCPOPT_EOL:
1159 goto done_opts;
1160 case TCPOPT_NOP:
1161 length--;
1162 continue;
1163 default:
1164 opsize = *ptr++;
1165 if (opsize < 2)
1166 goto done_opts;
1167 if (opsize > length)
1168 goto done_opts;
1169
1170 if (opcode == TCPOPT_MD5SIG) {
1171 hash_location = ptr;
1172 goto done_opts;
1173 }
1174 }
1175 ptr += opsize-2;
1176 length -= opsize;
1177 }
1178done_opts:
1179 /* We've parsed the options - do we have a hash? */ 1080 /* We've parsed the options - do we have a hash? */
1180 if (!hash_expected && !hash_location) 1081 if (!hash_expected && !hash_location)
1181 return 0; 1082 return 0;
@@ -1202,8 +1103,7 @@ done_opts:
1202 genhash = tcp_v4_do_calc_md5_hash(newhash, 1103 genhash = tcp_v4_do_calc_md5_hash(newhash,
1203 hash_expected, 1104 hash_expected,
1204 iph->saddr, iph->daddr, 1105 iph->saddr, iph->daddr,
1205 th, sk->sk_protocol, 1106 th, skb->len);
1206 skb->len);
1207 1107
1208 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1108 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1209 if (net_ratelimit()) { 1109 if (net_ratelimit()) {
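
With the open-coded option walk gone, tcp_v4_inbound_md5_hash() reduces to a small decision table: no key expected and no signature present is fine; a signature expected but missing (or present but unexpected) fails; otherwise the hash is recomputed and compared. A compact sketch of that control flow with stubbed helpers (the stubs stand in for tcp_v4_md5_do_lookup(), tcp_parse_md5sig_option() and the hash recomputation; return values are hard-wired only to drive the logic):

#include <stdio.h>
#include <string.h>

static const unsigned char *lookup_expected_key(void) { return (const unsigned char *)"k"; }
static const unsigned char *parse_md5_option(void)    { return NULL; }
static int recompute_hash(unsigned char *out)         { memset(out, 0, 16); return 0; }

/* 0 = accept, 1 = drop, mirroring the function's return convention */
static int inbound_md5_check(void)
{
    const unsigned char *hash_expected = lookup_expected_key();
    const unsigned char *hash_location = parse_md5_option();
    unsigned char newhash[16];

    if (!hash_expected && !hash_location)
        return 0;                 /* MD5 not in use on this connection */
    if (!hash_expected || !hash_location)
        return 1;                 /* only one side has a signature: drop
                                   * (these cases sit between the hunks shown) */

    if (recompute_hash(newhash) || memcmp(hash_location, newhash, 16) != 0)
        return 1;                 /* signature present but wrong */
    return 0;
}

int main(void)
{
    printf("verdict: %s\n", inbound_md5_check() ? "drop" : "accept");
    return 0;
}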
@@ -1871,7 +1771,7 @@ static int tcp_v4_init_sock(struct sock *sk)
1871 return 0; 1771 return 0;
1872} 1772}
1873 1773
1874int tcp_v4_destroy_sock(struct sock *sk) 1774void tcp_v4_destroy_sock(struct sock *sk)
1875{ 1775{
1876 struct tcp_sock *tp = tcp_sk(sk); 1776 struct tcp_sock *tp = tcp_sk(sk);
1877 1777
@@ -1915,8 +1815,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
1915 } 1815 }
1916 1816
1917 atomic_dec(&tcp_sockets_allocated); 1817 atomic_dec(&tcp_sockets_allocated);
1918
1919 return 0;
1920} 1818}
1921 1819
1922EXPORT_SYMBOL(tcp_v4_destroy_sock); 1820EXPORT_SYMBOL(tcp_v4_destroy_sock);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8245247a6ceb..ea68a478fad6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ad993ecb4810..edef2afe905e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -607,7 +605,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
607 md5, 605 md5,
608 sk, NULL, NULL, 606 sk, NULL, NULL,
609 tcp_hdr(skb), 607 tcp_hdr(skb),
610 sk->sk_protocol,
611 skb->len); 608 skb->len);
612 } 609 }
613#endif 610#endif
@@ -1988,14 +1985,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1988 1985
1989 if (sacked & TCPCB_LOST) { 1986 if (sacked & TCPCB_LOST) {
1990 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 1987 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1988 int mib_idx;
1989
1991 if (tcp_retransmit_skb(sk, skb)) { 1990 if (tcp_retransmit_skb(sk, skb)) {
1992 tp->retransmit_skb_hint = NULL; 1991 tp->retransmit_skb_hint = NULL;
1993 return; 1992 return;
1994 } 1993 }
1995 if (icsk->icsk_ca_state != TCP_CA_Loss) 1994 if (icsk->icsk_ca_state != TCP_CA_Loss)
1996 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 1995 mib_idx = LINUX_MIB_TCPFASTRETRANS;
1997 else 1996 else
1998 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 1997 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
1998 NET_INC_STATS_BH(mib_idx);
1999 1999
2000 if (skb == tcp_write_queue_head(sk)) 2000 if (skb == tcp_write_queue_head(sk))
2001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -2266,7 +2266,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2266 tp->af_specific->calc_md5_hash(md5_hash_location, 2266 tp->af_specific->calc_md5_hash(md5_hash_location,
2267 md5, 2267 md5,
2268 NULL, dst, req, 2268 NULL, dst, req,
2269 tcp_hdr(skb), sk->sk_protocol, 2269 tcp_hdr(skb),
2270 skb->len); 2270 skb->len);
2271 } 2271 }
2272#endif 2272#endif
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 63ed9d6830e7..6a480d1fd8f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -328,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
328 goto out; 326 goto out;
329 327
330 if (icsk->icsk_retransmits == 0) { 328 if (icsk->icsk_retransmits == 0) {
329 int mib_idx;
330
331 if (icsk->icsk_ca_state == TCP_CA_Disorder || 331 if (icsk->icsk_ca_state == TCP_CA_Disorder ||
332 icsk->icsk_ca_state == TCP_CA_Recovery) { 332 icsk->icsk_ca_state == TCP_CA_Recovery) {
333 if (tcp_is_sack(tp)) { 333 if (tcp_is_sack(tp)) {
334 if (icsk->icsk_ca_state == TCP_CA_Recovery) 334 if (icsk->icsk_ca_state == TCP_CA_Recovery)
335 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 335 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
336 else 336 else
337 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 337 mib_idx = LINUX_MIB_TCPSACKFAILURES;
338 } else { 338 } else {
339 if (icsk->icsk_ca_state == TCP_CA_Recovery) 339 if (icsk->icsk_ca_state == TCP_CA_Recovery)
340 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 340 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
341 else 341 else
342 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 342 mib_idx = LINUX_MIB_TCPRENOFAILURES;
343 } 343 }
344 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { 344 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
345 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 345 mib_idx = LINUX_MIB_TCPLOSSFAILURES;
346 } else { 346 } else {
347 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 347 mib_idx = LINUX_MIB_TCPTIMEOUTS;
348 } 348 }
349 NET_INC_STATS_BH(mib_idx);
349 } 350 }
350 351
351 if (tcp_use_frto(sk)) { 352 if (tcp_use_frto(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 56fcda3694ba..7187121e922d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * The User Datagram Protocol (UDP). 6 * The User Datagram Protocol (UDP).
7 * 7 *
8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
@@ -136,7 +134,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
136 struct sock *sk; 134 struct sock *sk;
137 struct hlist_node *node; 135 struct hlist_node *node;
138 136
139 sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) 137 sk_for_each(sk, node, &udptable[udp_hashfn(net, num)])
140 if (net_eq(sock_net(sk), net) && sk->sk_hash == num) 138 if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
141 return 1; 139 return 1;
142 return 0; 140 return 0;
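
The open-coded bucket computation num & (UDP_HTABLE_SIZE - 1) is replaced throughout by udp_hashfn(net, num). Its definition is not part of this diff; presumably it still masks down to UDP_HTABLE_SIZE buckets while letting the namespace perturb the choice. A toy version written under that assumption (the per-net salt and mixing are made up here):

#include <stdio.h>
#include <stdint.h>

#define UDP_HTABLE_SIZE 128

struct toy_net { unsigned int hash_mix; };   /* per-namespace salt (assumed) */

/* toy udp_hashfn(): the old behaviour was simply num & (UDP_HTABLE_SIZE - 1);
 * the helper is assumed to fold a per-net value into the same mask */
static unsigned toy_udp_hashfn(const struct toy_net *net, uint16_t num)
{
    return (num ^ net->hash_mix) & (UDP_HTABLE_SIZE - 1);
}

int main(void)
{
    struct toy_net a = { .hash_mix = 0 }, b = { .hash_mix = 0x55 };
    printf("port 53 -> bucket %u in net a, %u in net b\n",
           toy_udp_hashfn(&a, 53), toy_udp_hashfn(&b, 53));
    return 0;
}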
@@ -176,7 +174,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
176 for (i = 0; i < UDP_HTABLE_SIZE; i++) { 174 for (i = 0; i < UDP_HTABLE_SIZE; i++) {
177 int size = 0; 175 int size = 0;
178 176
179 head = &udptable[rover & (UDP_HTABLE_SIZE - 1)]; 177 head = &udptable[udp_hashfn(net, rover)];
180 if (hlist_empty(head)) 178 if (hlist_empty(head))
181 goto gotit; 179 goto gotit;
182 180
@@ -213,7 +211,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
213gotit: 211gotit:
214 snum = rover; 212 snum = rover;
215 } else { 213 } else {
216 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 214 head = &udptable[udp_hashfn(net, snum)];
217 215
218 sk_for_each(sk2, node, head) 216 sk_for_each(sk2, node, head)
219 if (sk2->sk_hash == snum && 217 if (sk2->sk_hash == snum &&
@@ -229,7 +227,7 @@ gotit:
229 inet_sk(sk)->num = snum; 227 inet_sk(sk)->num = snum;
230 sk->sk_hash = snum; 228 sk->sk_hash = snum;
231 if (sk_unhashed(sk)) { 229 if (sk_unhashed(sk)) {
232 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; 230 head = &udptable[udp_hashfn(net, snum)];
233 sk_add_node(sk, head); 231 sk_add_node(sk, head);
234 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 232 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
235 } 233 }
@@ -266,7 +264,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
266 int badness = -1; 264 int badness = -1;
267 265
268 read_lock(&udp_hash_lock); 266 read_lock(&udp_hash_lock);
269 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 267 sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
270 struct inet_sock *inet = inet_sk(sk); 268 struct inet_sock *inet = inet_sk(sk);
271 269
272 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 270 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
@@ -528,7 +526,8 @@ out:
528 up->len = 0; 526 up->len = 0;
529 up->pending = 0; 527 up->pending = 0;
530 if (!err) 528 if (!err)
531 UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); 529 UDP_INC_STATS_USER(sock_net(sk),
530 UDP_MIB_OUTDATAGRAMS, is_udplite);
532 return err; 531 return err;
533} 532}
534 533
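
UDP_INC_STATS_USER and UDP_INC_STATS_BH now take the socket's (or skb's) network namespace as their first argument, so datagram and error counters are accounted per netns rather than globally. The macro internals are not shown in this diff; the toy model below only illustrates what the extra argument buys, namely that each namespace carries its own counter array (the MIB names are the kernel's, the struct layout is invented):

#include <stdio.h>

enum { UDP_MIB_INDATAGRAMS, UDP_MIB_OUTDATAGRAMS, UDP_MIB_INERRORS, UDP_MIB_MAX };

struct toy_net { unsigned long udp_stats[UDP_MIB_MAX]; };

/* stand-in for UDP_INC_STATS_USER(net, field, is_udplite): the point is only
 * that the counter lives inside the namespace passed in */
static void udp_inc_stats(struct toy_net *net, int field) { net->udp_stats[field]++; }

int main(void)
{
    struct toy_net init_net = {0}, other_net = {0};

    udp_inc_stats(&init_net, UDP_MIB_OUTDATAGRAMS);
    udp_inc_stats(&other_net, UDP_MIB_OUTDATAGRAMS);
    udp_inc_stats(&other_net, UDP_MIB_OUTDATAGRAMS);

    printf("init_net out: %lu, other_net out: %lu\n",
           init_net.udp_stats[UDP_MIB_OUTDATAGRAMS],
           other_net.udp_stats[UDP_MIB_OUTDATAGRAMS]);
    return 0;
}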
@@ -727,7 +726,8 @@ out:
727 * seems like overkill. 726 * seems like overkill.
728 */ 727 */
729 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 728 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
730 UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); 729 UDP_INC_STATS_USER(sock_net(sk),
730 UDP_MIB_SNDBUFERRORS, is_udplite);
731 } 731 }
732 return err; 732 return err;
733 733
@@ -890,7 +890,8 @@ try_again:
890 goto out_free; 890 goto out_free;
891 891
892 if (!peeked) 892 if (!peeked)
893 UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); 893 UDP_INC_STATS_USER(sock_net(sk),
894 UDP_MIB_INDATAGRAMS, is_udplite);
894 895
895 sock_recv_timestamp(msg, sk, skb); 896 sock_recv_timestamp(msg, sk, skb);
896 897
@@ -919,7 +920,7 @@ out:
919csum_copy_err: 920csum_copy_err:
920 lock_sock(sk); 921 lock_sock(sk);
921 if (!skb_kill_datagram(sk, skb, flags)) 922 if (!skb_kill_datagram(sk, skb, flags))
922 UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); 923 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
923 release_sock(sk); 924 release_sock(sk);
924 925
925 if (noblock) 926 if (noblock)
@@ -990,7 +991,8 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
990 991
991 ret = (*up->encap_rcv)(sk, skb); 992 ret = (*up->encap_rcv)(sk, skb);
992 if (ret <= 0) { 993 if (ret <= 0) {
993 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 994 UDP_INC_STATS_BH(sock_net(sk),
995 UDP_MIB_INDATAGRAMS,
994 is_udplite); 996 is_udplite);
995 return -ret; 997 return -ret;
996 } 998 }
@@ -1042,15 +1044,18 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
1042 1044
1043 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 1045 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
1044 /* Note that an ENOMEM error is charged twice */ 1046 /* Note that an ENOMEM error is charged twice */
1045 if (rc == -ENOMEM) 1047 if (rc == -ENOMEM) {
1046 UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); 1048 UDP_INC_STATS_BH(sock_net(sk),
1049 UDP_MIB_RCVBUFERRORS, is_udplite);
1050 atomic_inc(&sk->sk_drops);
1051 }
1047 goto drop; 1052 goto drop;
1048 } 1053 }
1049 1054
1050 return 0; 1055 return 0;
1051 1056
1052drop: 1057drop:
1053 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 1058 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1054 kfree_skb(skb); 1059 kfree_skb(skb);
1055 return -1; 1060 return -1;
1056} 1061}
@@ -1061,7 +1066,7 @@ drop:
1061 * Note: called only from the BH handler context, 1066 * Note: called only from the BH handler context,
1062 * so we don't need to lock the hashes. 1067 * so we don't need to lock the hashes.
1063 */ 1068 */
1064static int __udp4_lib_mcast_deliver(struct sk_buff *skb, 1069static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1065 struct udphdr *uh, 1070 struct udphdr *uh,
1066 __be32 saddr, __be32 daddr, 1071 __be32 saddr, __be32 daddr,
1067 struct hlist_head udptable[]) 1072 struct hlist_head udptable[])
@@ -1070,7 +1075,7 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
1070 int dif; 1075 int dif;
1071 1076
1072 read_lock(&udp_hash_lock); 1077 read_lock(&udp_hash_lock);
1073 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); 1078 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
1074 dif = skb->dev->ifindex; 1079 dif = skb->dev->ifindex;
1075 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 1080 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
1076 if (sk) { 1081 if (sk) {
@@ -1158,6 +1163,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1158 struct rtable *rt = (struct rtable*)skb->dst; 1163 struct rtable *rt = (struct rtable*)skb->dst;
1159 __be32 saddr = ip_hdr(skb)->saddr; 1164 __be32 saddr = ip_hdr(skb)->saddr;
1160 __be32 daddr = ip_hdr(skb)->daddr; 1165 __be32 daddr = ip_hdr(skb)->daddr;
1166 struct net *net = dev_net(skb->dev);
1161 1167
1162 /* 1168 /*
1163 * Validate the packet. 1169 * Validate the packet.
@@ -1180,9 +1186,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1180 goto csum_error; 1186 goto csum_error;
1181 1187
1182 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 1188 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1183 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); 1189 return __udp4_lib_mcast_deliver(net, skb, uh,
1190 saddr, daddr, udptable);
1184 1191
1185 sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, 1192 sk = __udp4_lib_lookup(net, saddr, uh->source, daddr,
1186 uh->dest, inet_iif(skb), udptable); 1193 uh->dest, inet_iif(skb), udptable);
1187 1194
1188 if (sk != NULL) { 1195 if (sk != NULL) {
@@ -1211,7 +1218,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1211 if (udp_lib_checksum_complete(skb)) 1218 if (udp_lib_checksum_complete(skb))
1212 goto csum_error; 1219 goto csum_error;
1213 1220
1214 UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 1221 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1215 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 1222 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1216 1223
1217 /* 1224 /*
@@ -1245,7 +1252,7 @@ csum_error:
1245 ntohs(uh->dest), 1252 ntohs(uh->dest),
1246 ulen); 1253 ulen);
1247drop: 1254drop:
1248 UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 1255 UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1249 kfree_skb(skb); 1256 kfree_skb(skb);
1250 return 0; 1257 return 0;
1251} 1258}
@@ -1255,12 +1262,11 @@ int udp_rcv(struct sk_buff *skb)
1255 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP); 1262 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
1256} 1263}
1257 1264
1258int udp_destroy_sock(struct sock *sk) 1265void udp_destroy_sock(struct sock *sk)
1259{ 1266{
1260 lock_sock(sk); 1267 lock_sock(sk);
1261 udp_flush_pending_frames(sk); 1268 udp_flush_pending_frames(sk);
1262 release_sock(sk); 1269 release_sock(sk);
1263 return 0;
1264} 1270}
1265 1271
1266/* 1272/*
@@ -1453,7 +1459,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1453 spin_lock_bh(&rcvq->lock); 1459 spin_lock_bh(&rcvq->lock);
1454 while ((skb = skb_peek(rcvq)) != NULL && 1460 while ((skb = skb_peek(rcvq)) != NULL &&
1455 udp_lib_checksum_complete(skb)) { 1461 udp_lib_checksum_complete(skb)) {
1456 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); 1462 UDP_INC_STATS_BH(sock_net(sk),
1463 UDP_MIB_INERRORS, is_lite);
1457 __skb_unlink(skb, rcvq); 1464 __skb_unlink(skb, rcvq);
1458 kfree_skb(skb); 1465 kfree_skb(skb);
1459 } 1466 }
@@ -1629,12 +1636,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1629 __u16 srcp = ntohs(inet->sport); 1636 __u16 srcp = ntohs(inet->sport);
1630 1637
1631 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 1638 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
1632 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p%n", 1639 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
1633 bucket, src, srcp, dest, destp, sp->sk_state, 1640 bucket, src, srcp, dest, destp, sp->sk_state,
1634 atomic_read(&sp->sk_wmem_alloc), 1641 atomic_read(&sp->sk_wmem_alloc),
1635 atomic_read(&sp->sk_rmem_alloc), 1642 atomic_read(&sp->sk_rmem_alloc),
1636 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 1643 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1637 atomic_read(&sp->sk_refcnt), sp, len); 1644 atomic_read(&sp->sk_refcnt), sp,
1645 atomic_read(&sp->sk_drops), len);
1638} 1646}
1639 1647
1640int udp4_seq_show(struct seq_file *seq, void *v) 1648int udp4_seq_show(struct seq_file *seq, void *v)
@@ -1643,7 +1651,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
1643 seq_printf(seq, "%-127s\n", 1651 seq_printf(seq, "%-127s\n",
1644 " sl local_address rem_address st tx_queue " 1652 " sl local_address rem_address st tx_queue "
1645 "rx_queue tr tm->when retrnsmt uid timeout " 1653 "rx_queue tr tm->when retrnsmt uid timeout "
1646 "inode"); 1654 "inode ref pointer drops");
1647 else { 1655 else {
1648 struct udp_iter_state *state = seq->private; 1656 struct udp_iter_state *state = seq->private;
1649 int len; 1657 int len;
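The udp.c hunks above thread a struct net into every SNMP counter update: user-context paths obtain it from the socket with sock_net(sk), while the receive path uses dev_net(skb->dev). A minimal sketch of that calling pattern, assuming the post-patch macro signatures shown here; the helper name is invented:

#include <net/sock.h>
#include <net/udp.h>

/* Hypothetical helper mirroring the hunks above: charge a receive
 * error to the namespace that owns the socket rather than to a
 * global MIB, and record the drop on the socket itself. */
static void example_udp_charge_rx_error(struct sock *sk, int is_udplite)
{
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
}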
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 7288bf7977fb..2e9bad2fa1bc 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -26,7 +26,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
26extern int udp_sendpage(struct sock *sk, struct page *page, int offset, 26extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
27 size_t size, int flags); 27 size_t size, int flags);
28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
29extern int udp_destroy_sock(struct sock *sk); 29extern void udp_destroy_sock(struct sock *sk);
30 30
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
32extern int udp4_seq_show(struct seq_file *seq, void *v); 32extern int udp4_seq_show(struct seq_file *seq, void *v);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 72ce26b6c4d3..4ad16b6d5138 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). 2 * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828).
3 * 3 *
4 * Version: $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $
5 *
6 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 4 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
7 * 5 *
8 * Changes: 6 * Changes:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 147588f4c7c0..2ec73e62202c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: addrconf.c,v 1.69 2001/10/31 21:55:54 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -121,6 +119,7 @@ static void ipv6_regen_rndid(unsigned long data);
121static int desync_factor = MAX_DESYNC_FACTOR * HZ; 119static int desync_factor = MAX_DESYNC_FACTOR * HZ;
122#endif 120#endif
123 121
122static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
124static int ipv6_count_addresses(struct inet6_dev *idev); 123static int ipv6_count_addresses(struct inet6_dev *idev);
125 124
126/* 125/*
@@ -185,6 +184,8 @@ struct ipv6_devconf ipv6_devconf __read_mostly = {
185#endif 184#endif
186 .proxy_ndp = 0, 185 .proxy_ndp = 0,
187 .accept_source_route = 0, /* we do not accept RH0 by default. */ 186 .accept_source_route = 0, /* we do not accept RH0 by default. */
187 .disable_ipv6 = 0,
188 .accept_dad = 1,
188}; 189};
189 190
190static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { 191static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -217,6 +218,8 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
217#endif 218#endif
218 .proxy_ndp = 0, 219 .proxy_ndp = 0,
219 .accept_source_route = 0, /* we do not accept RH0 by default. */ 220 .accept_source_route = 0, /* we do not accept RH0 by default. */
221 .disable_ipv6 = 0,
222 .accept_dad = 1,
220}; 223};
221 224
222/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ 225/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
@@ -231,6 +234,12 @@ static inline int addrconf_qdisc_ok(struct net_device *dev)
231 return (dev->qdisc != &noop_qdisc); 234 return (dev->qdisc != &noop_qdisc);
232} 235}
233 236
237/* Check if a route is valid prefix route */
238static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
239{
240 return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
241}
242
234static void addrconf_del_timer(struct inet6_ifaddr *ifp) 243static void addrconf_del_timer(struct inet6_ifaddr *ifp)
235{ 244{
236 if (del_timer(&ifp->timer)) 245 if (del_timer(&ifp->timer))
@@ -344,6 +353,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
344 kfree(ndev); 353 kfree(ndev);
345 return NULL; 354 return NULL;
346 } 355 }
356 if (ndev->cnf.forwarding)
357 dev_disable_lro(dev);
347 /* We refer to the device */ 358 /* We refer to the device */
348 dev_hold(dev); 359 dev_hold(dev);
349 360
@@ -372,6 +383,9 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
372 */ 383 */
373 in6_dev_hold(ndev); 384 in6_dev_hold(ndev);
374 385
386 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
387 ndev->cnf.accept_dad = -1;
388
375#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 389#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
376 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { 390 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
377 printk(KERN_INFO 391 printk(KERN_INFO
@@ -438,6 +452,8 @@ static void dev_forward_change(struct inet6_dev *idev)
438 if (!idev) 452 if (!idev)
439 return; 453 return;
440 dev = idev->dev; 454 dev = idev->dev;
455 if (idev->cnf.forwarding)
456 dev_disable_lro(dev);
441 if (dev && (dev->flags & IFF_MULTICAST)) { 457 if (dev && (dev->flags & IFF_MULTICAST)) {
442 if (idev->cnf.forwarding) 458 if (idev->cnf.forwarding)
443 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 459 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
@@ -483,12 +499,14 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
483 if (p == &net->ipv6.devconf_dflt->forwarding) 499 if (p == &net->ipv6.devconf_dflt->forwarding)
484 return; 500 return;
485 501
502 rtnl_lock();
486 if (p == &net->ipv6.devconf_all->forwarding) { 503 if (p == &net->ipv6.devconf_all->forwarding) {
487 __s32 newf = net->ipv6.devconf_all->forwarding; 504 __s32 newf = net->ipv6.devconf_all->forwarding;
488 net->ipv6.devconf_dflt->forwarding = newf; 505 net->ipv6.devconf_dflt->forwarding = newf;
489 addrconf_forward_change(net, newf); 506 addrconf_forward_change(net, newf);
490 } else if ((!*p) ^ (!old)) 507 } else if ((!*p) ^ (!old))
491 dev_forward_change((struct inet6_dev *)table->extra1); 508 dev_forward_change((struct inet6_dev *)table->extra1);
509 rtnl_unlock();
492 510
493 if (*p) 511 if (*p)
494 rt6_purge_dflt_routers(net); 512 rt6_purge_dflt_routers(net);
@@ -568,6 +586,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
568 struct rt6_info *rt; 586 struct rt6_info *rt;
569 int hash; 587 int hash;
570 int err = 0; 588 int err = 0;
589 int addr_type = ipv6_addr_type(addr);
590
591 if (addr_type == IPV6_ADDR_ANY ||
592 addr_type & IPV6_ADDR_MULTICAST ||
593 (!(idev->dev->flags & IFF_LOOPBACK) &&
594 addr_type & IPV6_ADDR_LOOPBACK))
595 return ERR_PTR(-EADDRNOTAVAIL);
571 596
572 rcu_read_lock_bh(); 597 rcu_read_lock_bh();
573 if (idev->dead) { 598 if (idev->dead) {
@@ -777,7 +802,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
777 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 802 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
778 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1); 803 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
779 804
780 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 805 if (rt && addrconf_is_prefix_route(rt)) {
781 if (onlink == 0) { 806 if (onlink == 0) {
782 ip6_del_rt(rt); 807 ip6_del_rt(rt);
783 rt = NULL; 808 rt = NULL;
@@ -958,7 +983,8 @@ static inline int ipv6_saddr_preferred(int type)
958 return 0; 983 return 0;
959} 984}
960 985
961static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score, 986static int ipv6_get_saddr_eval(struct net *net,
987 struct ipv6_saddr_score *score,
962 struct ipv6_saddr_dst *dst, 988 struct ipv6_saddr_dst *dst,
963 int i) 989 int i)
964{ 990{
@@ -1037,7 +1063,8 @@ static int ipv6_get_saddr_eval(struct ipv6_saddr_score *score,
1037 break; 1063 break;
1038 case IPV6_SADDR_RULE_LABEL: 1064 case IPV6_SADDR_RULE_LABEL:
1039 /* Rule 6: Prefer matching label */ 1065 /* Rule 6: Prefer matching label */
1040 ret = ipv6_addr_label(&score->ifa->addr, score->addr_type, 1066 ret = ipv6_addr_label(net,
1067 &score->ifa->addr, score->addr_type,
1041 score->ifa->idev->dev->ifindex) == dst->label; 1068 score->ifa->idev->dev->ifindex) == dst->label;
1042 break; 1069 break;
1043#ifdef CONFIG_IPV6_PRIVACY 1070#ifdef CONFIG_IPV6_PRIVACY
@@ -1091,7 +1118,7 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1091 dst.addr = daddr; 1118 dst.addr = daddr;
1092 dst.ifindex = dst_dev ? dst_dev->ifindex : 0; 1119 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1093 dst.scope = __ipv6_addr_src_scope(dst_type); 1120 dst.scope = __ipv6_addr_src_scope(dst_type);
1094 dst.label = ipv6_addr_label(daddr, dst_type, dst.ifindex); 1121 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1095 dst.prefs = prefs; 1122 dst.prefs = prefs;
1096 1123
1097 hiscore->rule = -1; 1124 hiscore->rule = -1;
@@ -1159,8 +1186,8 @@ int ipv6_dev_get_saddr(struct net_device *dst_dev,
1159 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) { 1186 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1160 int minihiscore, miniscore; 1187 int minihiscore, miniscore;
1161 1188
1162 minihiscore = ipv6_get_saddr_eval(hiscore, &dst, i); 1189 minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
1163 miniscore = ipv6_get_saddr_eval(score, &dst, i); 1190 miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
1164 1191
1165 if (minihiscore > miniscore) { 1192 if (minihiscore > miniscore) {
1166 if (i == IPV6_SADDR_RULE_SCOPE && 1193 if (i == IPV6_SADDR_RULE_SCOPE &&
@@ -1400,6 +1427,20 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp)
1400 1427
1401void addrconf_dad_failure(struct inet6_ifaddr *ifp) 1428void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1402{ 1429{
1430 struct inet6_dev *idev = ifp->idev;
1431 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
1432 struct in6_addr addr;
1433
1434 addr.s6_addr32[0] = htonl(0xfe800000);
1435 addr.s6_addr32[1] = 0;
1436
1437 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
1438 ipv6_addr_equal(&ifp->addr, &addr)) {
1439 /* DAD failed for link-local based on MAC address */
1440 idev->cnf.disable_ipv6 = 1;
1441 }
1442 }
1443
1403 if (net_ratelimit()) 1444 if (net_ratelimit())
1404 printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name); 1445 printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name);
1405 addrconf_dad_stop(ifp); 1446 addrconf_dad_stop(ifp);
@@ -1788,7 +1829,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1788 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1829 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
1789 dev->ifindex, 1); 1830 dev->ifindex, 1);
1790 1831
1791 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1832 if (rt && addrconf_is_prefix_route(rt)) {
1792 /* Autoconf prefix route */ 1833 /* Autoconf prefix route */
1793 if (valid_lft == 0) { 1834 if (valid_lft == 0) {
1794 ip6_del_rt(rt); 1835 ip6_del_rt(rt);
@@ -2732,6 +2773,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2732 spin_lock_bh(&ifp->lock); 2773 spin_lock_bh(&ifp->lock);
2733 2774
2734 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 2775 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
2776 idev->cnf.accept_dad < 1 ||
2735 !(ifp->flags&IFA_F_TENTATIVE) || 2777 !(ifp->flags&IFA_F_TENTATIVE) ||
2736 ifp->flags & IFA_F_NODAD) { 2778 ifp->flags & IFA_F_NODAD) {
2737 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); 2779 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
@@ -2779,6 +2821,11 @@ static void addrconf_dad_timer(unsigned long data)
2779 read_unlock_bh(&idev->lock); 2821 read_unlock_bh(&idev->lock);
2780 goto out; 2822 goto out;
2781 } 2823 }
2824 if (idev->cnf.accept_dad > 1 && idev->cnf.disable_ipv6) {
2825 read_unlock_bh(&idev->lock);
2826 addrconf_dad_failure(ifp);
2827 return;
2828 }
2782 spin_lock_bh(&ifp->lock); 2829 spin_lock_bh(&ifp->lock);
2783 if (ifp->probes == 0) { 2830 if (ifp->probes == 0) {
2784 /* 2831 /*
@@ -3638,6 +3685,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3638#ifdef CONFIG_IPV6_MROUTE 3685#ifdef CONFIG_IPV6_MROUTE
3639 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; 3686 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
3640#endif 3687#endif
3688 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
3689 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
3641} 3690}
3642 3691
3643static inline size_t inet6_if_nlmsg_size(void) 3692static inline size_t inet6_if_nlmsg_size(void)
@@ -4197,6 +4246,22 @@ static struct addrconf_sysctl_table
4197 }, 4246 },
4198#endif 4247#endif
4199 { 4248 {
4249 .ctl_name = CTL_UNNUMBERED,
4250 .procname = "disable_ipv6",
4251 .data = &ipv6_devconf.disable_ipv6,
4252 .maxlen = sizeof(int),
4253 .mode = 0644,
4254 .proc_handler = &proc_dointvec,
4255 },
4256 {
4257 .ctl_name = CTL_UNNUMBERED,
4258 .procname = "accept_dad",
4259 .data = &ipv6_devconf.accept_dad,
4260 .maxlen = sizeof(int),
4261 .mode = 0644,
4262 .proc_handler = &proc_dointvec,
4263 },
4264 {
4200 .ctl_name = 0, /* sentinel */ 4265 .ctl_name = 0, /* sentinel */
4201 } 4266 }
4202 }, 4267 },
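The check added to ipv6_add_addr() above rejects the unspecified address, any multicast address, and (except on loopback devices) the loopback address before any address state is allocated. A stand-alone restatement of that classification, assuming only ipv6_addr_type(); the function name is illustrative:

#include <linux/netdevice.h>
#include <net/ipv6.h>

/* Sketch: returns true only for addresses that the hunk above would
 * allow to be configured on 'dev'. */
static bool example_addr_assignable(const struct in6_addr *addr,
				    const struct net_device *dev)
{
	int addr_type = ipv6_addr_type(addr);

	if (addr_type == IPV6_ADDR_ANY || (addr_type & IPV6_ADDR_MULTICAST))
		return false;
	if (!(dev->flags & IFF_LOOPBACK) && (addr_type & IPV6_ADDR_LOOPBACK))
		return false;
	return true;
}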
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 9bfa8846f262..08909039d87b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -29,6 +29,9 @@
29 */ 29 */
30struct ip6addrlbl_entry 30struct ip6addrlbl_entry
31{ 31{
32#ifdef CONFIG_NET_NS
33 struct net *lbl_net;
34#endif
32 struct in6_addr prefix; 35 struct in6_addr prefix;
33 int prefixlen; 36 int prefixlen;
34 int ifindex; 37 int ifindex;
@@ -46,6 +49,16 @@ static struct ip6addrlbl_table
46 u32 seq; 49 u32 seq;
47} ip6addrlbl_table; 50} ip6addrlbl_table;
48 51
52static inline
53struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
54{
55#ifdef CONFIG_NET_NS
56 return lbl->lbl_net;
57#else
58 return &init_net;
59#endif
60}
61
49/* 62/*
50 * Default policy table (RFC3484 + extensions) 63 * Default policy table (RFC3484 + extensions)
51 * 64 *
@@ -65,7 +78,7 @@ static struct ip6addrlbl_table
65 78
66#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL 79#define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL
67 80
68static const __initdata struct ip6addrlbl_init_table 81static const __net_initdata struct ip6addrlbl_init_table
69{ 82{
70 const struct in6_addr *prefix; 83 const struct in6_addr *prefix;
71 int prefixlen; 84 int prefixlen;
@@ -108,6 +121,9 @@ static const __initdata struct ip6addrlbl_init_table
108/* Object management */ 121/* Object management */
109static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) 122static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p)
110{ 123{
124#ifdef CONFIG_NET_NS
125 release_net(p->lbl_net);
126#endif
111 kfree(p); 127 kfree(p);
112} 128}
113 129
@@ -128,10 +144,13 @@ static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p)
128} 144}
129 145
130/* Find label */ 146/* Find label */
131static int __ip6addrlbl_match(struct ip6addrlbl_entry *p, 147static int __ip6addrlbl_match(struct net *net,
148 struct ip6addrlbl_entry *p,
132 const struct in6_addr *addr, 149 const struct in6_addr *addr,
133 int addrtype, int ifindex) 150 int addrtype, int ifindex)
134{ 151{
152 if (!net_eq(ip6addrlbl_net(p), net))
153 return 0;
135 if (p->ifindex && p->ifindex != ifindex) 154 if (p->ifindex && p->ifindex != ifindex)
136 return 0; 155 return 0;
137 if (p->addrtype && p->addrtype != addrtype) 156 if (p->addrtype && p->addrtype != addrtype)
@@ -141,19 +160,21 @@ static int __ip6addrlbl_match(struct ip6addrlbl_entry *p,
141 return 1; 160 return 1;
142} 161}
143 162
144static struct ip6addrlbl_entry *__ipv6_addr_label(const struct in6_addr *addr, 163static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
164 const struct in6_addr *addr,
145 int type, int ifindex) 165 int type, int ifindex)
146{ 166{
147 struct hlist_node *pos; 167 struct hlist_node *pos;
148 struct ip6addrlbl_entry *p; 168 struct ip6addrlbl_entry *p;
149 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 169 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
150 if (__ip6addrlbl_match(p, addr, type, ifindex)) 170 if (__ip6addrlbl_match(net, p, addr, type, ifindex))
151 return p; 171 return p;
152 } 172 }
153 return NULL; 173 return NULL;
154} 174}
155 175
156u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex) 176u32 ipv6_addr_label(struct net *net,
177 const struct in6_addr *addr, int type, int ifindex)
157{ 178{
158 u32 label; 179 u32 label;
159 struct ip6addrlbl_entry *p; 180 struct ip6addrlbl_entry *p;
@@ -161,7 +182,7 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
161 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; 182 type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK;
162 183
163 rcu_read_lock(); 184 rcu_read_lock();
164 p = __ipv6_addr_label(addr, type, ifindex); 185 p = __ipv6_addr_label(net, addr, type, ifindex);
165 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; 186 label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT;
166 rcu_read_unlock(); 187 rcu_read_unlock();
167 188
@@ -174,7 +195,8 @@ u32 ipv6_addr_label(const struct in6_addr *addr, int type, int ifindex)
174} 195}
175 196
176/* allocate one entry */ 197/* allocate one entry */
177static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, 198static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
199 const struct in6_addr *prefix,
178 int prefixlen, int ifindex, 200 int prefixlen, int ifindex,
179 u32 label) 201 u32 label)
180{ 202{
@@ -216,6 +238,9 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
216 newp->addrtype = addrtype; 238 newp->addrtype = addrtype;
217 newp->label = label; 239 newp->label = label;
218 INIT_HLIST_NODE(&newp->list); 240 INIT_HLIST_NODE(&newp->list);
241#ifdef CONFIG_NET_NS
242 newp->lbl_net = hold_net(net);
243#endif
219 atomic_set(&newp->refcnt, 1); 244 atomic_set(&newp->refcnt, 1);
220 return newp; 245 return newp;
221} 246}
@@ -237,6 +262,7 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
237 hlist_for_each_entry_safe(p, pos, n, 262 hlist_for_each_entry_safe(p, pos, n,
238 &ip6addrlbl_table.head, list) { 263 &ip6addrlbl_table.head, list) {
239 if (p->prefixlen == newp->prefixlen && 264 if (p->prefixlen == newp->prefixlen &&
265 net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
240 p->ifindex == newp->ifindex && 266 p->ifindex == newp->ifindex &&
241 ipv6_addr_equal(&p->prefix, &newp->prefix)) { 267 ipv6_addr_equal(&p->prefix, &newp->prefix)) {
242 if (!replace) { 268 if (!replace) {
@@ -261,7 +287,8 @@ out:
261} 287}
262 288
263/* add a label */ 289/* add a label */
264static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen, 290static int ip6addrlbl_add(struct net *net,
291 const struct in6_addr *prefix, int prefixlen,
265 int ifindex, u32 label, int replace) 292 int ifindex, u32 label, int replace)
266{ 293{
267 struct ip6addrlbl_entry *newp; 294 struct ip6addrlbl_entry *newp;
@@ -274,7 +301,7 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
274 (unsigned int)label, 301 (unsigned int)label,
275 replace); 302 replace);
276 303
277 newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label); 304 newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label);
278 if (IS_ERR(newp)) 305 if (IS_ERR(newp))
279 return PTR_ERR(newp); 306 return PTR_ERR(newp);
280 spin_lock(&ip6addrlbl_table.lock); 307 spin_lock(&ip6addrlbl_table.lock);
@@ -286,7 +313,8 @@ static int ip6addrlbl_add(const struct in6_addr *prefix, int prefixlen,
286} 313}
287 314
288/* remove a label */ 315/* remove a label */
289static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 316static int __ip6addrlbl_del(struct net *net,
317 const struct in6_addr *prefix, int prefixlen,
290 int ifindex) 318 int ifindex)
291{ 319{
292 struct ip6addrlbl_entry *p = NULL; 320 struct ip6addrlbl_entry *p = NULL;
@@ -300,6 +328,7 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
300 328
301 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { 329 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
302 if (p->prefixlen == prefixlen && 330 if (p->prefixlen == prefixlen &&
331 net_eq(ip6addrlbl_net(p), net) &&
303 p->ifindex == ifindex && 332 p->ifindex == ifindex &&
304 ipv6_addr_equal(&p->prefix, prefix)) { 333 ipv6_addr_equal(&p->prefix, prefix)) {
305 hlist_del_rcu(&p->list); 334 hlist_del_rcu(&p->list);
@@ -311,7 +340,8 @@ static int __ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
311 return ret; 340 return ret;
312} 341}
313 342
314static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen, 343static int ip6addrlbl_del(struct net *net,
344 const struct in6_addr *prefix, int prefixlen,
315 int ifindex) 345 int ifindex)
316{ 346{
317 struct in6_addr prefix_buf; 347 struct in6_addr prefix_buf;
@@ -324,13 +354,13 @@ static int ip6addrlbl_del(const struct in6_addr *prefix, int prefixlen,
324 354
325 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); 355 ipv6_addr_prefix(&prefix_buf, prefix, prefixlen);
326 spin_lock(&ip6addrlbl_table.lock); 356 spin_lock(&ip6addrlbl_table.lock);
327 ret = __ip6addrlbl_del(&prefix_buf, prefixlen, ifindex); 357 ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex);
328 spin_unlock(&ip6addrlbl_table.lock); 358 spin_unlock(&ip6addrlbl_table.lock);
329 return ret; 359 return ret;
330} 360}
331 361
332/* add default label */ 362/* add default label */
333static __init int ip6addrlbl_init(void) 363static int __net_init ip6addrlbl_net_init(struct net *net)
334{ 364{
335 int err = 0; 365 int err = 0;
336 int i; 366 int i;
@@ -338,7 +368,8 @@ static __init int ip6addrlbl_init(void)
338 ADDRLABEL(KERN_DEBUG "%s()\n", __func__); 368 ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
339 369
340 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { 370 for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
341 int ret = ip6addrlbl_add(ip6addrlbl_init_table[i].prefix, 371 int ret = ip6addrlbl_add(net,
372 ip6addrlbl_init_table[i].prefix,
342 ip6addrlbl_init_table[i].prefixlen, 373 ip6addrlbl_init_table[i].prefixlen,
343 0, 374 0,
344 ip6addrlbl_init_table[i].label, 0); 375 ip6addrlbl_init_table[i].label, 0);
@@ -349,11 +380,32 @@ static __init int ip6addrlbl_init(void)
349 return err; 380 return err;
350} 381}
351 382
383static void __net_exit ip6addrlbl_net_exit(struct net *net)
384{
385 struct ip6addrlbl_entry *p = NULL;
386 struct hlist_node *pos, *n;
387
388 /* Remove all labels belonging to the exiting net */
389 spin_lock(&ip6addrlbl_table.lock);
390 hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
391 if (net_eq(ip6addrlbl_net(p), net)) {
392 hlist_del_rcu(&p->list);
393 ip6addrlbl_put(p);
394 }
395 }
396 spin_unlock(&ip6addrlbl_table.lock);
397}
398
399static struct pernet_operations ipv6_addr_label_ops = {
400 .init = ip6addrlbl_net_init,
401 .exit = ip6addrlbl_net_exit,
402};
403
352int __init ipv6_addr_label_init(void) 404int __init ipv6_addr_label_init(void)
353{ 405{
354 spin_lock_init(&ip6addrlbl_table.lock); 406 spin_lock_init(&ip6addrlbl_table.lock);
355 407
356 return ip6addrlbl_init(); 408 return register_pernet_subsys(&ipv6_addr_label_ops);
357} 409}
358 410
359static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 411static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
@@ -371,9 +423,6 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
371 u32 label; 423 u32 label;
372 int err = 0; 424 int err = 0;
373 425
374 if (net != &init_net)
375 return 0;
376
377 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 426 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
378 if (err < 0) 427 if (err < 0)
379 return err; 428 return err;
@@ -385,7 +434,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
385 return -EINVAL; 434 return -EINVAL;
386 435
387 if (ifal->ifal_index && 436 if (ifal->ifal_index &&
388 !__dev_get_by_index(&init_net, ifal->ifal_index)) 437 !__dev_get_by_index(net, ifal->ifal_index))
389 return -EINVAL; 438 return -EINVAL;
390 439
391 if (!tb[IFAL_ADDRESS]) 440 if (!tb[IFAL_ADDRESS])
@@ -403,12 +452,12 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
403 452
404 switch(nlh->nlmsg_type) { 453 switch(nlh->nlmsg_type) {
405 case RTM_NEWADDRLABEL: 454 case RTM_NEWADDRLABEL:
406 err = ip6addrlbl_add(pfx, ifal->ifal_prefixlen, 455 err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen,
407 ifal->ifal_index, label, 456 ifal->ifal_index, label,
408 nlh->nlmsg_flags & NLM_F_REPLACE); 457 nlh->nlmsg_flags & NLM_F_REPLACE);
409 break; 458 break;
410 case RTM_DELADDRLABEL: 459 case RTM_DELADDRLABEL:
411 err = ip6addrlbl_del(pfx, ifal->ifal_prefixlen, 460 err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen,
412 ifal->ifal_index); 461 ifal->ifal_index);
413 break; 462 break;
414 default: 463 default:
@@ -458,12 +507,10 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
458 int idx = 0, s_idx = cb->args[0]; 507 int idx = 0, s_idx = cb->args[0];
459 int err; 508 int err;
460 509
461 if (net != &init_net)
462 return 0;
463
464 rcu_read_lock(); 510 rcu_read_lock();
465 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { 511 hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
466 if (idx >= s_idx) { 512 if (idx >= s_idx &&
513 net_eq(ip6addrlbl_net(p), net)) {
467 if ((err = ip6addrlbl_fill(skb, p, 514 if ((err = ip6addrlbl_fill(skb, p,
468 ip6addrlbl_table.seq, 515 ip6addrlbl_table.seq,
469 NETLINK_CB(cb->skb).pid, 516 NETLINK_CB(cb->skb).pid,
@@ -499,9 +546,6 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
499 struct ip6addrlbl_entry *p; 546 struct ip6addrlbl_entry *p;
500 struct sk_buff *skb; 547 struct sk_buff *skb;
501 548
502 if (net != &init_net)
503 return 0;
504
505 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); 549 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
506 if (err < 0) 550 if (err < 0)
507 return err; 551 return err;
@@ -513,7 +557,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
513 return -EINVAL; 557 return -EINVAL;
514 558
515 if (ifal->ifal_index && 559 if (ifal->ifal_index &&
516 !__dev_get_by_index(&init_net, ifal->ifal_index)) 560 !__dev_get_by_index(net, ifal->ifal_index))
517 return -EINVAL; 561 return -EINVAL;
518 562
519 if (!tb[IFAL_ADDRESS]) 563 if (!tb[IFAL_ADDRESS])
@@ -524,7 +568,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
524 return -EINVAL; 568 return -EINVAL;
525 569
526 rcu_read_lock(); 570 rcu_read_lock();
527 p = __ipv6_addr_label(addr, ipv6_addr_type(addr), ifal->ifal_index); 571 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
528 if (p && ip6addrlbl_hold(p)) 572 if (p && ip6addrlbl_hold(p))
529 p = NULL; 573 p = NULL;
530 lseq = ip6addrlbl_table.seq; 574 lseq = ip6addrlbl_table.seq;
@@ -552,7 +596,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
552 goto out; 596 goto out;
553 } 597 }
554 598
555 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 599 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
556out: 600out:
557 return err; 601 return err;
558} 602}
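The addrlabel changes above make the label table namespace-aware by tagging each entry with its struct net and by trading the single __init constructor for pernet_operations, so every namespace gets the default RFC 3484 policy table at creation and has its entries purged at exit. A minimal sketch of that registration pattern, with invented names:

#include <linux/module.h>
#include <net/net_namespace.h>

/* Sketch only: per-namespace constructor/destructor hooks. Real code
 * would build and tear down whatever state belongs to 'net'. */
static int __net_init example_net_init(struct net *net)
{
	return 0;		/* install defaults for this namespace */
}

static void __net_exit example_net_exit(struct net *net)
{
	/* drop everything owned by the exiting namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_module_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);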
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e84b3fd17fb4..3d828bc4b1cf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/af_inet.c 8 * Adapted from linux/net/ipv4/af_inet.c
9 * 9 *
10 * $Id: af_inet6.c,v 1.66 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * piggy, Karl Knutson : Socket protocol table 11 * piggy, Karl Knutson : Socket protocol table
14 * Hideaki YOSHIFUJI : sin6_scope_id support 12 * Hideaki YOSHIFUJI : sin6_scope_id support
@@ -61,9 +59,7 @@
61 59
62#include <asm/uaccess.h> 60#include <asm/uaccess.h>
63#include <asm/system.h> 61#include <asm/system.h>
64#ifdef CONFIG_IPV6_MROUTE
65#include <linux/mroute6.h> 62#include <linux/mroute6.h>
66#endif
67 63
68MODULE_AUTHOR("Cast of dozens"); 64MODULE_AUTHOR("Cast of dozens");
69MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); 65MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
@@ -373,7 +369,7 @@ int inet6_release(struct socket *sock)
373 369
374EXPORT_SYMBOL(inet6_release); 370EXPORT_SYMBOL(inet6_release);
375 371
376int inet6_destroy_sock(struct sock *sk) 372void inet6_destroy_sock(struct sock *sk)
377{ 373{
378 struct ipv6_pinfo *np = inet6_sk(sk); 374 struct ipv6_pinfo *np = inet6_sk(sk);
379 struct sk_buff *skb; 375 struct sk_buff *skb;
@@ -391,8 +387,6 @@ int inet6_destroy_sock(struct sock *sk)
391 387
392 if ((opt = xchg(&np->opt, NULL)) != NULL) 388 if ((opt = xchg(&np->opt, NULL)) != NULL)
393 sock_kfree_s(sk, opt, opt->tot_len); 389 sock_kfree_s(sk, opt, opt->tot_len);
394
395 return 0;
396} 390}
397 391
398EXPORT_SYMBOL_GPL(inet6_destroy_sock); 392EXPORT_SYMBOL_GPL(inet6_destroy_sock);
@@ -956,9 +950,9 @@ static int __init inet6_init(void)
956 err = icmpv6_init(); 950 err = icmpv6_init();
957 if (err) 951 if (err)
958 goto icmp_fail; 952 goto icmp_fail;
959#ifdef CONFIG_IPV6_MROUTE 953 err = ip6_mr_init();
960 ip6_mr_init(); 954 if (err)
961#endif 955 goto ipmr_fail;
962 err = ndisc_init(); 956 err = ndisc_init();
963 if (err) 957 if (err)
964 goto ndisc_fail; 958 goto ndisc_fail;
@@ -1061,6 +1055,8 @@ netfilter_fail:
1061igmp_fail: 1055igmp_fail:
1062 ndisc_cleanup(); 1056 ndisc_cleanup();
1063ndisc_fail: 1057ndisc_fail:
1058 ip6_mr_cleanup();
1059ipmr_fail:
1064 icmpv6_cleanup(); 1060 icmpv6_cleanup();
1065icmp_fail: 1061icmp_fail:
1066 unregister_pernet_subsys(&inet6_net_ops); 1062 unregister_pernet_subsys(&inet6_net_ops);
@@ -1115,6 +1111,7 @@ static void __exit inet6_exit(void)
1115 ipv6_netfilter_fini(); 1111 ipv6_netfilter_fini();
1116 igmp6_cleanup(); 1112 igmp6_cleanup();
1117 ndisc_cleanup(); 1113 ndisc_cleanup();
1114 ip6_mr_cleanup();
1118 icmpv6_cleanup(); 1115 icmpv6_cleanup();
1119 rawv6_exit(); 1116 rawv6_exit();
1120 1117
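With ip6_mr_init() now able to fail, inet6_init() grows one more rung in its unwind ladder: each later failure label must also invoke ip6_mr_cleanup(). The general shape of that goto-based unwind, with stub functions standing in for the real subsystems (every name below is hypothetical):

#include <linux/init.h>

static int __init example_icmp_init(void)  { return 0; }
static void example_icmp_cleanup(void)     { }
static int __init example_mr_init(void)    { return 0; }
static void example_mr_cleanup(void)       { }
static int __init example_ndisc_init(void) { return 0; }

/* Sketch: each label undoes only the steps that already succeeded,
 * in reverse order of initialisation. */
static int __init example_stack_init(void)
{
	int err;

	err = example_icmp_init();
	if (err)
		goto icmp_fail;
	err = example_mr_init();
	if (err)
		goto mr_fail;
	err = example_ndisc_init();
	if (err)
		goto ndisc_fail;
	return 0;

ndisc_fail:
	example_mr_cleanup();
mr_fail:
	example_icmp_cleanup();
icmp_fail:
	return err;
}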
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0f0f94a40335..f7b535dec860 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: datagram.c,v 1.24 2002/02/01 22:01:04 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3cd1c993d52b..602ea826f0a5 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -7,8 +7,6 @@
7 * Andi Kleen <ak@muc.de> 7 * Andi Kleen <ak@muc.de>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 * 9 *
10 * $Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index d42dd16d3487..abedf95fdf2d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on net/ipv4/icmp.c 8 * Based on net/ipv4/icmp.c
11 * 9 *
12 * RFC 1885 10 * RFC 1885
@@ -956,7 +954,8 @@ ctl_table ipv6_icmp_table_template[] = {
956 .data = &init_net.ipv6.sysctl.icmpv6_time, 954 .data = &init_net.ipv6.sysctl.icmpv6_time,
957 .maxlen = sizeof(int), 955 .maxlen = sizeof(int),
958 .mode = 0644, 956 .mode = 0644,
959 .proc_handler = &proc_dointvec 957 .proc_handler = &proc_dointvec_ms_jiffies,
958 .strategy = &sysctl_ms_jiffies
960 }, 959 },
961 { .ctl_name = 0 }, 960 { .ctl_name = 0 },
962}; 961};
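The icmpv6_time entry above moves from plain proc_dointvec to the millisecond-jiffies handlers, so userspace reads and writes the limit in milliseconds while the kernel keeps the value in jiffies. A hedged sketch of such a ctl_table entry; the variable and procname are invented:

#include <linux/jiffies.h>
#include <linux/sysctl.h>

static int example_ratelimit = HZ;	/* stored in jiffies */

static ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_ratelimit_ms",
		.data		= &example_ratelimit,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* userspace sees milliseconds, kernel stores jiffies */
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{ .ctl_name = 0 }
};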
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 580014aea4d6..a9cc8ab33a49 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -68,7 +68,7 @@ struct sock *__inet6_lookup_established(struct net *net,
68 /* Optimize here for direct hit, only listening connections can 68 /* Optimize here for direct hit, only listening connections can
69 * have wildcards anyways. 69 * have wildcards anyways.
70 */ 70 */
71 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); 71 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); 72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
73 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash); 73 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
74 74
@@ -104,7 +104,8 @@ struct sock *inet6_lookup_listener(struct net *net,
104 int score, hiscore = 0; 104 int score, hiscore = 0;
105 105
106 read_lock(&hashinfo->lhash_lock); 106 read_lock(&hashinfo->lhash_lock);
107 sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) { 107 sk_for_each(sk, node,
108 &hashinfo->listening_hash[inet_lhashfn(net, hnum)]) {
108 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && 109 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum &&
109 sk->sk_family == PF_INET6) { 110 sk->sk_family == PF_INET6) {
110 const struct ipv6_pinfo *np = inet6_sk(sk); 111 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -165,14 +166,14 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
165 const struct in6_addr *saddr = &np->daddr; 166 const struct in6_addr *saddr = &np->daddr;
166 const int dif = sk->sk_bound_dev_if; 167 const int dif = sk->sk_bound_dev_if;
167 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 168 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
168 const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, 169 struct net *net = sock_net(sk);
170 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
169 inet->dport); 171 inet->dport);
170 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 172 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
171 rwlock_t *lock = inet_ehash_lockp(hinfo, hash); 173 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
172 struct sock *sk2; 174 struct sock *sk2;
173 const struct hlist_node *node; 175 const struct hlist_node *node;
174 struct inet_timewait_sock *tw; 176 struct inet_timewait_sock *tw;
175 struct net *net = sock_net(sk);
176 177
177 prefetch(head->chain.first); 178 prefetch(head->chain.first);
178 write_lock(lock); 179 write_lock(lock);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1ee4fa17c129..4de2b9efcacb 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 17eb48b8e329..ea81c614dde2 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Ian P. Morris <I.P.Morris@soton.ac.uk> 7 * Ian P. Morris <I.P.Morris@soton.ac.uk>
8 * 8 *
9 * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $
10 *
11 * Based in linux/net/ipv4/ip_input.c 9 * Based in linux/net/ipv4/ip_input.c
12 * 10 *
13 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
@@ -73,7 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
73 71
74 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); 72 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES);
75 73
76 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 74 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
75 !idev || unlikely(idev->cnf.disable_ipv6)) {
77 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); 76 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
78 rcu_read_unlock(); 77 rcu_read_unlock();
79 goto out; 78 goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 48cdce9c696c..0981c1ef3057 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on linux/net/ipv4/ip_output.c 8 * Based on linux/net/ipv4/ip_output.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -175,6 +173,13 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
175 173
176int ip6_output(struct sk_buff *skb) 174int ip6_output(struct sk_buff *skb)
177{ 175{
176 struct inet6_dev *idev = ip6_dst_idev(skb->dst);
177 if (unlikely(idev->cnf.disable_ipv6)) {
178 IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
179 kfree_skb(skb);
180 return 0;
181 }
182
178 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 183 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
179 dst_allfrag(skb->dst)) 184 dst_allfrag(skb->dst))
180 return ip6_fragment(skb, ip6_output2); 185 return ip6_fragment(skb, ip6_output2);
@@ -409,6 +414,9 @@ int ip6_forward(struct sk_buff *skb)
409 if (ipv6_devconf.forwarding == 0) 414 if (ipv6_devconf.forwarding == 0)
410 goto error; 415 goto error;
411 416
417 if (skb_warn_if_lro(skb))
418 goto drop;
419
412 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { 420 if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
413 IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); 421 IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
414 goto drop; 422 goto drop;
@@ -497,7 +505,8 @@ int ip6_forward(struct sk_buff *skb)
497 int addrtype = ipv6_addr_type(&hdr->saddr); 505 int addrtype = ipv6_addr_type(&hdr->saddr);
498 506
499 /* This check is security critical. */ 507 /* This check is security critical. */
500 if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK)) 508 if (addrtype == IPV6_ADDR_ANY ||
509 addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
501 goto error; 510 goto error;
502 if (addrtype & IPV6_ADDR_LINKLOCAL) { 511 if (addrtype & IPV6_ADDR_LINKLOCAL) {
503 icmpv6_send(skb, ICMPV6_DEST_UNREACH, 512 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2bda3ba100b1..17c7b098cdb0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -6,8 +6,6 @@
6 * Ville Nuorvala <vnuorval@tcs.hut.fi> 6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org> 7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 * 8 *
9 * $Id$
10 *
11 * Based on: 9 * Based on:
12 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c 10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
13 * 11 *
@@ -711,7 +709,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
711 } 709 }
712 710
713 if (!ip6_tnl_rcv_ctl(t)) { 711 if (!ip6_tnl_rcv_ctl(t)) {
714 t->stat.rx_dropped++; 712 t->dev->stats.rx_dropped++;
715 read_unlock(&ip6_tnl_lock); 713 read_unlock(&ip6_tnl_lock);
716 goto discard; 714 goto discard;
717 } 715 }
@@ -728,8 +726,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 726
729 dscp_ecn_decapsulate(t, ipv6h, skb); 727 dscp_ecn_decapsulate(t, ipv6h, skb);
730 728
731 t->stat.rx_packets++; 729 t->dev->stats.rx_packets++;
732 t->stat.rx_bytes += skb->len; 730 t->dev->stats.rx_bytes += skb->len;
733 netif_rx(skb); 731 netif_rx(skb);
734 read_unlock(&ip6_tnl_lock); 732 read_unlock(&ip6_tnl_lock);
735 return 0; 733 return 0;
@@ -849,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
849 __u32 *pmtu) 847 __u32 *pmtu)
850{ 848{
851 struct ip6_tnl *t = netdev_priv(dev); 849 struct ip6_tnl *t = netdev_priv(dev);
852 struct net_device_stats *stats = &t->stat; 850 struct net_device_stats *stats = &t->dev->stats;
853 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 851 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
854 struct ipv6_tel_txoption opt; 852 struct ipv6_tel_txoption opt;
855 struct dst_entry *dst; 853 struct dst_entry *dst;
@@ -1043,11 +1041,11 @@ static int
1043ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1041ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1044{ 1042{
1045 struct ip6_tnl *t = netdev_priv(dev); 1043 struct ip6_tnl *t = netdev_priv(dev);
1046 struct net_device_stats *stats = &t->stat; 1044 struct net_device_stats *stats = &t->dev->stats;
1047 int ret; 1045 int ret;
1048 1046
1049 if (t->recursion++) { 1047 if (t->recursion++) {
1050 t->stat.collisions++; 1048 stats->collisions++;
1051 goto tx_err; 1049 goto tx_err;
1052 } 1050 }
1053 1051
@@ -1289,19 +1287,6 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1289} 1287}
1290 1288
1291/** 1289/**
1292 * ip6_tnl_get_stats - return the stats for tunnel device
1293 * @dev: virtual device associated with tunnel
1294 *
1295 * Return: stats for device
1296 **/
1297
1298static struct net_device_stats *
1299ip6_tnl_get_stats(struct net_device *dev)
1300{
1301 return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
1302}
1303
1304/**
1305 * ip6_tnl_change_mtu - change mtu manually for tunnel device 1290 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1306 * @dev: virtual device associated with tunnel 1291 * @dev: virtual device associated with tunnel
1307 * @new_mtu: the new mtu 1292 * @new_mtu: the new mtu
@@ -1334,7 +1319,6 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
1334 dev->uninit = ip6_tnl_dev_uninit; 1319 dev->uninit = ip6_tnl_dev_uninit;
1335 dev->destructor = free_netdev; 1320 dev->destructor = free_netdev;
1336 dev->hard_start_xmit = ip6_tnl_xmit; 1321 dev->hard_start_xmit = ip6_tnl_xmit;
1337 dev->get_stats = ip6_tnl_get_stats;
1338 dev->do_ioctl = ip6_tnl_ioctl; 1322 dev->do_ioctl = ip6_tnl_ioctl;
1339 dev->change_mtu = ip6_tnl_change_mtu; 1323 dev->change_mtu = ip6_tnl_change_mtu;
1340 1324
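ip6_tnl above stops keeping a private net_device_stats and drops its get_stats callback, counting instead into the statistics embedded in struct net_device (the pim6reg device below gets the same treatment). Roughly, the accounting then looks like this hypothetical transmit handler:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: bump the device's built-in counters directly; no private
 * stats structure or get_stats hook is needed. */
static int example_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}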
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 14796181e8b5..cfac26d674ed 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -388,8 +388,8 @@ static int pim6_rcv(struct sk_buff *skb)
388 skb->ip_summed = 0; 388 skb->ip_summed = 0;
389 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
390 dst_release(skb->dst); 390 dst_release(skb->dst);
391 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len; 391 reg_dev->stats.rx_bytes += skb->len;
392 ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++; 392 reg_dev->stats.rx_packets++;
393 skb->dst = NULL; 393 skb->dst = NULL;
394 nf_reset(skb); 394 nf_reset(skb);
395 netif_rx(skb); 395 netif_rx(skb);
@@ -409,26 +409,20 @@ static struct inet6_protocol pim6_protocol = {
409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 409static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
410{ 410{
411 read_lock(&mrt_lock); 411 read_lock(&mrt_lock);
412 ((struct net_device_stats *)netdev_priv(dev))->tx_bytes += skb->len; 412 dev->stats.tx_bytes += skb->len;
413 ((struct net_device_stats *)netdev_priv(dev))->tx_packets++; 413 dev->stats.tx_packets++;
414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT); 414 ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
415 read_unlock(&mrt_lock); 415 read_unlock(&mrt_lock);
416 kfree_skb(skb); 416 kfree_skb(skb);
417 return 0; 417 return 0;
418} 418}
419 419
420static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
421{
422 return (struct net_device_stats *)netdev_priv(dev);
423}
424
425static void reg_vif_setup(struct net_device *dev) 420static void reg_vif_setup(struct net_device *dev)
426{ 421{
427 dev->type = ARPHRD_PIMREG; 422 dev->type = ARPHRD_PIMREG;
428 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; 423 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
429 dev->flags = IFF_NOARP; 424 dev->flags = IFF_NOARP;
430 dev->hard_start_xmit = reg_vif_xmit; 425 dev->hard_start_xmit = reg_vif_xmit;
431 dev->get_stats = reg_vif_get_stats;
432 dev->destructor = free_netdev; 426 dev->destructor = free_netdev;
433} 427}
434 428
@@ -436,9 +430,7 @@ static struct net_device *ip6mr_reg_vif(void)
436{ 430{
437 struct net_device *dev; 431 struct net_device *dev;
438 432
439 dev = alloc_netdev(sizeof(struct net_device_stats), "pim6reg", 433 dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
440 reg_vif_setup);
441
442 if (dev == NULL) 434 if (dev == NULL)
443 return NULL; 435 return NULL;
444 436
@@ -956,23 +948,51 @@ static struct notifier_block ip6_mr_notifier = {
956 * Setup for IP multicast routing 948 * Setup for IP multicast routing
957 */ 949 */
958 950
959void __init ip6_mr_init(void) 951int __init ip6_mr_init(void)
960{ 952{
953 int err;
954
961 mrt_cachep = kmem_cache_create("ip6_mrt_cache", 955 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
962 sizeof(struct mfc6_cache), 956 sizeof(struct mfc6_cache),
963 0, SLAB_HWCACHE_ALIGN, 957 0, SLAB_HWCACHE_ALIGN,
964 NULL); 958 NULL);
965 if (!mrt_cachep) 959 if (!mrt_cachep)
966 panic("cannot allocate ip6_mrt_cache"); 960 return -ENOMEM;
967 961
968 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); 962 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
969 register_netdevice_notifier(&ip6_mr_notifier); 963 err = register_netdevice_notifier(&ip6_mr_notifier);
964 if (err)
965 goto reg_notif_fail;
970#ifdef CONFIG_PROC_FS 966#ifdef CONFIG_PROC_FS
971 proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops); 967 err = -ENOMEM;
972 proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops); 968 if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
969 goto proc_vif_fail;
970 if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
971 0, &ip6mr_mfc_fops))
972 goto proc_cache_fail;
973#endif 973#endif
974 return 0;
975reg_notif_fail:
976 kmem_cache_destroy(mrt_cachep);
977#ifdef CONFIG_PROC_FS
978proc_vif_fail:
979 unregister_netdevice_notifier(&ip6_mr_notifier);
980proc_cache_fail:
981 proc_net_remove(&init_net, "ip6_mr_vif");
982#endif
983 return err;
974} 984}
975 985
986void ip6_mr_cleanup(void)
987{
988#ifdef CONFIG_PROC_FS
989 proc_net_remove(&init_net, "ip6_mr_cache");
990 proc_net_remove(&init_net, "ip6_mr_vif");
991#endif
992 unregister_netdevice_notifier(&ip6_mr_notifier);
993 del_timer(&ipmr_expire_timer);
994 kmem_cache_destroy(mrt_cachep);
995}
976 996
977static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock) 997static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
978{ 998{
@@ -1248,7 +1268,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int
1248 1268
1249#endif 1269#endif
1250 /* 1270 /*
1251 * Spurious command, or MRT_VERSION which you cannot 1271 * Spurious command, or MRT6_VERSION which you cannot
1252 * set. 1272 * set.
1253 */ 1273 */
1254 default: 1274 default:
@@ -1377,8 +1397,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1377 if (vif->flags & MIFF_REGISTER) { 1397 if (vif->flags & MIFF_REGISTER) {
1378 vif->pkt_out++; 1398 vif->pkt_out++;
1379 vif->bytes_out += skb->len; 1399 vif->bytes_out += skb->len;
1380 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_bytes += skb->len; 1400 vif->dev->stats.tx_bytes += skb->len;
1381 ((struct net_device_stats *)netdev_priv(vif->dev))->tx_packets++; 1401 vif->dev->stats.tx_packets++;
1382 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT); 1402 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
1383 kfree_skb(skb); 1403 kfree_skb(skb);
1384 return 0; 1404 return 0;
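With its statistics now held in struct net_device, the pim6reg register VIF above no longer needs a private area, hence alloc_netdev(0, ...). A sketch of creating such a minimal virtual device; all names are illustrative, and a real device would also install a transmit handler:

#include <linux/if_arp.h>
#include <linux/netdevice.h>

static void example_vif_setup(struct net_device *dev)
{
	dev->type	= ARPHRD_NONE;
	dev->flags	= IFF_NOARP;
	dev->destructor	= free_netdev;
}

/* Sketch: zero sizeof_priv, since the only per-device state needed
 * here (the stats) already lives inside struct net_device. */
static struct net_device *example_vif_create(void)
{
	struct net_device *dev = alloc_netdev(0, "examplevif%d",
					      example_vif_setup);

	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}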
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 86e28a75267f..030c0c956f9d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/net/ipv4/ip_sockglue.c 8 * Based on linux/net/ipv4/ip_sockglue.c
9 * 9 *
10 * $Id: ipv6_sockglue.c,v 1.41 2002/02/01 22:01:04 davem Exp $
11 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fd632dd7f98d..bd2fe4cfafa7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: mcast.c,v 1.40 2002/02/08 03:57:19 davem Exp $
9 *
10 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c 8 * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -164,7 +162,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
164 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ 162 ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
165 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) 163 (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
166 164
167#define MLDV2_QQIC(value) MLDV2_EXP(0x80, 4, 3, value)
168#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) 165#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
169 166
170#define IPV6_MLD_MAX_MSF 64 167#define IPV6_MLD_MAX_MSF 64
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6cae5475737e..689dec899c57 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -208,5 +208,17 @@ config IP6_NF_RAW
208 If you want to compile it as a module, say M here and read 208 If you want to compile it as a module, say M here and read
209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'. 209 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
210 210
211# security table for MAC policy
212config IP6_NF_SECURITY
213 tristate "Security table"
214 depends on IP6_NF_IPTABLES
215 depends on SECURITY
216 default m if NETFILTER_ADVANCED=n
217 help
218 This option adds a `security' table to iptables, for use
219 with Mandatory Access Control (MAC) policy.
220
221 If unsure, say N.
222
211endmenu 223endmenu
212 224
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index fbf2c14ed887..3f17c948eefb 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
11obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
11 12
12# objects for l3 independent conntrack 13# objects for l3 independent conntrack
13nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 14nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 2eff3ae8977d..1b8815f6153d 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -159,7 +159,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
159 case IPQ_COPY_META: 159 case IPQ_COPY_META:
160 case IPQ_COPY_NONE: 160 case IPQ_COPY_NONE:
161 size = NLMSG_SPACE(sizeof(*pmsg)); 161 size = NLMSG_SPACE(sizeof(*pmsg));
162 data_len = 0;
163 break; 162 break;
164 163
165 case IPQ_COPY_PACKET: 164 case IPQ_COPY_PACKET:
@@ -226,8 +225,6 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
226 return skb; 225 return skb;
227 226
228nlmsg_failure: 227nlmsg_failure:
229 if (skb)
230 kfree_skb(skb);
231 *errp = -EINVAL; 228 *errp = -EINVAL;
232 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 229 printk(KERN_ERR "ip6_queue: error creating packet message\n");
233 return NULL; 230 return NULL;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
new file mode 100644
index 000000000000..063a3d9c3c67
--- /dev/null
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -0,0 +1,172 @@
1/*
2 * "security" table for IPv6
3 *
4 * This is for use by Mandatory Access Control (MAC) security models,
5 * which need to be able to manage security policy in separate context
6 * to DAC.
7 *
8 * Based on iptable_mangle.c
9 *
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
12 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18#include <linux/module.h>
19#include <linux/netfilter_ipv6/ip6_tables.h>
20
21MODULE_LICENSE("GPL");
22MODULE_AUTHOR("James Morris <jmorris@redhat.com>");
23MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
24
25#define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \
26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT)
28
29static struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static struct xt_table security_table = {
60 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS,
62 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
63 .me = THIS_MODULE,
64 .af = AF_INET6,
65};
66
67static unsigned int
68ip6t_local_in_hook(unsigned int hook,
69 struct sk_buff *skb,
70 const struct net_device *in,
71 const struct net_device *out,
72 int (*okfn)(struct sk_buff *))
73{
74 return ip6t_do_table(skb, hook, in, out,
75 init_net.ipv6.ip6table_security);
76}
77
78static unsigned int
79ip6t_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{
85 return ip6t_do_table(skb, hook, in, out,
86 init_net.ipv6.ip6table_security);
87}
88
89static unsigned int
90ip6t_local_out_hook(unsigned int hook,
91 struct sk_buff *skb,
92 const struct net_device *in,
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* TBD: handle short packets via raw socket */
97 return ip6t_do_table(skb, hook, in, out,
98 init_net.ipv6.ip6table_security);
99}
100
101static struct nf_hook_ops ip6t_ops[] __read_mostly = {
102 {
103 .hook = ip6t_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = PF_INET6,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP6_PRI_SECURITY,
108 },
109 {
110 .hook = ip6t_forward_hook,
111 .owner = THIS_MODULE,
112 .pf = PF_INET6,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP6_PRI_SECURITY,
115 },
116 {
117 .hook = ip6t_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = PF_INET6,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP6_PRI_SECURITY,
122 },
123};
124
125static int __net_init ip6table_security_net_init(struct net *net)
126{
127 net->ipv6.ip6table_security =
128 ip6t_register_table(net, &security_table, &initial_table.repl);
129
130 if (IS_ERR(net->ipv6.ip6table_security))
131 return PTR_ERR(net->ipv6.ip6table_security);
132
133 return 0;
134}
135
136static void __net_exit ip6table_security_net_exit(struct net *net)
137{
138 ip6t_unregister_table(net->ipv6.ip6table_security);
139}
140
141static struct pernet_operations ip6table_security_net_ops = {
142 .init = ip6table_security_net_init,
143 .exit = ip6table_security_net_exit,
144};
145
146static int __init ip6table_security_init(void)
147{
148 int ret;
149
150 ret = register_pernet_subsys(&ip6table_security_net_ops);
151 if (ret < 0)
152 return ret;
153
154 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
155 if (ret < 0)
156 goto cleanup_table;
157
158 return ret;
159
160cleanup_table:
161 unregister_pernet_subsys(&ip6table_security_net_ops);
162 return ret;
163}
164
165static void __exit ip6table_security_fini(void)
166{
167 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
168 unregister_pernet_subsys(&ip6table_security_net_ops);
169}
170
171module_init(ip6table_security_init);
172module_exit(ip6table_security_fini);
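
[Editor's note] The new module follows the standard shape of a fixed-policy ip6_tables table: a __initdata bootstrap ruleset, an xt_table descriptor, per-namespace registration (the table pointer lands in net->ipv6.ip6table_security), and a global set of nf_hook_ops at NF_IP6_PRI_SECURITY so the security rules run in a pass separate from the filter table. The sketch below condenses that skeleton into a single-hook variant registered in init_net only; every name is hypothetical, and the pernet_operations handling is exactly what the hunk above adds.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/net_namespace.h>

/* Hypothetical one-hook table, condensing the pattern of ip6table_security.c.
 * Registered in init_net only for brevity; the real module registers one
 * table per namespace via pernet_operations. */
static struct {
        struct ip6t_replace repl;
        struct ip6t_standard entries[1];
        struct ip6t_error term;
} bootstrap __initdata = {
        .repl = {
                .name        = "example6",
                .valid_hooks = 1 << NF_INET_LOCAL_OUT,
                .num_entries = 2,
                .size        = sizeof(struct ip6t_standard) +
                               sizeof(struct ip6t_error),
                .hook_entry  = { [NF_INET_LOCAL_OUT] = 0 },
                .underflow   = { [NF_INET_LOCAL_OUT] = 0 },
        },
        .entries = { IP6T_STANDARD_INIT(NF_ACCEPT) },
        .term    = IP6T_ERROR_INIT,
};

static struct xt_table example6_table = {
        .name        = "example6",
        .valid_hooks = 1 << NF_INET_LOCAL_OUT,
        .lock        = __RW_LOCK_UNLOCKED(example6_table.lock),
        .me          = THIS_MODULE,
        .af          = AF_INET6,
};

static struct xt_table *example6_ptr;

static unsigned int example6_out_hook(unsigned int hook, struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
        return ip6t_do_table(skb, hook, in, out, example6_ptr);
}

static struct nf_hook_ops example6_ops __read_mostly = {
        .hook     = example6_out_hook,
        .owner    = THIS_MODULE,
        .pf       = PF_INET6,
        .hooknum  = NF_INET_LOCAL_OUT,
        .priority = NF_IP6_PRI_SECURITY,
};

static int __init example6_init(void)
{
        int ret;

        example6_ptr = ip6t_register_table(&init_net, &example6_table,
                                           &bootstrap.repl);
        if (IS_ERR(example6_ptr))
                return PTR_ERR(example6_ptr);

        ret = nf_register_hook(&example6_ops);
        if (ret < 0)
                ip6t_unregister_table(example6_ptr);   /* unwind in reverse */
        return ret;
}

static void __exit example6_exit(void)
{
        nf_unregister_hook(&example6_ops);
        ip6t_unregister_table(example6_ptr);
}

module_init(example6_init);
module_exit(example6_exit);
MODULE_LICENSE("GPL");
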
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index ee713b03e9ec..14d47d833545 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -89,9 +89,8 @@ static int icmpv6_packet(struct nf_conn *ct,
89 means this will only run once even if count hits zero twice 89 means this will only run once even if count hits zero twice
90 (theoretically possible with SMP) */ 90 (theoretically possible with SMP) */
91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { 91 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
92 if (atomic_dec_and_test(&ct->proto.icmp.count) 92 if (atomic_dec_and_test(&ct->proto.icmp.count))
93 && del_timer(&ct->timeout)) 93 nf_ct_kill_acct(ct, ctinfo, skb);
94 ct->timeout.function((unsigned long)ct);
95 } else { 94 } else {
96 atomic_inc(&ct->proto.icmp.count); 95 atomic_inc(&ct->proto.icmp.count);
97 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb); 96 nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
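
[Editor's note] The reply-direction teardown stops open-coding "decrement the count, delete the timer, run the timeout handler by hand" and calls nf_ct_kill_acct() instead. For reference, the sequence this hunk deletes boils down to the helper sketched below; the accounting implied by the new helper's name is an assumption, not visible in this hunk.

#include <linux/timer.h>
#include <net/netfilter/nf_conntrack.h>

/* Hypothetical restatement of the removed lines: kill a conntrack entry
 * early by cancelling its timer and invoking the timeout handler directly.
 * The real nf_ct_kill_acct() lives in the conntrack core and is assumed
 * to also fold in packet/byte accounting. */
static int example_ct_kill(struct nf_conn *ct)
{
        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return 1;
        }
        return 0;
}
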
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index df0736a4cafa..cbc7e514d3ec 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -7,8 +7,6 @@
7 * PROC file system. This is very similar to the IPv4 version, 7 * PROC file system. This is very similar to the IPv4 version,
8 * except it reports the sockets in the INET6 address family. 8 * except it reports the sockets in the INET6 address family.
9 * 9 *
10 * Version: $Id: proc.c,v 1.17 2002/02/01 22:01:04 davem Exp $
11 *
12 * Authors: David S. Miller (davem@caip.rutgers.edu) 10 * Authors: David S. Miller (davem@caip.rutgers.edu)
13 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 11 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
14 * 12 *
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index f929f47b925e..9ab789159913 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PF_INET6 protocol dispatch tables. 6 * PF_INET6 protocol dispatch tables.
7 * 7 *
8 * Version: $Id: protocol.c,v 1.10 2001/05/18 02:25:49 davem Exp $
9 *
10 * Authors: Pedro Roque <roque@di.fc.ul.pt> 8 * Authors: Pedro Roque <roque@di.fc.ul.pt>
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3aee12310d94..34cfb3f41c2c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Adapted from linux/net/ipv4/raw.c 8 * Adapted from linux/net/ipv4/raw.c
9 * 9 *
10 * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) 12 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
@@ -1164,13 +1162,13 @@ static void rawv6_close(struct sock *sk, long timeout)
1164 sk_common_release(sk); 1162 sk_common_release(sk);
1165} 1163}
1166 1164
1167static int raw6_destroy(struct sock *sk) 1165static void raw6_destroy(struct sock *sk)
1168{ 1166{
1169 lock_sock(sk); 1167 lock_sock(sk);
1170 ip6_flush_pending_frames(sk); 1168 ip6_flush_pending_frames(sk);
1171 release_sock(sk); 1169 release_sock(sk);
1172 1170
1173 return inet6_destroy_sock(sk); 1171 inet6_destroy_sock(sk);
1174} 1172}
1175 1173
1176static int rawv6_init_sk(struct sock *sk) 1174static int rawv6_init_sk(struct sock *sk)
@@ -1253,7 +1251,7 @@ static int raw6_seq_show(struct seq_file *seq, void *v)
1253 "local_address " 1251 "local_address "
1254 "remote_address " 1252 "remote_address "
1255 "st tx_queue rx_queue tr tm->when retrnsmt" 1253 "st tx_queue rx_queue tr tm->when retrnsmt"
1256 " uid timeout inode drops\n"); 1254 " uid timeout inode ref pointer drops\n");
1257 else 1255 else
1258 raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); 1256 raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
1259 return 0; 1257 return 0;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a60d7d129713..6ab957ec2dd6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
9 *
10 * Based on: net/ipv4/ip_fragment.c 8 * Based on: net/ipv4/ip_fragment.c
11 * 9 *
12 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
@@ -634,7 +632,7 @@ static struct inet6_protocol frag_protocol =
634}; 632};
635 633
636#ifdef CONFIG_SYSCTL 634#ifdef CONFIG_SYSCTL
637static struct ctl_table ip6_frags_ctl_table[] = { 635static struct ctl_table ip6_frags_ns_ctl_table[] = {
638 { 636 {
639 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, 637 .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH,
640 .procname = "ip6frag_high_thresh", 638 .procname = "ip6frag_high_thresh",
@@ -660,6 +658,10 @@ static struct ctl_table ip6_frags_ctl_table[] = {
660 .proc_handler = &proc_dointvec_jiffies, 658 .proc_handler = &proc_dointvec_jiffies,
661 .strategy = &sysctl_jiffies, 659 .strategy = &sysctl_jiffies,
662 }, 660 },
661 { }
662};
663
664static struct ctl_table ip6_frags_ctl_table[] = {
663 { 665 {
664 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL, 666 .ctl_name = NET_IPV6_IP6FRAG_SECRET_INTERVAL,
665 .procname = "ip6frag_secret_interval", 667 .procname = "ip6frag_secret_interval",
@@ -672,21 +674,20 @@ static struct ctl_table ip6_frags_ctl_table[] = {
672 { } 674 { }
673}; 675};
674 676
675static int ip6_frags_sysctl_register(struct net *net) 677static int ip6_frags_ns_sysctl_register(struct net *net)
676{ 678{
677 struct ctl_table *table; 679 struct ctl_table *table;
678 struct ctl_table_header *hdr; 680 struct ctl_table_header *hdr;
679 681
680 table = ip6_frags_ctl_table; 682 table = ip6_frags_ns_ctl_table;
681 if (net != &init_net) { 683 if (net != &init_net) {
682 table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL); 684 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
683 if (table == NULL) 685 if (table == NULL)
684 goto err_alloc; 686 goto err_alloc;
685 687
686 table[0].data = &net->ipv6.frags.high_thresh; 688 table[0].data = &net->ipv6.frags.high_thresh;
687 table[1].data = &net->ipv6.frags.low_thresh; 689 table[1].data = &net->ipv6.frags.low_thresh;
688 table[2].data = &net->ipv6.frags.timeout; 690 table[2].data = &net->ipv6.frags.timeout;
689 table[3].mode &= ~0222;
690 } 691 }
691 692
692 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); 693 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
@@ -703,7 +704,7 @@ err_alloc:
703 return -ENOMEM; 704 return -ENOMEM;
704} 705}
705 706
706static void ip6_frags_sysctl_unregister(struct net *net) 707static void ip6_frags_ns_sysctl_unregister(struct net *net)
707{ 708{
708 struct ctl_table *table; 709 struct ctl_table *table;
709 710
@@ -711,13 +712,36 @@ static void ip6_frags_sysctl_unregister(struct net *net)
711 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); 712 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
712 kfree(table); 713 kfree(table);
713} 714}
715
716static struct ctl_table_header *ip6_ctl_header;
717
718static int ip6_frags_sysctl_register(void)
719{
720 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
721 ip6_frags_ctl_table);
722 return ip6_ctl_header == NULL ? -ENOMEM : 0;
723}
724
725static void ip6_frags_sysctl_unregister(void)
726{
727 unregister_net_sysctl_table(ip6_ctl_header);
728}
714#else 729#else
715static inline int ip6_frags_sysctl_register(struct net *net) 730static inline int ip6_frags_ns_sysctl_register(struct net *net)
716{ 731{
717 return 0; 732 return 0;
718} 733}
719 734
720static inline void ip6_frags_sysctl_unregister(struct net *net) 735static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
736{
737}
738
739static inline int ip6_frags_sysctl_register(void)
740{
741 return 0;
742}
743
744static inline void ip6_frags_sysctl_unregister(void)
721{ 745{
722} 746}
723#endif 747#endif
@@ -730,12 +754,12 @@ static int ipv6_frags_init_net(struct net *net)
730 754
731 inet_frags_init_net(&net->ipv6.frags); 755 inet_frags_init_net(&net->ipv6.frags);
732 756
733 return ip6_frags_sysctl_register(net); 757 return ip6_frags_ns_sysctl_register(net);
734} 758}
735 759
736static void ipv6_frags_exit_net(struct net *net) 760static void ipv6_frags_exit_net(struct net *net)
737{ 761{
738 ip6_frags_sysctl_unregister(net); 762 ip6_frags_ns_sysctl_unregister(net);
739 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 763 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
740} 764}
741 765
@@ -752,7 +776,13 @@ int __init ipv6_frag_init(void)
752 if (ret) 776 if (ret)
753 goto out; 777 goto out;
754 778
755 register_pernet_subsys(&ip6_frags_ops); 779 ret = ip6_frags_sysctl_register();
780 if (ret)
781 goto err_sysctl;
782
783 ret = register_pernet_subsys(&ip6_frags_ops);
784 if (ret)
785 goto err_pernet;
756 786
757 ip6_frags.hashfn = ip6_hashfn; 787 ip6_frags.hashfn = ip6_hashfn;
758 ip6_frags.constructor = ip6_frag_init; 788 ip6_frags.constructor = ip6_frag_init;
@@ -765,11 +795,18 @@ int __init ipv6_frag_init(void)
765 inet_frags_init(&ip6_frags); 795 inet_frags_init(&ip6_frags);
766out: 796out:
767 return ret; 797 return ret;
798
799err_pernet:
800 ip6_frags_sysctl_unregister();
801err_sysctl:
802 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
803 goto out;
768} 804}
769 805
770void ipv6_frag_exit(void) 806void ipv6_frag_exit(void)
771{ 807{
772 inet_frags_fini(&ip6_frags); 808 inet_frags_fini(&ip6_frags);
809 ip6_frags_sysctl_unregister();
773 unregister_pernet_subsys(&ip6_frags_ops); 810 unregister_pernet_subsys(&ip6_frags_ops);
774 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); 811 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
775} 812}
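
[Editor's note] The fragment sysctls are split in two here: the per-namespace limits (ip6frag_high_thresh, ip6frag_low_thresh, ip6frag_time) stay in a table that is duplicated for each struct net, while ip6frag_secret_interval moves to a table registered exactly once through register_net_sysctl_rotable(), which makes it appear read-only in child namespaces instead of relying on the old mode &= ~0222 trick. ipv6_frag_init() now checks both registrations and unwinds in reverse order on failure, as the hunk above shows. A minimal sketch of the global, read-only-in-children half, with hypothetical names (header locations assumed):

#include <linux/sysctl.h>
#include <net/net_namespace.h>      /* register_net_sysctl_rotable(), assumed */
#include <net/ipv6.h>               /* net_ipv6_ctl_path */

static int example_interval = 600;

static struct ctl_table example_rotable[] = {
        {
                .ctl_name     = CTL_UNNUMBERED,
                .procname     = "example_secret_interval",
                .data         = &example_interval,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec_jiffies,
                .strategy     = &sysctl_jiffies,
        },
        { }
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_register(void)
{
        /* Registered once; shows up read-only outside the init namespace. */
        example_hdr = register_net_sysctl_rotable(net_ipv6_ctl_path,
                                                  example_rotable);
        return example_hdr ? 0 : -ENOMEM;
}

static void example_sysctl_unregister(void)
{
        unregister_net_sysctl_table(example_hdr);
}
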
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7ff687020fa9..5d6c166dfbb6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -230,7 +228,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
230static inline int rt6_need_strict(struct in6_addr *daddr) 228static inline int rt6_need_strict(struct in6_addr *daddr)
231{ 229{
232 return (ipv6_addr_type(daddr) & 230 return (ipv6_addr_type(daddr) &
233 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); 231 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
234} 232}
235 233
236/* 234/*
@@ -239,15 +237,20 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
239 237
240static inline struct rt6_info *rt6_device_match(struct net *net, 238static inline struct rt6_info *rt6_device_match(struct net *net,
241 struct rt6_info *rt, 239 struct rt6_info *rt,
240 struct in6_addr *saddr,
242 int oif, 241 int oif,
243 int flags) 242 int flags)
244{ 243{
245 struct rt6_info *local = NULL; 244 struct rt6_info *local = NULL;
246 struct rt6_info *sprt; 245 struct rt6_info *sprt;
247 246
248 if (oif) { 247 if (!oif && ipv6_addr_any(saddr))
249 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { 248 goto out;
250 struct net_device *dev = sprt->rt6i_dev; 249
250 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
251 struct net_device *dev = sprt->rt6i_dev;
252
253 if (oif) {
251 if (dev->ifindex == oif) 254 if (dev->ifindex == oif)
252 return sprt; 255 return sprt;
253 if (dev->flags & IFF_LOOPBACK) { 256 if (dev->flags & IFF_LOOPBACK) {
@@ -261,14 +264,21 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
261 } 264 }
262 local = sprt; 265 local = sprt;
263 } 266 }
267 } else {
268 if (ipv6_chk_addr(net, saddr, dev,
269 flags & RT6_LOOKUP_F_IFACE))
270 return sprt;
264 } 271 }
272 }
265 273
274 if (oif) {
266 if (local) 275 if (local)
267 return local; 276 return local;
268 277
269 if (flags & RT6_LOOKUP_F_IFACE) 278 if (flags & RT6_LOOKUP_F_IFACE)
270 return net->ipv6.ip6_null_entry; 279 return net->ipv6.ip6_null_entry;
271 } 280 }
281out:
272 return rt; 282 return rt;
273} 283}
274 284
@@ -541,7 +551,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
541 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); 551 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
542restart: 552restart:
543 rt = fn->leaf; 553 rt = fn->leaf;
544 rt = rt6_device_match(net, rt, fl->oif, flags); 554 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
545 BACKTRACK(net, &fl->fl6_src); 555 BACKTRACK(net, &fl->fl6_src);
546out: 556out:
547 dst_use(&rt->u.dst, jiffies); 557 dst_use(&rt->u.dst, jiffies);
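
[Editor's note] rt6_device_match() now receives the flow's source address as well: when no output interface is given but the source address is set, the first candidate route whose device actually owns that address (per ipv6_chk_addr()) wins, and rt6_need_strict() additionally treats loopback destinations as strict. A hedged sketch of that selection rule as a stand-alone helper; the real logic is inlined in rt6_device_match() above.

#include <net/addrconf.h>       /* ipv6_chk_addr() */
#include <net/ip6_fib.h>        /* struct rt6_info */

/* Hypothetical helper: among otherwise equal routes, prefer one whose
 * device owns the flow's source address. */
static struct rt6_info *example_match_by_saddr(struct net *net,
                                               struct rt6_info *first,
                                               struct in6_addr *saddr,
                                               int strict)
{
        struct rt6_info *sprt;

        for (sprt = first; sprt; sprt = sprt->u.dst.rt6_next)
                if (ipv6_chk_addr(net, saddr, sprt->rt6i_dev, strict))
                        return sprt;

        return first;   /* no device owns saddr; keep the head of the list */
}
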
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 32e871a6c25a..b7a50e968506 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -6,8 +6,6 @@
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * 8 *
9 * $Id: sit.c,v 1.53 2001/09/25 05:09:53 davem Exp $
10 *
11 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -493,13 +491,13 @@ static int ipip6_rcv(struct sk_buff *skb)
493 491
494 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 492 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
495 !isatap_chksrc(skb, iph, tunnel)) { 493 !isatap_chksrc(skb, iph, tunnel)) {
496 tunnel->stat.rx_errors++; 494 tunnel->dev->stats.rx_errors++;
497 read_unlock(&ipip6_lock); 495 read_unlock(&ipip6_lock);
498 kfree_skb(skb); 496 kfree_skb(skb);
499 return 0; 497 return 0;
500 } 498 }
501 tunnel->stat.rx_packets++; 499 tunnel->dev->stats.rx_packets++;
502 tunnel->stat.rx_bytes += skb->len; 500 tunnel->dev->stats.rx_bytes += skb->len;
503 skb->dev = tunnel->dev; 501 skb->dev = tunnel->dev;
504 dst_release(skb->dst); 502 dst_release(skb->dst);
505 skb->dst = NULL; 503 skb->dst = NULL;
@@ -539,7 +537,7 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
539static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 537static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
540{ 538{
541 struct ip_tunnel *tunnel = netdev_priv(dev); 539 struct ip_tunnel *tunnel = netdev_priv(dev);
542 struct net_device_stats *stats = &tunnel->stat; 540 struct net_device_stats *stats = &tunnel->dev->stats;
543 struct iphdr *tiph = &tunnel->parms.iph; 541 struct iphdr *tiph = &tunnel->parms.iph;
544 struct ipv6hdr *iph6 = ipv6_hdr(skb); 542 struct ipv6hdr *iph6 = ipv6_hdr(skb);
545 u8 tos = tunnel->parms.iph.tos; 543 u8 tos = tunnel->parms.iph.tos;
@@ -553,7 +551,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
553 int addr_type; 551 int addr_type;
554 552
555 if (tunnel->recursion++) { 553 if (tunnel->recursion++) {
556 tunnel->stat.collisions++; 554 stats->collisions++;
557 goto tx_error; 555 goto tx_error;
558 } 556 }
559 557
@@ -620,20 +618,20 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
620 .oif = tunnel->parms.link, 618 .oif = tunnel->parms.link,
621 .proto = IPPROTO_IPV6 }; 619 .proto = IPPROTO_IPV6 };
622 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 620 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
623 tunnel->stat.tx_carrier_errors++; 621 stats->tx_carrier_errors++;
624 goto tx_error_icmp; 622 goto tx_error_icmp;
625 } 623 }
626 } 624 }
627 if (rt->rt_type != RTN_UNICAST) { 625 if (rt->rt_type != RTN_UNICAST) {
628 ip_rt_put(rt); 626 ip_rt_put(rt);
629 tunnel->stat.tx_carrier_errors++; 627 stats->tx_carrier_errors++;
630 goto tx_error_icmp; 628 goto tx_error_icmp;
631 } 629 }
632 tdev = rt->u.dst.dev; 630 tdev = rt->u.dst.dev;
633 631
634 if (tdev == dev) { 632 if (tdev == dev) {
635 ip_rt_put(rt); 633 ip_rt_put(rt);
636 tunnel->stat.collisions++; 634 stats->collisions++;
637 goto tx_error; 635 goto tx_error;
638 } 636 }
639 637
@@ -643,7 +641,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
643 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; 641 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
644 642
645 if (mtu < 68) { 643 if (mtu < 68) {
646 tunnel->stat.collisions++; 644 stats->collisions++;
647 ip_rt_put(rt); 645 ip_rt_put(rt);
648 goto tx_error; 646 goto tx_error;
649 } 647 }
@@ -920,11 +918,6 @@ done:
920 return err; 918 return err;
921} 919}
922 920
923static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
924{
925 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
926}
927
928static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) 921static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
929{ 922{
930 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 923 if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
@@ -938,7 +931,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
938 dev->uninit = ipip6_tunnel_uninit; 931 dev->uninit = ipip6_tunnel_uninit;
939 dev->destructor = free_netdev; 932 dev->destructor = free_netdev;
940 dev->hard_start_xmit = ipip6_tunnel_xmit; 933 dev->hard_start_xmit = ipip6_tunnel_xmit;
941 dev->get_stats = ipip6_tunnel_get_stats;
942 dev->do_ioctl = ipip6_tunnel_ioctl; 934 dev->do_ioctl = ipip6_tunnel_ioctl;
943 dev->change_mtu = ipip6_tunnel_change_mtu; 935 dev->change_mtu = ipip6_tunnel_change_mtu;
944 936
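
[Editor's note] The tunnel driver stops keeping its own struct net_device_stats copy (tunnel->stat) and its get_stats hook, and bumps the counters embedded in struct net_device instead; the core's default get_stats already returns &dev->stats, so no method is needed. A hedged sketch of the resulting pattern for a hypothetical virtual device:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical transmit path: account directly into dev->stats and rely
 * on the core's default get_stats (which returns &dev->stats). */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        dev_kfree_skb(skb);
        return 0;       /* 2.6.26-era hard_start_xmit: 0 == consumed */
}
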
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 3804dcbbfab0..5c99274558bf 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -37,6 +37,10 @@ static ctl_table ipv6_table_template[] = {
37 .mode = 0644, 37 .mode = 0644,
38 .proc_handler = &proc_dointvec 38 .proc_handler = &proc_dointvec
39 }, 39 },
40 { .ctl_name = 0 }
41};
42
43static ctl_table ipv6_table[] = {
40 { 44 {
41 .ctl_name = NET_IPV6_MLD_MAX_MSF, 45 .ctl_name = NET_IPV6_MLD_MAX_MSF,
42 .procname = "mld_max_msf", 46 .procname = "mld_max_msf",
@@ -80,12 +84,6 @@ static int ipv6_sysctl_net_init(struct net *net)
80 84
81 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 85 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
82 86
83 /* We don't want this value to be per namespace, it should be global
84 to all namespaces, so make it read-only when we are not in the
85 init network namespace */
86 if (net != &init_net)
87 ipv6_table[3].mode = 0444;
88
89 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, 87 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
90 ipv6_table); 88 ipv6_table);
91 if (!net->ipv6.sysctl.table) 89 if (!net->ipv6.sysctl.table)
@@ -126,12 +124,29 @@ static struct pernet_operations ipv6_sysctl_net_ops = {
126 .exit = ipv6_sysctl_net_exit, 124 .exit = ipv6_sysctl_net_exit,
127}; 125};
128 126
127static struct ctl_table_header *ip6_header;
128
129int ipv6_sysctl_register(void) 129int ipv6_sysctl_register(void)
130{ 130{
 131 return register_pernet_subsys(&ipv6_sysctl_net_ops); 131 int err = -ENOMEM;
132
133 ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
134 if (ip6_header == NULL)
135 goto out;
136
137 err = register_pernet_subsys(&ipv6_sysctl_net_ops);
138 if (err)
139 goto err_pernet;
140out:
141 return err;
142
143err_pernet:
144 unregister_net_sysctl_table(ip6_header);
145 goto out;
132} 146}
133 147
134void ipv6_sysctl_unregister(void) 148void ipv6_sysctl_unregister(void)
135{ 149{
150 unregister_net_sysctl_table(ip6_header);
136 unregister_pernet_subsys(&ipv6_sysctl_net_ops); 151 unregister_pernet_subsys(&ipv6_sysctl_net_ops);
137} 152}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 40ea9c36d24b..30dbab7cc3cc 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,8 +5,6 @@
5 * Authors: 5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt> 6 * Pedro Roque <roque@di.fc.ul.pt>
7 * 7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on: 8 * Based on:
11 * linux/net/ipv4/tcp.c 9 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c 10 * linux/net/ipv4/tcp_input.c
@@ -72,8 +70,6 @@
72 70
73static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 71static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); 72static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
75static void tcp_v6_send_check(struct sock *sk, int len,
76 struct sk_buff *skb);
77 73
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 74static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 75
@@ -82,6 +78,12 @@ static struct inet_connection_sock_af_ops ipv6_specific;
82#ifdef CONFIG_TCP_MD5SIG 78#ifdef CONFIG_TCP_MD5SIG
83static struct tcp_sock_af_ops tcp_sock_ipv6_specific; 79static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 80static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
81#else
82static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
83 struct in6_addr *addr)
84{
85 return NULL;
86}
85#endif 87#endif
86 88
87static void tcp_v6_hash(struct sock *sk) 89static void tcp_v6_hash(struct sock *sk)
@@ -736,78 +738,34 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
736static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 738static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737 struct in6_addr *saddr, 739 struct in6_addr *saddr,
738 struct in6_addr *daddr, 740 struct in6_addr *daddr,
739 struct tcphdr *th, int protocol, 741 struct tcphdr *th, unsigned int tcplen)
740 unsigned int tcplen)
741{ 742{
742 struct scatterlist sg[4];
743 __u16 data_len;
744 int block = 0;
745 __sum16 cksum;
746 struct tcp_md5sig_pool *hp; 743 struct tcp_md5sig_pool *hp;
747 struct tcp6_pseudohdr *bp; 744 struct tcp6_pseudohdr *bp;
748 struct hash_desc *desc;
749 int err; 745 int err;
750 unsigned int nbytes = 0;
751 746
752 hp = tcp_get_md5sig_pool(); 747 hp = tcp_get_md5sig_pool();
753 if (!hp) { 748 if (!hp) {
754 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__); 749 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
755 goto clear_hash_noput; 750 goto clear_hash_noput;
756 } 751 }
752
757 bp = &hp->md5_blk.ip6; 753 bp = &hp->md5_blk.ip6;
758 desc = &hp->md5_desc;
759 754
760 /* 1. TCP pseudo-header (RFC2460) */ 755 /* 1. TCP pseudo-header (RFC2460) */
761 ipv6_addr_copy(&bp->saddr, saddr); 756 ipv6_addr_copy(&bp->saddr, saddr);
762 ipv6_addr_copy(&bp->daddr, daddr); 757 ipv6_addr_copy(&bp->daddr, daddr);
763 bp->len = htonl(tcplen); 758 bp->len = htonl(tcplen);
764 bp->protocol = htonl(protocol); 759 bp->protocol = htonl(IPPROTO_TCP);
765
766 sg_init_table(sg, 4);
767 760
768 sg_set_buf(&sg[block++], bp, sizeof(*bp)); 761 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
769 nbytes += sizeof(*bp); 762 th, tcplen, hp);
770 763
771 /* 2. TCP header, excluding options */ 764 if (err)
772 cksum = th->check;
773 th->check = 0;
774 sg_set_buf(&sg[block++], th, sizeof(*th));
775 nbytes += sizeof(*th);
776
777 /* 3. TCP segment data (if any) */
778 data_len = tcplen - (th->doff << 2);
779 if (data_len > 0) {
780 u8 *data = (u8 *)th + (th->doff << 2);
781 sg_set_buf(&sg[block++], data, data_len);
782 nbytes += data_len;
783 }
784
785 /* 4. shared key */
786 sg_set_buf(&sg[block++], key->key, key->keylen);
787 nbytes += key->keylen;
788
789 sg_mark_end(&sg[block - 1]);
790
791 /* Now store the hash into the packet */
792 err = crypto_hash_init(desc);
793 if (err) {
794 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
795 goto clear_hash;
796 }
797 err = crypto_hash_update(desc, sg, nbytes);
798 if (err) {
799 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
800 goto clear_hash;
801 }
802 err = crypto_hash_final(desc, md5_hash);
803 if (err) {
804 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
805 goto clear_hash; 765 goto clear_hash;
806 }
807 766
808 /* Reset header, and free up the crypto */ 767 /* Free up the crypto pool */
809 tcp_put_md5sig_pool(); 768 tcp_put_md5sig_pool();
810 th->check = cksum;
811out: 769out:
812 return 0; 770 return 0;
813clear_hash: 771clear_hash:
@@ -821,8 +779,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
821 struct sock *sk, 779 struct sock *sk,
822 struct dst_entry *dst, 780 struct dst_entry *dst,
823 struct request_sock *req, 781 struct request_sock *req,
824 struct tcphdr *th, int protocol, 782 struct tcphdr *th, unsigned int tcplen)
825 unsigned int tcplen)
826{ 783{
827 struct in6_addr *saddr, *daddr; 784 struct in6_addr *saddr, *daddr;
828 785
@@ -835,7 +792,7 @@ static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
835 } 792 }
836 return tcp_v6_do_calc_md5_hash(md5_hash, key, 793 return tcp_v6_do_calc_md5_hash(md5_hash, key,
837 saddr, daddr, 794 saddr, daddr,
838 th, protocol, tcplen); 795 th, tcplen);
839} 796}
840 797
841static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) 798static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
@@ -844,43 +801,12 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
844 struct tcp_md5sig_key *hash_expected; 801 struct tcp_md5sig_key *hash_expected;
845 struct ipv6hdr *ip6h = ipv6_hdr(skb); 802 struct ipv6hdr *ip6h = ipv6_hdr(skb);
846 struct tcphdr *th = tcp_hdr(skb); 803 struct tcphdr *th = tcp_hdr(skb);
847 int length = (th->doff << 2) - sizeof (*th);
848 int genhash; 804 int genhash;
849 u8 *ptr;
850 u8 newhash[16]; 805 u8 newhash[16];
851 806
852 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); 807 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
808 hash_location = tcp_parse_md5sig_option(th);
853 809
854 /* If the TCP option is too short, we can short cut */
855 if (length < TCPOLEN_MD5SIG)
856 return hash_expected ? 1 : 0;
857
858 /* parse options */
859 ptr = (u8*)(th + 1);
860 while (length > 0) {
861 int opcode = *ptr++;
862 int opsize;
863
864 switch(opcode) {
865 case TCPOPT_EOL:
866 goto done_opts;
867 case TCPOPT_NOP:
868 length--;
869 continue;
870 default:
871 opsize = *ptr++;
872 if (opsize < 2 || opsize > length)
873 goto done_opts;
874 if (opcode == TCPOPT_MD5SIG) {
875 hash_location = ptr;
876 goto done_opts;
877 }
878 }
879 ptr += opsize - 2;
880 length -= opsize;
881 }
882
883done_opts:
884 /* do we have a hash as expected? */ 810 /* do we have a hash as expected? */
885 if (!hash_expected) { 811 if (!hash_expected) {
886 if (!hash_location) 812 if (!hash_location)
@@ -910,8 +836,7 @@ done_opts:
910 genhash = tcp_v6_do_calc_md5_hash(newhash, 836 genhash = tcp_v6_do_calc_md5_hash(newhash,
911 hash_expected, 837 hash_expected,
912 &ip6h->saddr, &ip6h->daddr, 838 &ip6h->saddr, &ip6h->daddr,
913 th, sk->sk_protocol, 839 th, skb->len);
914 skb->len);
915 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 840 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
916 if (net_ratelimit()) { 841 if (net_ratelimit()) {
917 printk(KERN_INFO "MD5 Hash %s for " 842 printk(KERN_INFO "MD5 Hash %s for "
@@ -1051,7 +976,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1051 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key, 976 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1052 &ipv6_hdr(skb)->daddr, 977 &ipv6_hdr(skb)->daddr,
1053 &ipv6_hdr(skb)->saddr, 978 &ipv6_hdr(skb)->saddr,
1054 t1, IPPROTO_TCP, tot_len); 979 t1, tot_len);
1055 } 980 }
1056#endif 981#endif
1057 982
@@ -1088,8 +1013,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1088 kfree_skb(buff); 1013 kfree_skb(buff);
1089} 1014}
1090 1015
1091static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, 1016static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1092 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) 1017 struct tcp_md5sig_key *key)
1093{ 1018{
1094 struct tcphdr *th = tcp_hdr(skb), *t1; 1019 struct tcphdr *th = tcp_hdr(skb), *t1;
1095 struct sk_buff *buff; 1020 struct sk_buff *buff;
@@ -1098,22 +1023,6 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1098 struct sock *ctl_sk = net->ipv6.tcp_sk; 1023 struct sock *ctl_sk = net->ipv6.tcp_sk;
1099 unsigned int tot_len = sizeof(struct tcphdr); 1024 unsigned int tot_len = sizeof(struct tcphdr);
1100 __be32 *topt; 1025 __be32 *topt;
1101#ifdef CONFIG_TCP_MD5SIG
1102 struct tcp_md5sig_key *key;
1103 struct tcp_md5sig_key tw_key;
1104#endif
1105
1106#ifdef CONFIG_TCP_MD5SIG
1107 if (!tw && skb->sk) {
1108 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1109 } else if (tw && tw->tw_md5_keylen) {
1110 tw_key.key = tw->tw_md5_key;
1111 tw_key.keylen = tw->tw_md5_keylen;
1112 key = &tw_key;
1113 } else {
1114 key = NULL;
1115 }
1116#endif
1117 1026
1118 if (ts) 1027 if (ts)
1119 tot_len += TCPOLEN_TSTAMP_ALIGNED; 1028 tot_len += TCPOLEN_TSTAMP_ALIGNED;
@@ -1157,7 +1066,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1157 tcp_v6_do_calc_md5_hash((__u8 *)topt, key, 1066 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1158 &ipv6_hdr(skb)->daddr, 1067 &ipv6_hdr(skb)->daddr,
1159 &ipv6_hdr(skb)->saddr, 1068 &ipv6_hdr(skb)->saddr,
1160 t1, IPPROTO_TCP, tot_len); 1069 t1, tot_len);
1161 } 1070 }
1162#endif 1071#endif
1163 1072
@@ -1193,16 +1102,17 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1193 struct inet_timewait_sock *tw = inet_twsk(sk); 1102 struct inet_timewait_sock *tw = inet_twsk(sk);
1194 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 1103 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1195 1104
1196 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1105 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1197 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1106 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1198 tcptw->tw_ts_recent); 1107 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1199 1108
1200 inet_twsk_put(tw); 1109 inet_twsk_put(tw);
1201} 1110}
1202 1111
1203static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1112static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1204{ 1113{
1205 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); 1114 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1115 tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr));
1206} 1116}
1207 1117
1208 1118
@@ -1960,7 +1870,7 @@ static int tcp_v6_init_sock(struct sock *sk)
1960 return 0; 1870 return 0;
1961} 1871}
1962 1872
1963static int tcp_v6_destroy_sock(struct sock *sk) 1873static void tcp_v6_destroy_sock(struct sock *sk)
1964{ 1874{
1965#ifdef CONFIG_TCP_MD5SIG 1875#ifdef CONFIG_TCP_MD5SIG
1966 /* Clean up the MD5 key list */ 1876 /* Clean up the MD5 key list */
@@ -1968,7 +1878,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
1968 tcp_v6_clear_md5_list(sk); 1878 tcp_v6_clear_md5_list(sk);
1969#endif 1879#endif
1970 tcp_v4_destroy_sock(sk); 1880 tcp_v4_destroy_sock(sk);
1971 return inet6_destroy_sock(sk); 1881 inet6_destroy_sock(sk);
1972} 1882}
1973 1883
1974#ifdef CONFIG_PROC_FS 1884#ifdef CONFIG_PROC_FS
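
[Editor's note] Most of the tcp_ipv6.c hunk is consolidation: the open-coded walk over the TCP option block that located TCPOPT_MD5SIG is replaced by the shared tcp_parse_md5sig_option() helper, the pseudo-header hashing goes through tcp_calc_md5_hash() with the protocol fixed to IPPROTO_TCP, and tcp_v6_send_ack() now receives the MD5 key from its callers instead of looking it up itself. For reference, the deleted option walk is equivalent to the helper sketched below (name and placement hypothetical; the real helper lives in the shared TCP code):

#include <net/tcp.h>

/* Restatement of the option walk removed above, in the shape of the
 * shared helper that replaces it: return a pointer to the MD5 digest
 * inside the TCP options, or NULL if the option is absent. */
static u8 *example_parse_md5sig_option(struct tcphdr *th)
{
        int length = (th->doff << 2) - sizeof(*th);
        u8 *ptr = (u8 *)(th + 1);

        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                switch (opcode) {
                case TCPOPT_EOL:
                        return NULL;
                case TCPOPT_NOP:
                        length--;
                        continue;
                default:
                        opsize = *ptr++;
                        if (opsize < 2 || opsize > length)
                                return NULL;
                        if (opcode == TCPOPT_MD5SIG)
                                return ptr;     /* the 16-byte digest */
                }
                ptr += opsize - 2;
                length -= opsize;
        }
        return NULL;
}
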
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dd309626ae9a..d1477b350f76 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,8 +7,6 @@
7 * 7 *
8 * Based on linux/ipv4/udp.c 8 * Based on linux/ipv4/udp.c
9 * 9 *
10 * $Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes: 10 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support 11 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 12 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
@@ -67,7 +65,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
67 int badness = -1; 65 int badness = -1;
68 66
69 read_lock(&udp_hash_lock); 67 read_lock(&udp_hash_lock);
70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { 68 sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
71 struct inet_sock *inet = inet_sk(sk); 69 struct inet_sock *inet = inet_sk(sk);
72 70
73 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 71 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
@@ -168,7 +166,8 @@ try_again:
168 goto out_free; 166 goto out_free;
169 167
170 if (!peeked) 168 if (!peeked)
171 UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); 169 UDP6_INC_STATS_USER(sock_net(sk),
170 UDP_MIB_INDATAGRAMS, is_udplite);
172 171
173 sock_recv_timestamp(msg, sk, skb); 172 sock_recv_timestamp(msg, sk, skb);
174 173
@@ -215,7 +214,7 @@ out:
215csum_copy_err: 214csum_copy_err:
216 lock_sock(sk); 215 lock_sock(sk);
217 if (!skb_kill_datagram(sk, skb, flags)) 216 if (!skb_kill_datagram(sk, skb, flags))
218 UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); 217 UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
219 release_sock(sk); 218 release_sock(sk);
220 219
221 if (flags & MSG_DONTWAIT) 220 if (flags & MSG_DONTWAIT)
@@ -299,14 +298,17 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
299 298
300 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 299 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
301 /* Note that an ENOMEM error is charged twice */ 300 /* Note that an ENOMEM error is charged twice */
302 if (rc == -ENOMEM) 301 if (rc == -ENOMEM) {
303 UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); 302 UDP6_INC_STATS_BH(sock_net(sk),
303 UDP_MIB_RCVBUFERRORS, is_udplite);
304 atomic_inc(&sk->sk_drops);
305 }
304 goto drop; 306 goto drop;
305 } 307 }
306 308
307 return 0; 309 return 0;
308drop: 310drop:
309 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); 311 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
310 kfree_skb(skb); 312 kfree_skb(skb);
311 return -1; 313 return -1;
312} 314}
@@ -355,15 +357,16 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
355 * Note: called only from the BH handler context, 357 * Note: called only from the BH handler context,
356 * so we don't need to lock the hashes. 358 * so we don't need to lock the hashes.
357 */ 359 */
358static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, 360static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
359 struct in6_addr *daddr, struct hlist_head udptable[]) 361 struct in6_addr *saddr, struct in6_addr *daddr,
362 struct hlist_head udptable[])
360{ 363{
361 struct sock *sk, *sk2; 364 struct sock *sk, *sk2;
362 const struct udphdr *uh = udp_hdr(skb); 365 const struct udphdr *uh = udp_hdr(skb);
363 int dif; 366 int dif;
364 367
365 read_lock(&udp_hash_lock); 368 read_lock(&udp_hash_lock);
366 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); 369 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
367 dif = inet6_iif(skb); 370 dif = inet6_iif(skb);
368 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 371 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
369 if (!sk) { 372 if (!sk) {
@@ -437,6 +440,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
437 struct net_device *dev = skb->dev; 440 struct net_device *dev = skb->dev;
438 struct in6_addr *saddr, *daddr; 441 struct in6_addr *saddr, *daddr;
439 u32 ulen = 0; 442 u32 ulen = 0;
443 struct net *net = dev_net(skb->dev);
440 444
441 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 445 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
442 goto short_packet; 446 goto short_packet;
@@ -475,7 +479,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
475 * Multicast receive code 479 * Multicast receive code
476 */ 480 */
477 if (ipv6_addr_is_multicast(daddr)) 481 if (ipv6_addr_is_multicast(daddr))
478 return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable); 482 return __udp6_lib_mcast_deliver(net, skb,
483 saddr, daddr, udptable);
479 484
480 /* Unicast */ 485 /* Unicast */
481 486
@@ -483,7 +488,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
483 * check socket cache ... must talk to Alan about his plans 488 * check socket cache ... must talk to Alan about his plans
484 * for sock caches... i'll skip this for now. 489 * for sock caches... i'll skip this for now.
485 */ 490 */
486 sk = __udp6_lib_lookup(dev_net(skb->dev), saddr, uh->source, 491 sk = __udp6_lib_lookup(net, saddr, uh->source,
487 daddr, uh->dest, inet6_iif(skb), udptable); 492 daddr, uh->dest, inet6_iif(skb), udptable);
488 493
489 if (sk == NULL) { 494 if (sk == NULL) {
@@ -492,7 +497,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
492 497
493 if (udp_lib_checksum_complete(skb)) 498 if (udp_lib_checksum_complete(skb))
494 goto discard; 499 goto discard;
495 UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 500 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
501 proto == IPPROTO_UDPLITE);
496 502
497 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 503 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
498 504
@@ -517,7 +523,7 @@ short_packet:
517 ulen, skb->len); 523 ulen, skb->len);
518 524
519discard: 525discard:
520 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 526 UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
521 kfree_skb(skb); 527 kfree_skb(skb);
522 return 0; 528 return 0;
523} 529}
@@ -587,7 +593,8 @@ out:
587 up->len = 0; 593 up->len = 0;
588 up->pending = 0; 594 up->pending = 0;
589 if (!err) 595 if (!err)
590 UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); 596 UDP6_INC_STATS_USER(sock_net(sk),
597 UDP_MIB_OUTDATAGRAMS, is_udplite);
591 return err; 598 return err;
592} 599}
593 600
@@ -869,7 +876,8 @@ out:
869 * seems like overkill. 876 * seems like overkill.
870 */ 877 */
871 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 878 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
872 UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); 879 UDP6_INC_STATS_USER(sock_net(sk),
880 UDP_MIB_SNDBUFERRORS, is_udplite);
873 } 881 }
874 return err; 882 return err;
875 883
@@ -881,15 +889,13 @@ do_confirm:
881 goto out; 889 goto out;
882} 890}
883 891
884int udpv6_destroy_sock(struct sock *sk) 892void udpv6_destroy_sock(struct sock *sk)
885{ 893{
886 lock_sock(sk); 894 lock_sock(sk);
887 udp_v6_flush_pending_frames(sk); 895 udp_v6_flush_pending_frames(sk);
888 release_sock(sk); 896 release_sock(sk);
889 897
890 inet6_destroy_sock(sk); 898 inet6_destroy_sock(sk);
891
892 return 0;
893} 899}
894 900
895/* 901/*
@@ -955,7 +961,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
955 srcp = ntohs(inet->sport); 961 srcp = ntohs(inet->sport);
956 seq_printf(seq, 962 seq_printf(seq,
957 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 963 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
958 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n", 964 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
959 bucket, 965 bucket,
960 src->s6_addr32[0], src->s6_addr32[1], 966 src->s6_addr32[0], src->s6_addr32[1],
961 src->s6_addr32[2], src->s6_addr32[3], srcp, 967 src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -967,7 +973,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
967 0, 0L, 0, 973 0, 0L, 0,
968 sock_i_uid(sp), 0, 974 sock_i_uid(sp), 0,
969 sock_i_ino(sp), 975 sock_i_ino(sp),
970 atomic_read(&sp->sk_refcnt), sp); 976 atomic_read(&sp->sk_refcnt), sp,
977 atomic_read(&sp->sk_drops));
971} 978}
972 979
973int udp6_seq_show(struct seq_file *seq, void *v) 980int udp6_seq_show(struct seq_file *seq, void *v)
@@ -978,7 +985,7 @@ int udp6_seq_show(struct seq_file *seq, void *v)
978 "local_address " 985 "local_address "
979 "remote_address " 986 "remote_address "
980 "st tx_queue rx_queue tr tm->when retrnsmt" 987 "st tx_queue rx_queue tr tm->when retrnsmt"
981 " uid timeout inode\n"); 988 " uid timeout inode ref pointer drops\n");
982 else 989 else
983 udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); 990 udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
984 return 0; 991 return 0;
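
[Editor's note] Two themes run through the UDPv6 changes: the MIB macros now take a struct net (sock_net(sk) on socket paths, dev_net(skb->dev) on receive) so each namespace keeps its own counters, and socket lookup hashes through udp_hashfn(net, port) instead of masking the port directly, mixing a per-namespace value into the bucket choice; the /proc rows also gain ref, pointer and drops columns. A hedged sketch of what such a namespace-aware bucket function can look like; the real udp_hashfn() lives in the shared UDP headers and may differ:

#include <net/sock.h>           /* UDP_HTABLE_SIZE (location assumed) */
#include <net/netns/hash.h>     /* net_hash_mix(), assumed available */

/* Hypothetical bucket function: the same port in different namespaces
 * should usually land in different hash chains. */
static inline unsigned int example_udp_hashfn(struct net *net,
                                              unsigned int port)
{
        return (port + net_hash_mix(net)) & (UDP_HTABLE_SIZE - 1);
}
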
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 321b81a4d418..92dd7da766d8 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -29,7 +29,7 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
29 struct msghdr *msg, size_t len, 29 struct msghdr *msg, size_t len,
30 int noblock, int flags, int *addr_len); 30 int noblock, int flags, int *addr_len);
31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
32extern int udpv6_destroy_sock(struct sock *sk); 32extern void udpv6_destroy_sock(struct sock *sk);
33 33
34#ifdef CONFIG_PROC_FS 34#ifdef CONFIG_PROC_FS
35extern int udp6_seq_show(struct seq_file *seq, void *v); 35extern int udp6_seq_show(struct seq_file *seq, void *v);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 491efd00a866..f6cdcb348e05 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -2,8 +2,6 @@
2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. 2 * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6.
3 * See also net/ipv4/udplite.c 3 * See also net/ipv4/udplite.c
4 * 4 *
5 * Version: $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $
6 *
7 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> 5 * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk>
8 * 6 *
9 * Changes: 7 * Changes:
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index e0eab5927c4f..f6e54fa97f47 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -628,8 +628,8 @@ dev_irnet_poll(struct file * file,
628 * This is the way pppd configure us and control us while the PPP 628 * This is the way pppd configure us and control us while the PPP
629 * instance is active. 629 * instance is active.
630 */ 630 */
631static int 631static long
632dev_irnet_ioctl(struct inode * inode, 632dev_irnet_ioctl(
633 struct file * file, 633 struct file * file,
634 unsigned int cmd, 634 unsigned int cmd,
635 unsigned long arg) 635 unsigned long arg)
@@ -660,6 +660,7 @@ dev_irnet_ioctl(struct inode * inode,
660 { 660 {
661 DEBUG(FS_INFO, "Entering PPP discipline.\n"); 661 DEBUG(FS_INFO, "Entering PPP discipline.\n");
662 /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/ 662 /* PPP channel setup (ap->chan in configued in dev_irnet_open())*/
663 lock_kernel();
663 err = ppp_register_channel(&ap->chan); 664 err = ppp_register_channel(&ap->chan);
664 if(err == 0) 665 if(err == 0)
665 { 666 {
@@ -672,12 +673,14 @@ dev_irnet_ioctl(struct inode * inode,
672 } 673 }
673 else 674 else
674 DERROR(FS_ERROR, "Can't setup PPP channel...\n"); 675 DERROR(FS_ERROR, "Can't setup PPP channel...\n");
676 unlock_kernel();
675 } 677 }
676 else 678 else
677 { 679 {
678 /* In theory, should be N_TTY */ 680 /* In theory, should be N_TTY */
679 DEBUG(FS_INFO, "Exiting PPP discipline.\n"); 681 DEBUG(FS_INFO, "Exiting PPP discipline.\n");
680 /* Disconnect from the generic PPP layer */ 682 /* Disconnect from the generic PPP layer */
683 lock_kernel();
681 if(ap->ppp_open) 684 if(ap->ppp_open)
682 { 685 {
683 ap->ppp_open = 0; 686 ap->ppp_open = 0;
@@ -686,24 +689,20 @@ dev_irnet_ioctl(struct inode * inode,
686 else 689 else
687 DERROR(FS_ERROR, "Channel not registered !\n"); 690 DERROR(FS_ERROR, "Channel not registered !\n");
688 err = 0; 691 err = 0;
692 unlock_kernel();
689 } 693 }
690 break; 694 break;
691 695
692 /* Query PPP channel and unit number */ 696 /* Query PPP channel and unit number */
693 case PPPIOCGCHAN: 697 case PPPIOCGCHAN:
694 if(!ap->ppp_open) 698 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
695 break; 699 (int __user *)argp))
696 if(put_user(ppp_channel_index(&ap->chan), (int __user *)argp)) 700 err = 0;
697 break;
698 DEBUG(FS_INFO, "Query channel.\n");
699 err = 0;
700 break; 701 break;
701 case PPPIOCGUNIT: 702 case PPPIOCGUNIT:
702 if(!ap->ppp_open) 703 lock_kernel();
703 break; 704 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
704 if(put_user(ppp_unit_number(&ap->chan), (int __user *)argp)) 705 (int __user *)argp))
705 break;
706 DEBUG(FS_INFO, "Query unit number.\n");
707 err = 0; 706 err = 0;
708 break; 707 break;
709 708
@@ -723,34 +722,39 @@ dev_irnet_ioctl(struct inode * inode,
723 DEBUG(FS_INFO, "Standard PPP ioctl.\n"); 722 DEBUG(FS_INFO, "Standard PPP ioctl.\n");
724 if(!capable(CAP_NET_ADMIN)) 723 if(!capable(CAP_NET_ADMIN))
725 err = -EPERM; 724 err = -EPERM;
726 else 725 else {
726 lock_kernel();
727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg); 727 err = ppp_irnet_ioctl(&ap->chan, cmd, arg);
728 unlock_kernel();
729 }
728 break; 730 break;
729 731
730 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */ 732 /* TTY IOCTLs : Pretend that we are a tty, to keep pppd happy */
731 /* Get termios */ 733 /* Get termios */
732 case TCGETS: 734 case TCGETS:
733 DEBUG(FS_INFO, "Get termios.\n"); 735 DEBUG(FS_INFO, "Get termios.\n");
736 lock_kernel();
734#ifndef TCGETS2 737#ifndef TCGETS2
735 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) 738 if(!kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
736 break; 739 err = 0;
737#else 740#else
738 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios)) 741 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
739 break; 742 err = 0;
740#endif 743#endif
741 err = 0; 744 unlock_kernel();
742 break; 745 break;
743 /* Set termios */ 746 /* Set termios */
744 case TCSETSF: 747 case TCSETSF:
745 DEBUG(FS_INFO, "Set termios.\n"); 748 DEBUG(FS_INFO, "Set termios.\n");
749 lock_kernel();
746#ifndef TCGETS2 750#ifndef TCGETS2
747 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) 751 if(!user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
748 break; 752 err = 0;
749#else 753#else
750 if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp)) 754 if(!user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
751 break; 755 err = 0;
752#endif 756#endif
753 err = 0; 757 unlock_kernel();
754 break; 758 break;
755 759
756 /* Set DTR/RTS */ 760 /* Set DTR/RTS */
@@ -773,7 +777,9 @@ dev_irnet_ioctl(struct inode * inode,
773 * We should also worry that we don't accept junk here and that 777 * We should also worry that we don't accept junk here and that
774 * we get rid of our own buffers */ 778 * we get rid of our own buffers */
775#ifdef FLUSH_TO_PPP 779#ifdef FLUSH_TO_PPP
780 lock_kernel();
776 ppp_output_wakeup(&ap->chan); 781 ppp_output_wakeup(&ap->chan);
782 unlock_kernel();
777#endif /* FLUSH_TO_PPP */ 783#endif /* FLUSH_TO_PPP */
778 err = 0; 784 err = 0;
779 break; 785 break;
@@ -788,7 +794,7 @@ dev_irnet_ioctl(struct inode * inode,
788 794
789 default: 795 default:
790 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd); 796 DERROR(FS_ERROR, "Unsupported ioctl (0x%X)\n", cmd);
791 err = -ENOIOCTLCMD; 797 err = -ENOTTY;
792 } 798 }
793 799
794 DEXIT(FS_TRACE, " - err = 0x%X\n", err); 800 DEXIT(FS_TRACE, " - err = 0x%X\n", err);
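
[Editor's note] dev_irnet_ioctl() moves from the .ioctl file operation, which the VFS calls with the Big Kernel Lock held, to .unlocked_ioctl; the handler therefore takes lock_kernel()/unlock_kernel() explicitly, and only around the cases that still need it, while unknown commands now return -ENOTTY. The conversion pattern, sketched for a hypothetical character device:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>     /* lock_kernel()/unlock_kernel(), 2.6.26 era */

#define EXAMPLE_IOC_DOIT 0x1234 /* placeholder command number */

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        long err = -ENOTTY;     /* unknown ioctl on a character device */

        switch (cmd) {
        case EXAMPLE_IOC_DOIT:
                lock_kernel();  /* BKL only around the work that needs it */
                /* ... do the work ... */
                err = 0;
                unlock_kernel();
                break;
        }
        return err;
}

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = example_unlocked_ioctl,
};
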
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d2beb7df8f7f..d9f8bd4ebd05 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -76,9 +76,8 @@ static ssize_t
76static unsigned int 76static unsigned int
77 dev_irnet_poll(struct file *, 77 dev_irnet_poll(struct file *,
78 poll_table *); 78 poll_table *);
79static int 79static long
80 dev_irnet_ioctl(struct inode *, 80 dev_irnet_ioctl(struct file *,
81 struct file *,
82 unsigned int, 81 unsigned int,
83 unsigned long); 82 unsigned long);
84/* ------------------------ PPP INTERFACE ------------------------ */ 83/* ------------------------ PPP INTERFACE ------------------------ */
@@ -102,7 +101,7 @@ static struct file_operations irnet_device_fops =
102 .read = dev_irnet_read, 101 .read = dev_irnet_read,
103 .write = dev_irnet_write, 102 .write = dev_irnet_write,
104 .poll = dev_irnet_poll, 103 .poll = dev_irnet_poll,
105 .ioctl = dev_irnet_ioctl, 104 .unlocked_ioctl = dev_irnet_ioctl,
106 .open = dev_irnet_open, 105 .open = dev_irnet_open,
107 .release = dev_irnet_close 106 .release = dev_irnet_close
108 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ 107 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 7b0038f45b16..58e4aee3e696 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -644,6 +644,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
644 } 644 }
645 645
646 txmsg.class = 0; 646 txmsg.class = 0;
647 memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
647 txmsg.tag = iucv->send_tag++; 648 txmsg.tag = iucv->send_tag++;
648 memcpy(skb->cb, &txmsg.tag, 4); 649 memcpy(skb->cb, &txmsg.tag, 4);
649 skb_queue_tail(&iucv->send_skb_q, skb); 650 skb_queue_tail(&iucv->send_skb_q, skb);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..531a206ce7a6 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -474,14 +474,14 @@ static void iucv_setmask_mp(void)
474{ 474{
475 int cpu; 475 int cpu;
476 476
477 preempt_disable(); 477 get_online_cpus();
478 for_each_online_cpu(cpu) 478 for_each_online_cpu(cpu)
479 /* Enable all cpus with a declared buffer. */ 479 /* Enable all cpus with a declared buffer. */
480 if (cpu_isset(cpu, iucv_buffer_cpumask) && 480 if (cpu_isset(cpu, iucv_buffer_cpumask) &&
481 !cpu_isset(cpu, iucv_irq_cpumask)) 481 !cpu_isset(cpu, iucv_irq_cpumask))
482 smp_call_function_single(cpu, iucv_allow_cpu, 482 smp_call_function_single(cpu, iucv_allow_cpu,
483 NULL, 0, 1); 483 NULL, 0, 1);
484 preempt_enable(); 484 put_online_cpus();
485} 485}
486 486
487/** 487/**
@@ -521,16 +521,17 @@ static int iucv_enable(void)
521 goto out; 521 goto out;
522 /* Declare per cpu buffers. */ 522 /* Declare per cpu buffers. */
523 rc = -EIO; 523 rc = -EIO;
524 preempt_disable(); 524 get_online_cpus();
525 for_each_online_cpu(cpu) 525 for_each_online_cpu(cpu)
526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1); 526 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
527 preempt_enable();
528 if (cpus_empty(iucv_buffer_cpumask)) 527 if (cpus_empty(iucv_buffer_cpumask))
529 /* No cpu could declare an iucv buffer. */ 528 /* No cpu could declare an iucv buffer. */
530 goto out_path; 529 goto out_path;
530 put_online_cpus();
531 return 0; 531 return 0;
532 532
533out_path: 533out_path:
534 put_online_cpus();
534 kfree(iucv_path_table); 535 kfree(iucv_path_table);
535out: 536out:
536 return rc; 537 return rc;
@@ -545,7 +546,9 @@ out:
545 */ 546 */
546static void iucv_disable(void) 547static void iucv_disable(void)
547{ 548{
549 get_online_cpus();
548 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1); 550 on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
551 put_online_cpus();
549 kfree(iucv_path_table); 552 kfree(iucv_path_table);
550} 553}
551 554
@@ -598,7 +601,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
598 return NOTIFY_OK; 601 return NOTIFY_OK;
599} 602}
600 603
601static struct notifier_block __cpuinitdata iucv_cpu_notifier = { 604static struct notifier_block __refdata iucv_cpu_notifier = {
602 .notifier_call = iucv_cpu_notify, 605 .notifier_call = iucv_cpu_notify,
603}; 606};
604 607
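The iucv hunks replace preempt_disable()/preempt_enable() around the per-CPU cross calls with get_online_cpus()/put_online_cpus(): what the code needs is for the set of online CPUs to stay stable, and the hotplug reference allows sleeping where a preemption-disabled region would not. Note that iucv_enable() now drops the reference on its error path as well. A sketch of the pattern with a hypothetical per-CPU setup function (example_setup_cpu and example_setup_all_cpus are made-up names; smp_call_function_single() is used in the same five-argument form that appears in the hunk):

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static void example_setup_cpu(void *info)
    {
        /* hypothetical per-CPU initialisation, runs on each online CPU */
    }

    static void example_setup_all_cpus(void)
    {
        int cpu;

        get_online_cpus();    /* pins the online mask; may sleep, unlike preempt_disable() */
        for_each_online_cpu(cpu)
            smp_call_function_single(cpu, example_setup_cpu, NULL, 0, 1);
        put_online_cpus();    /* every exit path must drop the reference */
    }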
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7470e367272b..f0fc46c8038d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -579,25 +579,43 @@ static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
579 return (proto ? proto : IPSEC_PROTO_ANY); 579 return (proto ? proto : IPSEC_PROTO_ANY);
580} 580}
581 581
582static int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, 582static inline int pfkey_sockaddr_len(sa_family_t family)
583 xfrm_address_t *xaddr)
584{ 583{
585 switch (((struct sockaddr*)(addr + 1))->sa_family) { 584 switch (family) {
585 case AF_INET:
586 return sizeof(struct sockaddr_in);
587#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
588 case AF_INET6:
589 return sizeof(struct sockaddr_in6);
590#endif
591 }
592 return 0;
593}
594
595static
596int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
597{
598 switch (sa->sa_family) {
586 case AF_INET: 599 case AF_INET:
587 xaddr->a4 = 600 xaddr->a4 =
588 ((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr; 601 ((struct sockaddr_in *)sa)->sin_addr.s_addr;
589 return AF_INET; 602 return AF_INET;
590#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 603#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
591 case AF_INET6: 604 case AF_INET6:
592 memcpy(xaddr->a6, 605 memcpy(xaddr->a6,
593 &((struct sockaddr_in6 *)(addr + 1))->sin6_addr, 606 &((struct sockaddr_in6 *)sa)->sin6_addr,
594 sizeof(struct in6_addr)); 607 sizeof(struct in6_addr));
595 return AF_INET6; 608 return AF_INET6;
596#endif 609#endif
597 default:
598 return 0;
599 } 610 }
600 /* NOTREACHED */ 611 return 0;
612}
613
614static
615int pfkey_sadb_addr2xfrm_addr(struct sadb_address *addr, xfrm_address_t *xaddr)
616{
617 return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
618 xaddr);
601} 619}
602 620
603static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs) 621static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **ext_hdrs)
@@ -642,20 +660,11 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **
642} 660}
643 661
644#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 662#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
663
645static int 664static int
646pfkey_sockaddr_size(sa_family_t family) 665pfkey_sockaddr_size(sa_family_t family)
647{ 666{
648 switch (family) { 667 return PFKEY_ALIGN8(pfkey_sockaddr_len(family));
649 case AF_INET:
650 return PFKEY_ALIGN8(sizeof(struct sockaddr_in));
651#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
652 case AF_INET6:
653 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6));
654#endif
655 default:
656 return 0;
657 }
658 /* NOTREACHED */
659} 668}
660 669
661static inline int pfkey_mode_from_xfrm(int mode) 670static inline int pfkey_mode_from_xfrm(int mode)
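pfkey_sockaddr_size() collapses to PFKEY_ALIGN8(pfkey_sockaddr_len(family)). The macro rounds its argument up to the next multiple of 8, the 64-bit unit PF_KEY extensions are measured in, so a 16-byte sockaddr_in stays 16 and a 28-byte sockaddr_in6 becomes 32. A quick user-space check of the rounding expression (the sizes in the comment assume the usual Linux ABI):

    #include <stdio.h>

    #define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))

    int main(void)
    {
        int lens[] = { 16, 28 };    /* typical sizeof sockaddr_in / sockaddr_in6 */
        int i;

        for (i = 0; i < 2; i++)
            printf("%d rounds to %d\n", lens[i], PFKEY_ALIGN8(lens[i]));
        return 0;    /* prints 16 rounds to 16, 28 rounds to 32 */
    }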
@@ -687,6 +696,36 @@ static inline int pfkey_mode_to_xfrm(int mode)
687 } 696 }
688} 697}
689 698
699static unsigned int pfkey_sockaddr_fill(xfrm_address_t *xaddr, __be16 port,
700 struct sockaddr *sa,
701 unsigned short family)
702{
703 switch (family) {
704 case AF_INET:
705 {
706 struct sockaddr_in *sin = (struct sockaddr_in *)sa;
707 sin->sin_family = AF_INET;
708 sin->sin_port = port;
709 sin->sin_addr.s_addr = xaddr->a4;
710 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
711 return 32;
712 }
713#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
714 case AF_INET6:
715 {
716 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
717 sin6->sin6_family = AF_INET6;
718 sin6->sin6_port = port;
719 sin6->sin6_flowinfo = 0;
720 ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6);
721 sin6->sin6_scope_id = 0;
722 return 128;
723 }
724#endif
725 }
726 return 0;
727}
728
690static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, 729static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
691 int add_keys, int hsc) 730 int add_keys, int hsc)
692{ 731{
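pfkey_sockaddr_fill(), added above, is the write-side counterpart of the extract helper: it builds a sockaddr of the requested family at the given location and returns the host prefix length (32 for IPv4, 128 for IPv6, 0 for anything else), which the callers further down either store in sadb_address_prefixlen or treat as a BUG(). A user-space sketch of the same shape, writing into a sockaddr_storage (example_sockaddr_fill is a made-up name, and plain in_addr/in6_addr stand in for xfrm_address_t):

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Fill *sa with the given address and port; return the prefix length
     * of a full host address, or 0 if the family is unsupported. */
    static unsigned int example_sockaddr_fill(const void *addr, in_port_t port,
                                              struct sockaddr *sa,
                                              sa_family_t family)
    {
        switch (family) {
        case AF_INET: {
            struct sockaddr_in *sin = (struct sockaddr_in *)sa;

            memset(sin, 0, sizeof(*sin));
            sin->sin_family = AF_INET;
            sin->sin_port = port;
            memcpy(&sin->sin_addr, addr, sizeof(sin->sin_addr));
            return 32;
        }
        case AF_INET6: {
            struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

            memset(sin6, 0, sizeof(*sin6));
            sin6->sin6_family = AF_INET6;
            sin6->sin6_port = port;
            memcpy(&sin6->sin6_addr, addr, sizeof(sin6->sin6_addr));
            return 128;
        }
        }
        return 0;
    }

    int main(void)
    {
        struct sockaddr_storage ss;
        struct in_addr a4;

        inet_pton(AF_INET, "198.51.100.7", &a4);
        printf("prefixlen %u\n",
               example_sockaddr_fill(&a4, htons(500), (struct sockaddr *)&ss,
                                     AF_INET));
        return 0;
    }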
@@ -697,13 +736,9 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
697 struct sadb_address *addr; 736 struct sadb_address *addr;
698 struct sadb_key *key; 737 struct sadb_key *key;
699 struct sadb_x_sa2 *sa2; 738 struct sadb_x_sa2 *sa2;
700 struct sockaddr_in *sin;
701 struct sadb_x_sec_ctx *sec_ctx; 739 struct sadb_x_sec_ctx *sec_ctx;
702 struct xfrm_sec_ctx *xfrm_ctx; 740 struct xfrm_sec_ctx *xfrm_ctx;
703 int ctx_size = 0; 741 int ctx_size = 0;
704#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
705 struct sockaddr_in6 *sin6;
706#endif
707 int size; 742 int size;
708 int auth_key_size = 0; 743 int auth_key_size = 0;
709 int encrypt_key_size = 0; 744 int encrypt_key_size = 0;
@@ -732,14 +767,7 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
732 } 767 }
733 768
734 /* identity & sensitivity */ 769 /* identity & sensitivity */
735 770 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr, x->props.family))
736 if ((x->props.family == AF_INET &&
737 x->sel.saddr.a4 != x->props.saddr.a4)
738#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
739 || (x->props.family == AF_INET6 &&
740 memcmp (x->sel.saddr.a6, x->props.saddr.a6, sizeof (struct in6_addr)))
741#endif
742 )
743 size += sizeof(struct sadb_address) + sockaddr_size; 771 size += sizeof(struct sadb_address) + sockaddr_size;
744 772
745 if (add_keys) { 773 if (add_keys) {
@@ -861,29 +889,12 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
861 protocol's number." - RFC2367 */ 889 protocol's number." - RFC2367 */
862 addr->sadb_address_proto = 0; 890 addr->sadb_address_proto = 0;
863 addr->sadb_address_reserved = 0; 891 addr->sadb_address_reserved = 0;
864 if (x->props.family == AF_INET) {
865 addr->sadb_address_prefixlen = 32;
866 892
867 sin = (struct sockaddr_in *) (addr + 1); 893 addr->sadb_address_prefixlen =
868 sin->sin_family = AF_INET; 894 pfkey_sockaddr_fill(&x->props.saddr, 0,
869 sin->sin_addr.s_addr = x->props.saddr.a4; 895 (struct sockaddr *) (addr + 1),
870 sin->sin_port = 0; 896 x->props.family);
871 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 897 if (!addr->sadb_address_prefixlen)
872 }
873#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
874 else if (x->props.family == AF_INET6) {
875 addr->sadb_address_prefixlen = 128;
876
877 sin6 = (struct sockaddr_in6 *) (addr + 1);
878 sin6->sin6_family = AF_INET6;
879 sin6->sin6_port = 0;
880 sin6->sin6_flowinfo = 0;
881 memcpy(&sin6->sin6_addr, x->props.saddr.a6,
882 sizeof(struct in6_addr));
883 sin6->sin6_scope_id = 0;
884 }
885#endif
886 else
887 BUG(); 898 BUG();
888 899
889 /* dst address */ 900 /* dst address */
@@ -894,70 +905,32 @@ static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x,
894 sizeof(uint64_t); 905 sizeof(uint64_t);
895 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 906 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
896 addr->sadb_address_proto = 0; 907 addr->sadb_address_proto = 0;
897 addr->sadb_address_prefixlen = 32; /* XXX */
898 addr->sadb_address_reserved = 0; 908 addr->sadb_address_reserved = 0;
899 if (x->props.family == AF_INET) {
900 sin = (struct sockaddr_in *) (addr + 1);
901 sin->sin_family = AF_INET;
902 sin->sin_addr.s_addr = x->id.daddr.a4;
903 sin->sin_port = 0;
904 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
905 909
906 if (x->sel.saddr.a4 != x->props.saddr.a4) { 910 addr->sadb_address_prefixlen =
907 addr = (struct sadb_address*) skb_put(skb, 911 pfkey_sockaddr_fill(&x->id.daddr, 0,
908 sizeof(struct sadb_address)+sockaddr_size); 912 (struct sockaddr *) (addr + 1),
909 addr->sadb_address_len = 913 x->props.family);
910 (sizeof(struct sadb_address)+sockaddr_size)/ 914 if (!addr->sadb_address_prefixlen)
911 sizeof(uint64_t); 915 BUG();
912 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
913 addr->sadb_address_proto =
914 pfkey_proto_from_xfrm(x->sel.proto);
915 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
916 addr->sadb_address_reserved = 0;
917
918 sin = (struct sockaddr_in *) (addr + 1);
919 sin->sin_family = AF_INET;
920 sin->sin_addr.s_addr = x->sel.saddr.a4;
921 sin->sin_port = x->sel.sport;
922 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
923 }
924 }
925#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
926 else if (x->props.family == AF_INET6) {
927 addr->sadb_address_prefixlen = 128;
928 916
929 sin6 = (struct sockaddr_in6 *) (addr + 1); 917 if (xfrm_addr_cmp(&x->sel.saddr, &x->props.saddr,
930 sin6->sin6_family = AF_INET6; 918 x->props.family)) {
931 sin6->sin6_port = 0; 919 addr = (struct sadb_address*) skb_put(skb,
932 sin6->sin6_flowinfo = 0; 920 sizeof(struct sadb_address)+sockaddr_size);
933 memcpy(&sin6->sin6_addr, x->id.daddr.a6, sizeof(struct in6_addr)); 921 addr->sadb_address_len =
934 sin6->sin6_scope_id = 0; 922 (sizeof(struct sadb_address)+sockaddr_size)/
923 sizeof(uint64_t);
924 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
925 addr->sadb_address_proto =
926 pfkey_proto_from_xfrm(x->sel.proto);
927 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
928 addr->sadb_address_reserved = 0;
935 929
936 if (memcmp (x->sel.saddr.a6, x->props.saddr.a6, 930 pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport,
937 sizeof(struct in6_addr))) { 931 (struct sockaddr *) (addr + 1),
938 addr = (struct sadb_address *) skb_put(skb, 932 x->props.family);
939 sizeof(struct sadb_address)+sockaddr_size);
940 addr->sadb_address_len =
941 (sizeof(struct sadb_address)+sockaddr_size)/
942 sizeof(uint64_t);
943 addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
944 addr->sadb_address_proto =
945 pfkey_proto_from_xfrm(x->sel.proto);
946 addr->sadb_address_prefixlen = x->sel.prefixlen_s;
947 addr->sadb_address_reserved = 0;
948
949 sin6 = (struct sockaddr_in6 *) (addr + 1);
950 sin6->sin6_family = AF_INET6;
951 sin6->sin6_port = x->sel.sport;
952 sin6->sin6_flowinfo = 0;
953 memcpy(&sin6->sin6_addr, x->sel.saddr.a6,
954 sizeof(struct in6_addr));
955 sin6->sin6_scope_id = 0;
956 }
957 } 933 }
958#endif
959 else
960 BUG();
961 934
962 /* auth key */ 935 /* auth key */
963 if (add_keys && auth_key_size) { 936 if (add_keys && auth_key_size) {
@@ -1853,10 +1826,6 @@ static int
1853parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) 1826parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1854{ 1827{
1855 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; 1828 struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
1856 struct sockaddr_in *sin;
1857#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1858 struct sockaddr_in6 *sin6;
1859#endif
1860 int mode; 1829 int mode;
1861 1830
1862 if (xp->xfrm_nr >= XFRM_MAX_DEPTH) 1831 if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
@@ -1881,31 +1850,19 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1881 1850
1882 /* addresses present only in tunnel mode */ 1851 /* addresses present only in tunnel mode */
1883 if (t->mode == XFRM_MODE_TUNNEL) { 1852 if (t->mode == XFRM_MODE_TUNNEL) {
1884 struct sockaddr *sa; 1853 u8 *sa = (u8 *) (rq + 1);
1885 sa = (struct sockaddr *)(rq+1); 1854 int family, socklen;
1886 switch(sa->sa_family) { 1855
1887 case AF_INET: 1856 family = pfkey_sockaddr_extract((struct sockaddr *)sa,
1888 sin = (struct sockaddr_in*)sa; 1857 &t->saddr);
1889 t->saddr.a4 = sin->sin_addr.s_addr; 1858 if (!family)
1890 sin++;
1891 if (sin->sin_family != AF_INET)
1892 return -EINVAL;
1893 t->id.daddr.a4 = sin->sin_addr.s_addr;
1894 break;
1895#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1896 case AF_INET6:
1897 sin6 = (struct sockaddr_in6*)sa;
1898 memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1899 sin6++;
1900 if (sin6->sin6_family != AF_INET6)
1901 return -EINVAL;
1902 memcpy(t->id.daddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
1903 break;
1904#endif
1905 default:
1906 return -EINVAL; 1859 return -EINVAL;
1907 } 1860
1908 t->encap_family = sa->sa_family; 1861 socklen = pfkey_sockaddr_len(family);
1862 if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
1863 &t->id.daddr) != family)
1864 return -EINVAL;
1865 t->encap_family = family;
1909 } else 1866 } else
1910 t->encap_family = xp->family; 1867 t->encap_family = xp->family;
1911 1868
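In tunnel mode the sadb_x_ipsecrequest is followed by two packed sockaddrs, source then destination, whose width depends on the family; the rewritten parser extracts the first address, asks pfkey_sockaddr_len() how wide it was, and advances a byte pointer by that amount to reach the second, rejecting the request if the two families disagree. The cursor is a u8 * precisely so that "sa + socklen" is byte arithmetic. A small user-space sketch of the packed layout and the stepping (the buffer and names are invented for illustration):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct sockaddr_in src = { .sin_family = AF_INET };
        struct sockaddr_in dst = { .sin_family = AF_INET };
        unsigned char buf[2 * sizeof(struct sockaddr_in)];
        const unsigned char *sa = buf;                  /* byte cursor */
        size_t socklen = sizeof(struct sockaddr_in);    /* pfkey_sockaddr_len(AF_INET) */
        struct sockaddr_in out_src, out_dst;

        src.sin_addr.s_addr = htonl(0xc0000201);        /* 192.0.2.1 */
        dst.sin_addr.s_addr = htonl(0xc0000202);        /* 192.0.2.2 */
        memcpy(buf, &src, socklen);                     /* addresses packed back to back */
        memcpy(buf + socklen, &dst, socklen);

        memcpy(&out_src, sa, socklen);                  /* first sockaddr  */
        memcpy(&out_dst, sa + socklen, socklen);        /* second sockaddr */
        printf("families %d and %d must match\n",
               out_src.sin_family, out_dst.sin_family);
        return 0;
    }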
@@ -1952,9 +1909,7 @@ static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
1952 1909
1953 for (i=0; i<xp->xfrm_nr; i++) { 1910 for (i=0; i<xp->xfrm_nr; i++) {
1954 t = xp->xfrm_vec + i; 1911 t = xp->xfrm_vec + i;
1955 socklen += (t->encap_family == AF_INET ? 1912 socklen += pfkey_sockaddr_len(t->encap_family);
1956 sizeof(struct sockaddr_in) :
1957 sizeof(struct sockaddr_in6));
1958 } 1913 }
1959 1914
1960 return sizeof(struct sadb_msg) + 1915 return sizeof(struct sadb_msg) +
@@ -1987,18 +1942,12 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
1987 struct sadb_address *addr; 1942 struct sadb_address *addr;
1988 struct sadb_lifetime *lifetime; 1943 struct sadb_lifetime *lifetime;
1989 struct sadb_x_policy *pol; 1944 struct sadb_x_policy *pol;
1990 struct sockaddr_in *sin;
1991 struct sadb_x_sec_ctx *sec_ctx; 1945 struct sadb_x_sec_ctx *sec_ctx;
1992 struct xfrm_sec_ctx *xfrm_ctx; 1946 struct xfrm_sec_ctx *xfrm_ctx;
1993#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1994 struct sockaddr_in6 *sin6;
1995#endif
1996 int i; 1947 int i;
1997 int size; 1948 int size;
1998 int sockaddr_size = pfkey_sockaddr_size(xp->family); 1949 int sockaddr_size = pfkey_sockaddr_size(xp->family);
1999 int socklen = (xp->family == AF_INET ? 1950 int socklen = pfkey_sockaddr_len(xp->family);
2000 sizeof(struct sockaddr_in) :
2001 sizeof(struct sockaddr_in6));
2002 1951
2003 size = pfkey_xfrm_policy2msg_size(xp); 1952 size = pfkey_xfrm_policy2msg_size(xp);
2004 1953
@@ -2016,26 +1965,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2016 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1965 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2017 addr->sadb_address_prefixlen = xp->selector.prefixlen_s; 1966 addr->sadb_address_prefixlen = xp->selector.prefixlen_s;
2018 addr->sadb_address_reserved = 0; 1967 addr->sadb_address_reserved = 0;
2019 /* src address */ 1968 if (!pfkey_sockaddr_fill(&xp->selector.saddr,
2020 if (xp->family == AF_INET) { 1969 xp->selector.sport,
2021 sin = (struct sockaddr_in *) (addr + 1); 1970 (struct sockaddr *) (addr + 1),
2022 sin->sin_family = AF_INET; 1971 xp->family))
2023 sin->sin_addr.s_addr = xp->selector.saddr.a4;
2024 sin->sin_port = xp->selector.sport;
2025 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2026 }
2027#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2028 else if (xp->family == AF_INET6) {
2029 sin6 = (struct sockaddr_in6 *) (addr + 1);
2030 sin6->sin6_family = AF_INET6;
2031 sin6->sin6_port = xp->selector.sport;
2032 sin6->sin6_flowinfo = 0;
2033 memcpy(&sin6->sin6_addr, xp->selector.saddr.a6,
2034 sizeof(struct in6_addr));
2035 sin6->sin6_scope_id = 0;
2036 }
2037#endif
2038 else
2039 BUG(); 1972 BUG();
2040 1973
2041 /* dst address */ 1974 /* dst address */
@@ -2048,26 +1981,10 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2048 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); 1981 addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto);
2049 addr->sadb_address_prefixlen = xp->selector.prefixlen_d; 1982 addr->sadb_address_prefixlen = xp->selector.prefixlen_d;
2050 addr->sadb_address_reserved = 0; 1983 addr->sadb_address_reserved = 0;
2051 if (xp->family == AF_INET) { 1984
2052 sin = (struct sockaddr_in *) (addr + 1); 1985 pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport,
2053 sin->sin_family = AF_INET; 1986 (struct sockaddr *) (addr + 1),
2054 sin->sin_addr.s_addr = xp->selector.daddr.a4; 1987 xp->family);
2055 sin->sin_port = xp->selector.dport;
2056 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2057 }
2058#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2059 else if (xp->family == AF_INET6) {
2060 sin6 = (struct sockaddr_in6 *) (addr + 1);
2061 sin6->sin6_family = AF_INET6;
2062 sin6->sin6_port = xp->selector.dport;
2063 sin6->sin6_flowinfo = 0;
2064 memcpy(&sin6->sin6_addr, xp->selector.daddr.a6,
2065 sizeof(struct in6_addr));
2066 sin6->sin6_scope_id = 0;
2067 }
2068#endif
2069 else
2070 BUG();
2071 1988
2072 /* hard time */ 1989 /* hard time */
2073 lifetime = (struct sadb_lifetime *) skb_put(skb, 1990 lifetime = (struct sadb_lifetime *) skb_put(skb,
@@ -2121,12 +2038,13 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2121 int mode; 2038 int mode;
2122 2039
2123 req_size = sizeof(struct sadb_x_ipsecrequest); 2040 req_size = sizeof(struct sadb_x_ipsecrequest);
2124 if (t->mode == XFRM_MODE_TUNNEL) 2041 if (t->mode == XFRM_MODE_TUNNEL) {
2125 req_size += ((t->encap_family == AF_INET ? 2042 socklen = pfkey_sockaddr_len(t->encap_family);
2126 sizeof(struct sockaddr_in) : 2043 req_size += socklen * 2;
2127 sizeof(struct sockaddr_in6)) * 2); 2044 } else {
2128 else
2129 size -= 2*socklen; 2045 size -= 2*socklen;
2046 socklen = 0;
2047 }
2130 rq = (void*)skb_put(skb, req_size); 2048 rq = (void*)skb_put(skb, req_size);
2131 pol->sadb_x_policy_len += req_size/8; 2049 pol->sadb_x_policy_len += req_size/8;
2132 memset(rq, 0, sizeof(*rq)); 2050 memset(rq, 0, sizeof(*rq));
@@ -2141,42 +2059,15 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2141 if (t->optional) 2059 if (t->optional)
2142 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; 2060 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
2143 rq->sadb_x_ipsecrequest_reqid = t->reqid; 2061 rq->sadb_x_ipsecrequest_reqid = t->reqid;
2062
2144 if (t->mode == XFRM_MODE_TUNNEL) { 2063 if (t->mode == XFRM_MODE_TUNNEL) {
2145 switch (t->encap_family) { 2064 u8 *sa = (void *)(rq + 1);
2146 case AF_INET: 2065 pfkey_sockaddr_fill(&t->saddr, 0,
2147 sin = (void*)(rq+1); 2066 (struct sockaddr *)sa,
2148 sin->sin_family = AF_INET; 2067 t->encap_family);
2149 sin->sin_addr.s_addr = t->saddr.a4; 2068 pfkey_sockaddr_fill(&t->id.daddr, 0,
2150 sin->sin_port = 0; 2069 (struct sockaddr *) (sa + socklen),
2151 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 2070 t->encap_family);
2152 sin++;
2153 sin->sin_family = AF_INET;
2154 sin->sin_addr.s_addr = t->id.daddr.a4;
2155 sin->sin_port = 0;
2156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2157 break;
2158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2159 case AF_INET6:
2160 sin6 = (void*)(rq+1);
2161 sin6->sin6_family = AF_INET6;
2162 sin6->sin6_port = 0;
2163 sin6->sin6_flowinfo = 0;
2164 memcpy(&sin6->sin6_addr, t->saddr.a6,
2165 sizeof(struct in6_addr));
2166 sin6->sin6_scope_id = 0;
2167
2168 sin6++;
2169 sin6->sin6_family = AF_INET6;
2170 sin6->sin6_port = 0;
2171 sin6->sin6_flowinfo = 0;
2172 memcpy(&sin6->sin6_addr, t->id.daddr.a6,
2173 sizeof(struct in6_addr));
2174 sin6->sin6_scope_id = 0;
2175 break;
2176#endif
2177 default:
2178 break;
2179 }
2180 } 2071 }
2181 } 2072 }
2182 2073
@@ -2459,61 +2350,31 @@ out:
2459#ifdef CONFIG_NET_KEY_MIGRATE 2350#ifdef CONFIG_NET_KEY_MIGRATE
2460static int pfkey_sockaddr_pair_size(sa_family_t family) 2351static int pfkey_sockaddr_pair_size(sa_family_t family)
2461{ 2352{
2462 switch (family) { 2353 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
2463 case AF_INET:
2464 return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
2465#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2466 case AF_INET6:
2467 return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
2468#endif
2469 default:
2470 return 0;
2471 }
2472 /* NOTREACHED */
2473} 2354}
2474 2355
2475static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq, 2356static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
2476 xfrm_address_t *saddr, xfrm_address_t *daddr, 2357 xfrm_address_t *saddr, xfrm_address_t *daddr,
2477 u16 *family) 2358 u16 *family)
2478{ 2359{
2479 struct sockaddr *sa = (struct sockaddr *)(rq + 1); 2360 u8 *sa = (u8 *) (rq + 1);
2361 int af, socklen;
2362
2480 if (rq->sadb_x_ipsecrequest_len < 2363 if (rq->sadb_x_ipsecrequest_len <
2481 pfkey_sockaddr_pair_size(sa->sa_family)) 2364 pfkey_sockaddr_pair_size(((struct sockaddr *)sa)->sa_family))
2482 return -EINVAL; 2365 return -EINVAL;
2483 2366
2484 switch (sa->sa_family) { 2367 af = pfkey_sockaddr_extract((struct sockaddr *) sa,
2485 case AF_INET: 2368 saddr);
2486 { 2369 if (!af)
2487 struct sockaddr_in *sin;
2488 sin = (struct sockaddr_in *)sa;
2489 if ((sin+1)->sin_family != AF_INET)
2490 return -EINVAL;
2491 memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
2492 sin++;
2493 memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
2494 *family = AF_INET;
2495 break;
2496 }
2497#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2498 case AF_INET6:
2499 {
2500 struct sockaddr_in6 *sin6;
2501 sin6 = (struct sockaddr_in6 *)sa;
2502 if ((sin6+1)->sin6_family != AF_INET6)
2503 return -EINVAL;
2504 memcpy(&saddr->a6, &sin6->sin6_addr,
2505 sizeof(saddr->a6));
2506 sin6++;
2507 memcpy(&daddr->a6, &sin6->sin6_addr,
2508 sizeof(daddr->a6));
2509 *family = AF_INET6;
2510 break;
2511 }
2512#endif
2513 default:
2514 return -EINVAL; 2370 return -EINVAL;
2515 }
2516 2371
2372 socklen = pfkey_sockaddr_len(af);
2373 if (pfkey_sockaddr_extract((struct sockaddr *) (sa + socklen),
2374 daddr) != af)
2375 return -EINVAL;
2376
2377 *family = af;
2517 return 0; 2378 return 0;
2518} 2379}
2519 2380
@@ -3094,10 +2955,6 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3094 struct sadb_msg *hdr; 2955 struct sadb_msg *hdr;
3095 struct sadb_address *addr; 2956 struct sadb_address *addr;
3096 struct sadb_x_policy *pol; 2957 struct sadb_x_policy *pol;
3097 struct sockaddr_in *sin;
3098#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3099 struct sockaddr_in6 *sin6;
3100#endif
3101 int sockaddr_size; 2958 int sockaddr_size;
3102 int size; 2959 int size;
3103 struct sadb_x_sec_ctx *sec_ctx; 2960 struct sadb_x_sec_ctx *sec_ctx;
@@ -3146,29 +3003,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3146 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3003 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3147 addr->sadb_address_proto = 0; 3004 addr->sadb_address_proto = 0;
3148 addr->sadb_address_reserved = 0; 3005 addr->sadb_address_reserved = 0;
3149 if (x->props.family == AF_INET) { 3006 addr->sadb_address_prefixlen =
3150 addr->sadb_address_prefixlen = 32; 3007 pfkey_sockaddr_fill(&x->props.saddr, 0,
3151 3008 (struct sockaddr *) (addr + 1),
3152 sin = (struct sockaddr_in *) (addr + 1); 3009 x->props.family);
3153 sin->sin_family = AF_INET; 3010 if (!addr->sadb_address_prefixlen)
3154 sin->sin_addr.s_addr = x->props.saddr.a4;
3155 sin->sin_port = 0;
3156 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3157 }
3158#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3159 else if (x->props.family == AF_INET6) {
3160 addr->sadb_address_prefixlen = 128;
3161
3162 sin6 = (struct sockaddr_in6 *) (addr + 1);
3163 sin6->sin6_family = AF_INET6;
3164 sin6->sin6_port = 0;
3165 sin6->sin6_flowinfo = 0;
3166 memcpy(&sin6->sin6_addr,
3167 x->props.saddr.a6, sizeof(struct in6_addr));
3168 sin6->sin6_scope_id = 0;
3169 }
3170#endif
3171 else
3172 BUG(); 3011 BUG();
3173 3012
3174 /* dst address */ 3013 /* dst address */
@@ -3180,29 +3019,11 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3180 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3019 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3181 addr->sadb_address_proto = 0; 3020 addr->sadb_address_proto = 0;
3182 addr->sadb_address_reserved = 0; 3021 addr->sadb_address_reserved = 0;
3183 if (x->props.family == AF_INET) { 3022 addr->sadb_address_prefixlen =
3184 addr->sadb_address_prefixlen = 32; 3023 pfkey_sockaddr_fill(&x->id.daddr, 0,
3185 3024 (struct sockaddr *) (addr + 1),
3186 sin = (struct sockaddr_in *) (addr + 1); 3025 x->props.family);
3187 sin->sin_family = AF_INET; 3026 if (!addr->sadb_address_prefixlen)
3188 sin->sin_addr.s_addr = x->id.daddr.a4;
3189 sin->sin_port = 0;
3190 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3191 }
3192#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3193 else if (x->props.family == AF_INET6) {
3194 addr->sadb_address_prefixlen = 128;
3195
3196 sin6 = (struct sockaddr_in6 *) (addr + 1);
3197 sin6->sin6_family = AF_INET6;
3198 sin6->sin6_port = 0;
3199 sin6->sin6_flowinfo = 0;
3200 memcpy(&sin6->sin6_addr,
3201 x->id.daddr.a6, sizeof(struct in6_addr));
3202 sin6->sin6_scope_id = 0;
3203 }
3204#endif
3205 else
3206 BUG(); 3027 BUG();
3207 3028
3208 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); 3029 pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy));
@@ -3328,10 +3149,6 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3328 struct sadb_sa *sa; 3149 struct sadb_sa *sa;
3329 struct sadb_address *addr; 3150 struct sadb_address *addr;
3330 struct sadb_x_nat_t_port *n_port; 3151 struct sadb_x_nat_t_port *n_port;
3331 struct sockaddr_in *sin;
3332#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3333 struct sockaddr_in6 *sin6;
3334#endif
3335 int sockaddr_size; 3152 int sockaddr_size;
3336 int size; 3153 int size;
3337 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0); 3154 __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0);
@@ -3395,29 +3212,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3395 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; 3212 addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
3396 addr->sadb_address_proto = 0; 3213 addr->sadb_address_proto = 0;
3397 addr->sadb_address_reserved = 0; 3214 addr->sadb_address_reserved = 0;
3398 if (x->props.family == AF_INET) { 3215 addr->sadb_address_prefixlen =
3399 addr->sadb_address_prefixlen = 32; 3216 pfkey_sockaddr_fill(&x->props.saddr, 0,
3400 3217 (struct sockaddr *) (addr + 1),
3401 sin = (struct sockaddr_in *) (addr + 1); 3218 x->props.family);
3402 sin->sin_family = AF_INET; 3219 if (!addr->sadb_address_prefixlen)
3403 sin->sin_addr.s_addr = x->props.saddr.a4;
3404 sin->sin_port = 0;
3405 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3406 }
3407#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3408 else if (x->props.family == AF_INET6) {
3409 addr->sadb_address_prefixlen = 128;
3410
3411 sin6 = (struct sockaddr_in6 *) (addr + 1);
3412 sin6->sin6_family = AF_INET6;
3413 sin6->sin6_port = 0;
3414 sin6->sin6_flowinfo = 0;
3415 memcpy(&sin6->sin6_addr,
3416 x->props.saddr.a6, sizeof(struct in6_addr));
3417 sin6->sin6_scope_id = 0;
3418 }
3419#endif
3420 else
3421 BUG(); 3220 BUG();
3422 3221
3423 /* NAT_T_SPORT (old port) */ 3222 /* NAT_T_SPORT (old port) */
@@ -3436,28 +3235,11 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3436 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; 3235 addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
3437 addr->sadb_address_proto = 0; 3236 addr->sadb_address_proto = 0;
3438 addr->sadb_address_reserved = 0; 3237 addr->sadb_address_reserved = 0;
3439 if (x->props.family == AF_INET) { 3238 addr->sadb_address_prefixlen =
3440 addr->sadb_address_prefixlen = 32; 3239 pfkey_sockaddr_fill(ipaddr, 0,
3441 3240 (struct sockaddr *) (addr + 1),
3442 sin = (struct sockaddr_in *) (addr + 1); 3241 x->props.family);
3443 sin->sin_family = AF_INET; 3242 if (!addr->sadb_address_prefixlen)
3444 sin->sin_addr.s_addr = ipaddr->a4;
3445 sin->sin_port = 0;
3446 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3447 }
3448#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3449 else if (x->props.family == AF_INET6) {
3450 addr->sadb_address_prefixlen = 128;
3451
3452 sin6 = (struct sockaddr_in6 *) (addr + 1);
3453 sin6->sin6_family = AF_INET6;
3454 sin6->sin6_port = 0;
3455 sin6->sin6_flowinfo = 0;
3456 memcpy(&sin6->sin6_addr, &ipaddr->a6, sizeof(struct in6_addr));
3457 sin6->sin6_scope_id = 0;
3458 }
3459#endif
3460 else
3461 BUG(); 3243 BUG();
3462 3244
3463 /* NAT_T_DPORT (new port) */ 3245 /* NAT_T_DPORT (new port) */
@@ -3475,10 +3257,6 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3475 struct xfrm_selector *sel) 3257 struct xfrm_selector *sel)
3476{ 3258{
3477 struct sadb_address *addr; 3259 struct sadb_address *addr;
3478 struct sockaddr_in *sin;
3479#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3480 struct sockaddr_in6 *sin6;
3481#endif
3482 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize); 3260 addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
3483 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8; 3261 addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
3484 addr->sadb_address_exttype = type; 3262 addr->sadb_address_exttype = type;
@@ -3487,50 +3265,16 @@ static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
3487 3265
3488 switch (type) { 3266 switch (type) {
3489 case SADB_EXT_ADDRESS_SRC: 3267 case SADB_EXT_ADDRESS_SRC:
3490 if (sel->family == AF_INET) { 3268 addr->sadb_address_prefixlen = sel->prefixlen_s;
3491 addr->sadb_address_prefixlen = sel->prefixlen_s; 3269 pfkey_sockaddr_fill(&sel->saddr, 0,
3492 sin = (struct sockaddr_in *)(addr + 1); 3270 (struct sockaddr *)(addr + 1),
3493 sin->sin_family = AF_INET; 3271 sel->family);
3494 memcpy(&sin->sin_addr.s_addr, &sel->saddr,
3495 sizeof(sin->sin_addr.s_addr));
3496 sin->sin_port = 0;
3497 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3498 }
3499#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3500 else if (sel->family == AF_INET6) {
3501 addr->sadb_address_prefixlen = sel->prefixlen_s;
3502 sin6 = (struct sockaddr_in6 *)(addr + 1);
3503 sin6->sin6_family = AF_INET6;
3504 sin6->sin6_port = 0;
3505 sin6->sin6_flowinfo = 0;
3506 sin6->sin6_scope_id = 0;
3507 memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
3508 sizeof(sin6->sin6_addr.s6_addr));
3509 }
3510#endif
3511 break; 3272 break;
3512 case SADB_EXT_ADDRESS_DST: 3273 case SADB_EXT_ADDRESS_DST:
3513 if (sel->family == AF_INET) { 3274 addr->sadb_address_prefixlen = sel->prefixlen_d;
3514 addr->sadb_address_prefixlen = sel->prefixlen_d; 3275 pfkey_sockaddr_fill(&sel->daddr, 0,
3515 sin = (struct sockaddr_in *)(addr + 1); 3276 (struct sockaddr *)(addr + 1),
3516 sin->sin_family = AF_INET; 3277 sel->family);
3517 memcpy(&sin->sin_addr.s_addr, &sel->daddr,
3518 sizeof(sin->sin_addr.s_addr));
3519 sin->sin_port = 0;
3520 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
3521 }
3522#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3523 else if (sel->family == AF_INET6) {
3524 addr->sadb_address_prefixlen = sel->prefixlen_d;
3525 sin6 = (struct sockaddr_in6 *)(addr + 1);
3526 sin6->sin6_family = AF_INET6;
3527 sin6->sin6_port = 0;
3528 sin6->sin6_flowinfo = 0;
3529 sin6->sin6_scope_id = 0;
3530 memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
3531 sizeof(sin6->sin6_addr.s6_addr));
3532 }
3533#endif
3534 break; 3278 break;
3535 default: 3279 default:
3536 return -EINVAL; 3280 return -EINVAL;
@@ -3545,10 +3289,8 @@ static int set_ipsecrequest(struct sk_buff *skb,
3545 xfrm_address_t *src, xfrm_address_t *dst) 3289 xfrm_address_t *src, xfrm_address_t *dst)
3546{ 3290{
3547 struct sadb_x_ipsecrequest *rq; 3291 struct sadb_x_ipsecrequest *rq;
3548 struct sockaddr_in *sin; 3292 u8 *sa;
3549#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3293 int socklen = pfkey_sockaddr_len(family);
3550 struct sockaddr_in6 *sin6;
3551#endif
3552 int size_req; 3294 int size_req;
3553 3295
3554 size_req = sizeof(struct sadb_x_ipsecrequest) + 3296 size_req = sizeof(struct sadb_x_ipsecrequest) +
@@ -3562,38 +3304,10 @@ static int set_ipsecrequest(struct sk_buff *skb,
3562 rq->sadb_x_ipsecrequest_level = level; 3304 rq->sadb_x_ipsecrequest_level = level;
3563 rq->sadb_x_ipsecrequest_reqid = reqid; 3305 rq->sadb_x_ipsecrequest_reqid = reqid;
3564 3306
3565 switch (family) { 3307 sa = (u8 *) (rq + 1);
3566 case AF_INET: 3308 if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
3567 sin = (struct sockaddr_in *)(rq + 1); 3309 !pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family))
3568 sin->sin_family = AF_INET;
3569 memcpy(&sin->sin_addr.s_addr, src,
3570 sizeof(sin->sin_addr.s_addr));
3571 sin++;
3572 sin->sin_family = AF_INET;
3573 memcpy(&sin->sin_addr.s_addr, dst,
3574 sizeof(sin->sin_addr.s_addr));
3575 break;
3576#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3577 case AF_INET6:
3578 sin6 = (struct sockaddr_in6 *)(rq + 1);
3579 sin6->sin6_family = AF_INET6;
3580 sin6->sin6_port = 0;
3581 sin6->sin6_flowinfo = 0;
3582 sin6->sin6_scope_id = 0;
3583 memcpy(&sin6->sin6_addr.s6_addr, src,
3584 sizeof(sin6->sin6_addr.s6_addr));
3585 sin6++;
3586 sin6->sin6_family = AF_INET6;
3587 sin6->sin6_port = 0;
3588 sin6->sin6_flowinfo = 0;
3589 sin6->sin6_scope_id = 0;
3590 memcpy(&sin6->sin6_addr.s6_addr, dst,
3591 sizeof(sin6->sin6_addr.s6_addr));
3592 break;
3593#endif
3594 default:
3595 return -EINVAL; 3310 return -EINVAL;
3596 }
3597 3311
3598 return 0; 3312 return 0;
3599} 3313}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 97101dcde4c0..5bcc452a247f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -128,10 +128,8 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
128 128
129static void llc_ui_sk_init(struct socket *sock, struct sock *sk) 129static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
130{ 130{
131 sock_graft(sk, sock);
131 sk->sk_type = sock->type; 132 sk->sk_type = sock->type;
132 sk->sk_sleep = &sock->wait;
133 sk->sk_socket = sock;
134 sock->sk = sk;
135 sock->ops = &llc_ui_ops; 133 sock->ops = &llc_ui_ops;
136} 134}
137 135
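The three hand-rolled assignments that tied the new sock to its socket (sk_sleep, sk_socket, sock->sk) are replaced by a single sock_graft(sk, sock) call. The helper's body is not shown in this diff; roughly, and treating the locking detail as an assumption, it performs the same wiring under the socket's callback lock, along these lines:

    #include <net/sock.h>

    /* Approximate shape only, reconstructed from the assignments it
     * replaces above; example_sock_graft is not the real helper. */
    static inline void example_sock_graft(struct sock *sk, struct socket *parent)
    {
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_sleep = &parent->wait;    /* was: sk->sk_sleep = &sock->wait */
        parent->sk = sk;                 /* was: sock->sk = sk */
        sk->sk_socket = parent;          /* was: sk->sk_socket = sock */
        write_unlock_bh(&sk->sk_callback_lock);
    }

Using the common helper keeps the grafting in one place, so any additional bookkeeping it performs is picked up by every protocol family instead of being open-coded per socket type.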
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a24b459dd45a..11a1e7fa195d 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,14 +7,34 @@ config MAC80211
7 select CRC32 7 select CRC32
8 select WIRELESS_EXT 8 select WIRELESS_EXT
9 select CFG80211 9 select CFG80211
10 select NET_SCH_FIFO
11 ---help--- 10 ---help---
12 This option enables the hardware independent IEEE 802.11 11 This option enables the hardware independent IEEE 802.11
13 networking stack. 12 networking stack.
14 13
14config MAC80211_QOS
15 def_bool y
16 depends on MAC80211
17 depends on NET_SCHED
18 depends on NETDEVICES_MULTIQUEUE
19
20comment "QoS/HT support disabled"
21 depends on MAC80211 && !MAC80211_QOS
22comment "QoS/HT support needs CONFIG_NET_SCHED"
23 depends on MAC80211 && !NET_SCHED
24comment "QoS/HT support needs CONFIG_NETDEVICES_MULTIQUEUE"
25 depends on MAC80211 && !NETDEVICES_MULTIQUEUE
26
15menu "Rate control algorithm selection" 27menu "Rate control algorithm selection"
16 depends on MAC80211 != n 28 depends on MAC80211 != n
17 29
30config MAC80211_RC_PID
31 bool "PID controller based rate control algorithm" if EMBEDDED
32 default y
33 ---help---
34 This option enables a TX rate control algorithm for
35 mac80211 that uses a PID controller to select the TX
36 rate.
37
18choice 38choice
19 prompt "Default rate control algorithm" 39 prompt "Default rate control algorithm"
20 default MAC80211_RC_DEFAULT_PID 40 default MAC80211_RC_DEFAULT_PID
@@ -26,40 +46,19 @@ choice
26 46
27config MAC80211_RC_DEFAULT_PID 47config MAC80211_RC_DEFAULT_PID
28 bool "PID controller based rate control algorithm" 48 bool "PID controller based rate control algorithm"
29 select MAC80211_RC_PID 49 depends on MAC80211_RC_PID
30 ---help--- 50 ---help---
31 Select the PID controller based rate control as the 51 Select the PID controller based rate control as the
32 default rate control algorithm. You should choose 52 default rate control algorithm. You should choose
33 this unless you know what you are doing. 53 this unless you know what you are doing.
34 54
35config MAC80211_RC_DEFAULT_NONE
36 bool "No default algorithm"
37 depends on EMBEDDED
38 help
39 Selecting this option will select no default algorithm
40 and allow you to not build any. Do not choose this
41 option unless you know your driver comes with another
42 suitable algorithm.
43endchoice 55endchoice
44 56
45comment "Selecting 'y' for an algorithm will"
46comment "build the algorithm into mac80211."
47
48config MAC80211_RC_DEFAULT 57config MAC80211_RC_DEFAULT
49 string 58 string
50 default "pid" if MAC80211_RC_DEFAULT_PID 59 default "pid" if MAC80211_RC_DEFAULT_PID
51 default "" 60 default ""
52 61
53config MAC80211_RC_PID
54 tristate "PID controller based rate control algorithm"
55 ---help---
56 This option enables a TX rate control algorithm for
57 mac80211 that uses a PID controller to select the TX
58 rate.
59
60 Say Y or M unless you're sure you want to use a
61 different rate control algorithm.
62
63endmenu 62endmenu
64 63
65config MAC80211_MESH 64config MAC80211_MESH
@@ -89,10 +88,16 @@ config MAC80211_DEBUGFS
89 88
90 Say N unless you know you need this. 89 Say N unless you know you need this.
91 90
91menuconfig MAC80211_DEBUG_MENU
92 bool "Select mac80211 debugging features"
93 depends on MAC80211
94 ---help---
95 This option collects various mac80211 debug settings.
96
92config MAC80211_DEBUG_PACKET_ALIGNMENT 97config MAC80211_DEBUG_PACKET_ALIGNMENT
93 bool "Enable packet alignment debugging" 98 bool "Enable packet alignment debugging"
94 depends on MAC80211 99 depends on MAC80211_DEBUG_MENU
95 help 100 ---help---
96 This option is recommended for driver authors and strongly 101 This option is recommended for driver authors and strongly
97 discouraged for everybody else, it will trigger a warning 102 discouraged for everybody else, it will trigger a warning
98 when a driver hands mac80211 a buffer that is aligned in 103 when a driver hands mac80211 a buffer that is aligned in
@@ -101,33 +106,95 @@ config MAC80211_DEBUG_PACKET_ALIGNMENT
101 106
102 Say N unless you're writing a mac80211 based driver. 107 Say N unless you're writing a mac80211 based driver.
103 108
104config MAC80211_DEBUG 109config MAC80211_NOINLINE
105 bool "Enable debugging output" 110 bool "Do not inline TX/RX handlers"
106 depends on MAC80211 111 depends on MAC80211_DEBUG_MENU
112 ---help---
113 This option affects code generation in mac80211, when
114 selected some functions are marked "noinline" to allow
115 easier debugging of problems in the transmit and receive
116 paths.
117
118 This option increases code size a bit and inserts a lot
119 of function calls in the code, but is otherwise safe to
120 enable.
121
122 If unsure, say N unless you expect to be finding problems
123 in mac80211.
124
125config MAC80211_VERBOSE_DEBUG
126 bool "Verbose debugging output"
127 depends on MAC80211_DEBUG_MENU
107 ---help--- 128 ---help---
108 This option will enable debug tracing output for the 129 Selecting this option causes mac80211 to print out
109 ieee80211 network stack. 130 many debugging messages. It should not be selected
131 on production systems as some of the messages are
132 remotely triggerable.
110 133
111 If you are not trying to debug or develop the ieee80211 134 Do not select this option.
112 subsystem, you most likely want to say N here.
113 135
114config MAC80211_HT_DEBUG 136config MAC80211_HT_DEBUG
115 bool "Enable HT debugging output" 137 bool "Verbose HT debugging"
116 depends on MAC80211_DEBUG 138 depends on MAC80211_DEBUG_MENU
117 ---help--- 139 ---help---
118 This option enables 802.11n High Throughput features 140 This option enables 802.11n High Throughput features
119 debug tracing output. 141 debug tracing output.
120 142
121 If you are not trying to debug of develop the ieee80211 143 It should not be selected on production systems as some
122 subsystem, you most likely want to say N here. 144 of the messages are remotely triggerable.
123 145
124config MAC80211_VERBOSE_DEBUG 146 Do not select this option.
125 bool "Verbose debugging output" 147
126 depends on MAC80211_DEBUG 148config MAC80211_TKIP_DEBUG
149 bool "Verbose TKIP debugging"
150 depends on MAC80211_DEBUG_MENU
151 ---help---
152 Selecting this option causes mac80211 to print out
153 very verbose TKIP debugging messages. It should not
154 be selected on production systems as those messages
155 are remotely triggerable.
156
157 Do not select this option.
158
159config MAC80211_IBSS_DEBUG
160 bool "Verbose IBSS debugging"
161 depends on MAC80211_DEBUG_MENU
162 ---help---
163 Selecting this option causes mac80211 to print out
164 very verbose IBSS debugging messages. It should not
165 be selected on production systems as those messages
166 are remotely triggerable.
167
168 Do not select this option.
169
170config MAC80211_VERBOSE_PS_DEBUG
171 bool "Verbose powersave mode debugging"
172 depends on MAC80211_DEBUG_MENU
173 ---help---
174 Selecting this option causes mac80211 to print out very
175 verbose power save mode debugging messages (when mac80211
176 is an AP and has power saving stations.)
177 It should not be selected on production systems as those
178 messages are remotely triggerable.
179
180 Do not select this option.
181
182config MAC80211_VERBOSE_MPL_DEBUG
183 bool "Verbose mesh peer link debugging"
184 depends on MAC80211_DEBUG_MENU
185 depends on MAC80211_MESH
186 ---help---
187 Selecting this option causes mac80211 to print out very
188 verbose mesh peer link debugging messages (when mac80211
189 is taking part in a mesh network).
190 It should not be selected on production systems as those
191 messages are remotely triggerable.
192
193 Do not select this option.
127 194
128config MAC80211_LOWTX_FRAME_DUMP 195config MAC80211_LOWTX_FRAME_DUMP
129 bool "Debug frame dumping" 196 bool "Debug frame dumping"
130 depends on MAC80211_DEBUG 197 depends on MAC80211_DEBUG_MENU
131 ---help--- 198 ---help---
132 Selecting this option will cause the stack to 199 Selecting this option will cause the stack to
133 print a message for each frame that is handed 200 print a message for each frame that is handed
@@ -138,30 +205,21 @@ config MAC80211_LOWTX_FRAME_DUMP
138 If unsure, say N and insert the debugging code 205 If unsure, say N and insert the debugging code
139 you require into the driver you are debugging. 206 you require into the driver you are debugging.
140 207
141config TKIP_DEBUG
142 bool "TKIP debugging"
143 depends on MAC80211_DEBUG
144
145config MAC80211_DEBUG_COUNTERS 208config MAC80211_DEBUG_COUNTERS
146 bool "Extra statistics for TX/RX debugging" 209 bool "Extra statistics for TX/RX debugging"
147 depends on MAC80211_DEBUG 210 depends on MAC80211_DEBUG
148 211 depends on MAC80211_DEBUG_MENU
149config MAC80211_IBSS_DEBUG 212 depends on MAC80211_DEBUGFS
150 bool "Support for IBSS testing"
151 depends on MAC80211_DEBUG
152 ---help--- 213 ---help---
153 Say Y here if you intend to debug the IBSS code. 214 Selecting this option causes mac80211 to keep additional
215 and very verbose statistics about TX and RX handler use
216 and show them in debugfs.
154 217
155config MAC80211_VERBOSE_PS_DEBUG 218 If unsure, say N.
156 bool "Verbose powersave mode debugging"
157 depends on MAC80211_DEBUG
158 ---help---
159 Say Y here to print out verbose powersave
160 mode debug messages.
161 219
162config MAC80211_VERBOSE_MPL_DEBUG 220config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
163 bool "Verbose mesh peer link debugging" 221 bool "Verbose Spectrum Management (IEEE 802.11h)debugging"
164 depends on MAC80211_DEBUG && MAC80211_MESH 222 depends on MAC80211_DEBUG
165 ---help--- 223 ---help---
166 Say Y here to print out verbose mesh peer link 224 Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
167 debug messages. 225 debug messages.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4e5847fd316c..fa47438e338f 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -1,13 +1,5 @@
1obj-$(CONFIG_MAC80211) += mac80211.o 1obj-$(CONFIG_MAC80211) += mac80211.o
2 2
3# objects for PID algorithm
4rc80211_pid-y := rc80211_pid_algo.o
5rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
6
7# build helper for PID algorithm
8rc-pid-y := $(rc80211_pid-y)
9rc-pid-m := rc80211_pid.o
10
11# mac80211 objects 3# mac80211 objects
12mac80211-y := \ 4mac80211-y := \
13 main.o \ 5 main.o \
@@ -29,7 +21,7 @@ mac80211-y := \
29 event.o 21 event.o
30 22
31mac80211-$(CONFIG_MAC80211_LEDS) += led.o 23mac80211-$(CONFIG_MAC80211_LEDS) += led.o
32mac80211-$(CONFIG_NET_SCHED) += wme.o 24mac80211-$(CONFIG_MAC80211_QOS) += wme.o
33mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ 25mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
34 debugfs.o \ 26 debugfs.o \
35 debugfs_sta.o \ 27 debugfs_sta.o \
@@ -42,10 +34,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
42 mesh_plink.o \ 34 mesh_plink.o \
43 mesh_hwmp.o 35 mesh_hwmp.o
44 36
37# objects for PID algorithm
38rc80211_pid-y := rc80211_pid_algo.o
39rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
45 40
46# Build rate control algorithm(s) 41mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
47CFLAGS_rc80211_pid_algo.o += -DRC80211_PID_COMPILE
48mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc-pid-$(CONFIG_MAC80211_RC_PID))
49
50# Modular rate algorithms are assigned to mac80211-m - make separate modules
51obj-m += $(mac80211-m)
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 59f1691f62c8..4d4c2dfcf9a0 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -134,7 +134,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
134} 134}
135 135
136 136
137struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]) 137struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
138{ 138{
139 struct crypto_cipher *tfm; 139 struct crypto_cipher *tfm;
140 140
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 885f19030b29..8cd0f14aab4d 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -14,7 +14,7 @@
14 14
15#define AES_BLOCK_LEN 16 15#define AES_BLOCK_LEN 16
16 16
17struct crypto_cipher * ieee80211_aes_key_setup_encrypt(const u8 key[]); 17struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, 18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
19 u8 *b_0, u8 *aad, u8 *data, size_t data_len, 19 u8 *b_0, u8 *aad, u8 *data, size_t data_len,
20 u8 *cdata, u8 *mic); 20 u8 *cdata, u8 *mic);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a9fce4afdf21..81087281b031 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -256,8 +256,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
256 case ALG_TKIP: 256 case ALG_TKIP:
257 params.cipher = WLAN_CIPHER_SUITE_TKIP; 257 params.cipher = WLAN_CIPHER_SUITE_TKIP;
258 258
259 iv32 = key->u.tkip.iv32; 259 iv32 = key->u.tkip.tx.iv32;
260 iv16 = key->u.tkip.iv16; 260 iv16 = key->u.tkip.tx.iv16;
261 261
262 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && 262 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
263 sdata->local->ops->get_tkip_seq) 263 sdata->local->ops->get_tkip_seq)
@@ -602,6 +602,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
602 */ 602 */
603 603
604 if (params->station_flags & STATION_FLAG_CHANGED) { 604 if (params->station_flags & STATION_FLAG_CHANGED) {
605 spin_lock_bh(&sta->lock);
605 sta->flags &= ~WLAN_STA_AUTHORIZED; 606 sta->flags &= ~WLAN_STA_AUTHORIZED;
606 if (params->station_flags & STATION_FLAG_AUTHORIZED) 607 if (params->station_flags & STATION_FLAG_AUTHORIZED)
607 sta->flags |= WLAN_STA_AUTHORIZED; 608 sta->flags |= WLAN_STA_AUTHORIZED;
@@ -613,6 +614,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
613 sta->flags &= ~WLAN_STA_WME; 614 sta->flags &= ~WLAN_STA_WME;
614 if (params->station_flags & STATION_FLAG_WME) 615 if (params->station_flags & STATION_FLAG_WME)
615 sta->flags |= WLAN_STA_WME; 616 sta->flags |= WLAN_STA_WME;
617 spin_unlock_bh(&sta->lock);
616 } 618 }
617 619
618 /* 620 /*
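The new spin_lock_bh(&sta->lock)/spin_unlock_bh() pair brackets the read-modify-write of sta->flags; the choice of a _bh lock suggests the same flag word is also updated from packet-processing (softirq) context, so the update must not be interleaved with it. A generic sketch of the pattern (example_sta and the 0x1 bit are stand-ins, not mac80211's struct sta_info or WLAN_STA_AUTHORIZED):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_sta {
        spinlock_t lock;    /* protects flags against softirq updates */
        u32 flags;
    };

    static void example_set_authorized(struct example_sta *sta, bool on)
    {
        spin_lock_bh(&sta->lock);    /* also disables softirqs locally */
        if (on)
            sta->flags |= 0x1;
        else
            sta->flags &= ~0x1;
        spin_unlock_bh(&sta->lock);
    }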
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 1cccbfd781f6..d20d90eead1f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -197,45 +197,6 @@ DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
197DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", 197DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
198 local->tx_status_drop); 198 local->tx_status_drop);
199 199
200static ssize_t stats_wme_rx_queue_read(struct file *file,
201 char __user *userbuf,
202 size_t count, loff_t *ppos)
203{
204 struct ieee80211_local *local = file->private_data;
205 char buf[NUM_RX_DATA_QUEUES*15], *p = buf;
206 int i;
207
208 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
209 p += scnprintf(p, sizeof(buf)+buf-p,
210 "%u\n", local->wme_rx_queue[i]);
211
212 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
213}
214
215static const struct file_operations stats_wme_rx_queue_ops = {
216 .read = stats_wme_rx_queue_read,
217 .open = mac80211_open_file_generic,
218};
219
220static ssize_t stats_wme_tx_queue_read(struct file *file,
221 char __user *userbuf,
222 size_t count, loff_t *ppos)
223{
224 struct ieee80211_local *local = file->private_data;
225 char buf[NUM_TX_DATA_QUEUES*15], *p = buf;
226 int i;
227
228 for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
229 p += scnprintf(p, sizeof(buf)+buf-p,
230 "%u\n", local->wme_tx_queue[i]);
231
232 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
233}
234
235static const struct file_operations stats_wme_tx_queue_ops = {
236 .read = stats_wme_tx_queue_read,
237 .open = mac80211_open_file_generic,
238};
239#endif 200#endif
240 201
241DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); 202DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
@@ -303,8 +264,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
303 DEBUGFS_STATS_ADD(rx_expand_skb_head2); 264 DEBUGFS_STATS_ADD(rx_expand_skb_head2);
304 DEBUGFS_STATS_ADD(rx_handlers_fragments); 265 DEBUGFS_STATS_ADD(rx_handlers_fragments);
305 DEBUGFS_STATS_ADD(tx_status_drop); 266 DEBUGFS_STATS_ADD(tx_status_drop);
306 DEBUGFS_STATS_ADD(wme_tx_queue);
307 DEBUGFS_STATS_ADD(wme_rx_queue);
308#endif 267#endif
309 DEBUGFS_STATS_ADD(dot11ACKFailureCount); 268 DEBUGFS_STATS_ADD(dot11ACKFailureCount);
310 DEBUGFS_STATS_ADD(dot11RTSFailureCount); 269 DEBUGFS_STATS_ADD(dot11RTSFailureCount);
@@ -356,8 +315,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
356 DEBUGFS_STATS_DEL(rx_expand_skb_head2); 315 DEBUGFS_STATS_DEL(rx_expand_skb_head2);
357 DEBUGFS_STATS_DEL(rx_handlers_fragments); 316 DEBUGFS_STATS_DEL(rx_handlers_fragments);
358 DEBUGFS_STATS_DEL(tx_status_drop); 317 DEBUGFS_STATS_DEL(tx_status_drop);
359 DEBUGFS_STATS_DEL(wme_tx_queue);
360 DEBUGFS_STATS_DEL(wme_rx_queue);
361#endif 318#endif
362 DEBUGFS_STATS_DEL(dot11ACKFailureCount); 319 DEBUGFS_STATS_DEL(dot11ACKFailureCount);
363 DEBUGFS_STATS_DEL(dot11RTSFailureCount); 320 DEBUGFS_STATS_DEL(dot11RTSFailureCount);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 19efc3a6a932..7439b63df5d0 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -97,8 +97,8 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
97 break; 97 break;
98 case ALG_TKIP: 98 case ALG_TKIP:
99 len = scnprintf(buf, sizeof(buf), "%08x %04x\n", 99 len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
100 key->u.tkip.iv32, 100 key->u.tkip.tx.iv32,
101 key->u.tkip.iv16); 101 key->u.tkip.tx.iv16);
102 break; 102 break;
103 case ALG_CCMP: 103 case ALG_CCMP:
104 tpn = key->u.ccmp.tx_pn; 104 tpn = key->u.ccmp.tx_pn;
@@ -128,8 +128,8 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
128 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 128 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
129 p += scnprintf(p, sizeof(buf)+buf-p, 129 p += scnprintf(p, sizeof(buf)+buf-p,
130 "%08x %04x\n", 130 "%08x %04x\n",
131 key->u.tkip.iv32_rx[i], 131 key->u.tkip.rx[i].iv32,
132 key->u.tkip.iv16_rx[i]); 132 key->u.tkip.rx[i].iv16);
133 len = p - buf; 133 len = p - buf;
134 break; 134 break;
135 case ALG_CCMP: 135 case ALG_CCMP:
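The debugfs reads above now go through key->u.tkip.tx.iv32/iv16 and key->u.tkip.rx[i].iv32/iv16 rather than the old flat iv32/iv16/iv32_rx[]/iv16_rx[] fields; the TKIP IV state has been grouped into one transmit context plus one context per receive queue. A hypothetical reconstruction of that grouping, inferred only from the accessors visible in these hunks (the real mac80211 structure may carry additional state):

    #include <linux/types.h>

    /* NUM_RX_DATA_QUEUES is mac80211's per-station RX queue count,
     * as used in the loop above. */
    struct example_tkip_ctx {
        u32 iv32;    /* high 32 bits of the last IV */
        u16 iv16;    /* low 16 bits of the last IV */
    };

    struct example_tkip_key_state {
        struct example_tkip_ctx tx;
        struct example_tkip_ctx rx[NUM_RX_DATA_QUEUES];
    };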
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e3326d046944..b2089b2da48a 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -155,7 +155,6 @@ static const struct file_operations name##_ops = { \
155 __IEEE80211_IF_WFILE(name) 155 __IEEE80211_IF_WFILE(name)
156 156
157/* common attributes */ 157/* common attributes */
158IEEE80211_IF_FILE(channel_use, channel_use, DEC);
159IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 158IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
160 159
161/* STA/IBSS attributes */ 160/* STA/IBSS attributes */
@@ -248,7 +247,6 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
248 247
249static void add_sta_files(struct ieee80211_sub_if_data *sdata) 248static void add_sta_files(struct ieee80211_sub_if_data *sdata)
250{ 249{
251 DEBUGFS_ADD(channel_use, sta);
252 DEBUGFS_ADD(drop_unencrypted, sta); 250 DEBUGFS_ADD(drop_unencrypted, sta);
253 DEBUGFS_ADD(state, sta); 251 DEBUGFS_ADD(state, sta);
254 DEBUGFS_ADD(bssid, sta); 252 DEBUGFS_ADD(bssid, sta);
@@ -269,7 +267,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
269 267
270static void add_ap_files(struct ieee80211_sub_if_data *sdata) 268static void add_ap_files(struct ieee80211_sub_if_data *sdata)
271{ 269{
272 DEBUGFS_ADD(channel_use, ap);
273 DEBUGFS_ADD(drop_unencrypted, ap); 270 DEBUGFS_ADD(drop_unencrypted, ap);
274 DEBUGFS_ADD(num_sta_ps, ap); 271 DEBUGFS_ADD(num_sta_ps, ap);
275 DEBUGFS_ADD(dtim_count, ap); 272 DEBUGFS_ADD(dtim_count, ap);
@@ -281,14 +278,12 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
281 278
282static void add_wds_files(struct ieee80211_sub_if_data *sdata) 279static void add_wds_files(struct ieee80211_sub_if_data *sdata)
283{ 280{
284 DEBUGFS_ADD(channel_use, wds);
285 DEBUGFS_ADD(drop_unencrypted, wds); 281 DEBUGFS_ADD(drop_unencrypted, wds);
286 DEBUGFS_ADD(peer, wds); 282 DEBUGFS_ADD(peer, wds);
287} 283}
288 284
289static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 285static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
290{ 286{
291 DEBUGFS_ADD(channel_use, vlan);
292 DEBUGFS_ADD(drop_unencrypted, vlan); 287 DEBUGFS_ADD(drop_unencrypted, vlan);
293} 288}
294 289
@@ -376,7 +371,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
376 371
377static void del_sta_files(struct ieee80211_sub_if_data *sdata) 372static void del_sta_files(struct ieee80211_sub_if_data *sdata)
378{ 373{
379 DEBUGFS_DEL(channel_use, sta);
380 DEBUGFS_DEL(drop_unencrypted, sta); 374 DEBUGFS_DEL(drop_unencrypted, sta);
381 DEBUGFS_DEL(state, sta); 375 DEBUGFS_DEL(state, sta);
382 DEBUGFS_DEL(bssid, sta); 376 DEBUGFS_DEL(bssid, sta);
@@ -397,7 +391,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
397 391
398static void del_ap_files(struct ieee80211_sub_if_data *sdata) 392static void del_ap_files(struct ieee80211_sub_if_data *sdata)
399{ 393{
400 DEBUGFS_DEL(channel_use, ap);
401 DEBUGFS_DEL(drop_unencrypted, ap); 394 DEBUGFS_DEL(drop_unencrypted, ap);
402 DEBUGFS_DEL(num_sta_ps, ap); 395 DEBUGFS_DEL(num_sta_ps, ap);
403 DEBUGFS_DEL(dtim_count, ap); 396 DEBUGFS_DEL(dtim_count, ap);
@@ -409,14 +402,12 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
409 402
410static void del_wds_files(struct ieee80211_sub_if_data *sdata) 403static void del_wds_files(struct ieee80211_sub_if_data *sdata)
411{ 404{
412 DEBUGFS_DEL(channel_use, wds);
413 DEBUGFS_DEL(drop_unencrypted, wds); 405 DEBUGFS_DEL(drop_unencrypted, wds);
414 DEBUGFS_DEL(peer, wds); 406 DEBUGFS_DEL(peer, wds);
415} 407}
416 408
417static void del_vlan_files(struct ieee80211_sub_if_data *sdata) 409static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
418{ 410{
419 DEBUGFS_DEL(channel_use, vlan);
420 DEBUGFS_DEL(drop_unencrypted, vlan); 411 DEBUGFS_DEL(drop_unencrypted, vlan);
421} 412}
422 413
@@ -528,7 +519,7 @@ void ieee80211_debugfs_change_if_type(struct ieee80211_sub_if_data *sdata,
528 add_files(sdata); 519 add_files(sdata);
529} 520}
530 521
531static int netdev_notify(struct notifier_block * nb, 522static int netdev_notify(struct notifier_block *nb,
532 unsigned long state, 523 unsigned long state,
533 void *ndev) 524 void *ndev)
534{ 525{
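Editor's sketch for context: the blocks removed above are generated by the IEEE80211_IF_FILE()/DEBUGFS_ADD() macros. The snippet below shows roughly what one such read-only per-interface attribute expands to in spirit; it is illustrative only and not part of the patch, and the open helper, the 0400 mode and the use of sdata->drop_unencrypted are assumptions based on the surrounding hunks.

	#include <linux/debugfs.h>
	#include "ieee80211_i.h"

	static int example_if_open(struct inode *inode, struct file *file)
	{
		file->private_data = inode->i_private;	/* the sdata pointer */
		return 0;
	}

	static ssize_t example_if_read(struct file *file, char __user *userbuf,
				       size_t count, loff_t *ppos)
	{
		struct ieee80211_sub_if_data *sdata = file->private_data;
		char buf[32];
		int res = scnprintf(buf, sizeof(buf), "%d\n",
				    sdata->drop_unencrypted);

		return simple_read_from_buffer(userbuf, count, ppos, buf, res);
	}

	static const struct file_operations example_if_ops = {
		.read = example_if_read,
		.open = example_if_open,
	};

	static void example_add_sta_files(struct ieee80211_sub_if_data *sdata)
	{
		/* what DEBUGFS_ADD(drop_unencrypted, sta) roughly boils down to */
		debugfs_create_file("drop_unencrypted", 0400, sdata->debugfsdir,
				    sdata, &example_if_ops);
	}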
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6d47a1d31b37..79a062782d52 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,9 @@ STA_FILE(tx_fragments, tx_fragments, LU);
63STA_FILE(tx_filtered, tx_filtered_count, LU); 63STA_FILE(tx_filtered, tx_filtered_count, LU);
64STA_FILE(tx_retry_failed, tx_retry_failed, LU); 64STA_FILE(tx_retry_failed, tx_retry_failed, LU);
65STA_FILE(tx_retry_count, tx_retry_count, LU); 65STA_FILE(tx_retry_count, tx_retry_count, LU);
66STA_FILE(last_rssi, last_rssi, D);
67STA_FILE(last_signal, last_signal, D); 66STA_FILE(last_signal, last_signal, D);
67STA_FILE(last_qual, last_qual, D);
68STA_FILE(last_noise, last_noise, D); 68STA_FILE(last_noise, last_noise, D);
69STA_FILE(channel_use, channel_use, D);
70STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); 69STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
71 70
72static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 71static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -74,14 +73,15 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
74{ 73{
75 char buf[100]; 74 char buf[100];
76 struct sta_info *sta = file->private_data; 75 struct sta_info *sta = file->private_data;
76 u32 staflags = get_sta_flags(sta);
77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s", 77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
78 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", 78 staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
79 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 79 staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
80 sta->flags & WLAN_STA_PS ? "PS\n" : "", 80 staflags & WLAN_STA_PS ? "PS\n" : "",
81 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 81 staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
82 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 82 staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
83 sta->flags & WLAN_STA_WME ? "WME\n" : "", 83 staflags & WLAN_STA_WME ? "WME\n" : "",
84 sta->flags & WLAN_STA_WDS ? "WDS\n" : ""); 84 staflags & WLAN_STA_WDS ? "WDS\n" : "");
85 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 85 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
86} 86}
87STA_OPS(flags); 87STA_OPS(flags);
@@ -123,36 +123,6 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
123} 123}
124STA_OPS(last_seq_ctrl); 124STA_OPS(last_seq_ctrl);
125 125
126#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
127static ssize_t sta_wme_rx_queue_read(struct file *file, char __user *userbuf,
128 size_t count, loff_t *ppos)
129{
130 char buf[15*NUM_RX_DATA_QUEUES], *p = buf;
131 int i;
132 struct sta_info *sta = file->private_data;
133 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
134 p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
135 sta->wme_rx_queue[i]);
136 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
137 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
138}
139STA_OPS(wme_rx_queue);
140
141static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf,
142 size_t count, loff_t *ppos)
143{
144 char buf[15*NUM_TX_DATA_QUEUES], *p = buf;
145 int i;
146 struct sta_info *sta = file->private_data;
147 for (i = 0; i < NUM_TX_DATA_QUEUES; i++)
148 p += scnprintf(p, sizeof(buf)+buf-p, "%u ",
149 sta->wme_tx_queue[i]);
150 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
151 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
152}
153STA_OPS(wme_tx_queue);
154#endif
155
156static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 126static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
157 size_t count, loff_t *ppos) 127 size_t count, loff_t *ppos)
158{ 128{
@@ -293,10 +263,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
293 DEBUGFS_ADD(num_ps_buf_frames); 263 DEBUGFS_ADD(num_ps_buf_frames);
294 DEBUGFS_ADD(inactive_ms); 264 DEBUGFS_ADD(inactive_ms);
295 DEBUGFS_ADD(last_seq_ctrl); 265 DEBUGFS_ADD(last_seq_ctrl);
296#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
297 DEBUGFS_ADD(wme_rx_queue);
298 DEBUGFS_ADD(wme_tx_queue);
299#endif
300 DEBUGFS_ADD(agg_status); 266 DEBUGFS_ADD(agg_status);
301} 267}
302 268
@@ -306,10 +272,6 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
306 DEBUGFS_DEL(num_ps_buf_frames); 272 DEBUGFS_DEL(num_ps_buf_frames);
307 DEBUGFS_DEL(inactive_ms); 273 DEBUGFS_DEL(inactive_ms);
308 DEBUGFS_DEL(last_seq_ctrl); 274 DEBUGFS_DEL(last_seq_ctrl);
309#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
310 DEBUGFS_DEL(wme_rx_queue);
311 DEBUGFS_DEL(wme_tx_queue);
312#endif
313 DEBUGFS_DEL(agg_status); 275 DEBUGFS_DEL(agg_status);
314 276
315 debugfs_remove(sta->debugfs.dir); 277 debugfs_remove(sta->debugfs.dir);
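Editor's sketch for context: sta_flags_read() above now goes through get_sta_flags() instead of reading sta->flags directly, matching the locking rework elsewhere in this series. Below is one plausible shape for such accessors; the real helpers live in net/mac80211/sta_info.h and the exact lock and primitives may differ, so treat this as an assumption rather than a copy.

	/* minimal sketch of lock-protected station flag accessors (assumed) */
	static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
	{
		spin_lock_bh(&sta->lock);
		sta->flags |= flags;
		spin_unlock_bh(&sta->lock);
	}

	static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
	{
		u32 ret;

		spin_lock_bh(&sta->lock);
		ret = sta->flags & flags;
		spin_unlock_bh(&sta->lock);

		return ret;
	}

	static inline u32 get_sta_flags(struct sta_info *sta)
	{
		return test_sta_flags(sta, ~0u);	/* snapshot of all flag bits */
	}

The point of the indirection is that every reader and writer of the flag word serializes on the same lock, so debugfs never observes a torn update.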
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 006486b26726..f90da1bbec49 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,6 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -23,6 +24,7 @@
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
25#include <net/wireless.h> 26#include <net/wireless.h>
27#include <net/iw_handler.h>
26#include "key.h" 28#include "key.h"
27#include "sta_info.h" 29#include "sta_info.h"
28 30
@@ -82,7 +84,7 @@ struct ieee80211_sta_bss {
82 u16 capability; /* host byte order */ 84 u16 capability; /* host byte order */
83 enum ieee80211_band band; 85 enum ieee80211_band band;
84 int freq; 86 int freq;
85 int rssi, signal, noise; 87 int signal, noise, qual;
86 u8 *wpa_ie; 88 u8 *wpa_ie;
87 size_t wpa_ie_len; 89 size_t wpa_ie_len;
88 u8 *rsn_ie; 90 u8 *rsn_ie;
@@ -91,6 +93,8 @@ struct ieee80211_sta_bss {
91 size_t wmm_ie_len; 93 size_t wmm_ie_len;
92 u8 *ht_ie; 94 u8 *ht_ie;
93 size_t ht_ie_len; 95 size_t ht_ie_len;
96 u8 *ht_add_ie;
97 size_t ht_add_ie_len;
94#ifdef CONFIG_MAC80211_MESH 98#ifdef CONFIG_MAC80211_MESH
95 u8 *mesh_id; 99 u8 *mesh_id;
96 size_t mesh_id_len; 100 size_t mesh_id_len;
@@ -147,7 +151,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
147#define IEEE80211_TX_UNICAST BIT(1) 151#define IEEE80211_TX_UNICAST BIT(1)
148#define IEEE80211_TX_PS_BUFFERED BIT(2) 152#define IEEE80211_TX_PS_BUFFERED BIT(2)
149#define IEEE80211_TX_PROBE_LAST_FRAG BIT(3) 153#define IEEE80211_TX_PROBE_LAST_FRAG BIT(3)
150#define IEEE80211_TX_INJECTED BIT(4)
151 154
152struct ieee80211_tx_data { 155struct ieee80211_tx_data {
153 struct sk_buff *skb; 156 struct sk_buff *skb;
@@ -157,13 +160,12 @@ struct ieee80211_tx_data {
157 struct sta_info *sta; 160 struct sta_info *sta;
158 struct ieee80211_key *key; 161 struct ieee80211_key *key;
159 162
160 struct ieee80211_tx_control *control;
161 struct ieee80211_channel *channel; 163 struct ieee80211_channel *channel;
162 struct ieee80211_rate *rate; 164 s8 rate_idx;
163 /* use this rate (if set) for last fragment; rate can 165 /* use this rate (if set) for last fragment; rate can
164 * be set to lower rate for the first fragments, e.g., 166 * be set to lower rate for the first fragments, e.g.,
165 * when using CTS protection with IEEE 802.11g. */ 167 * when using CTS protection with IEEE 802.11g. */
166 struct ieee80211_rate *last_frag_rate; 168 s8 last_frag_rate_idx;
167 169
168 /* Extra fragments (in addition to the first fragment 170 /* Extra fragments (in addition to the first fragment
169 * in skb) */ 171 * in skb) */
@@ -202,32 +204,16 @@ struct ieee80211_rx_data {
202 unsigned int flags; 204 unsigned int flags;
203 int sent_ps_buffered; 205 int sent_ps_buffered;
204 int queue; 206 int queue;
205 int load;
206 u32 tkip_iv32; 207 u32 tkip_iv32;
207 u16 tkip_iv16; 208 u16 tkip_iv16;
208}; 209};
209 210
210/* flags used in struct ieee80211_tx_packet_data.flags */
211#define IEEE80211_TXPD_REQ_TX_STATUS BIT(0)
212#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1)
213#define IEEE80211_TXPD_REQUEUE BIT(2)
214#define IEEE80211_TXPD_EAPOL_FRAME BIT(3)
215#define IEEE80211_TXPD_AMPDU BIT(4)
216/* Stored in sk_buff->cb */
217struct ieee80211_tx_packet_data {
218 int ifindex;
219 unsigned long jiffies;
220 unsigned int flags;
221 u8 queue;
222};
223
224struct ieee80211_tx_stored_packet { 211struct ieee80211_tx_stored_packet {
225 struct ieee80211_tx_control control;
226 struct sk_buff *skb; 212 struct sk_buff *skb;
227 struct sk_buff **extra_frag; 213 struct sk_buff **extra_frag;
228 struct ieee80211_rate *last_frag_rate; 214 s8 last_frag_rate_idx;
229 int num_extra_frag; 215 int num_extra_frag;
230 unsigned int last_frag_rate_ctrl_probe; 216 bool last_frag_rate_ctrl_probe;
231}; 217};
232 218
233struct beacon_data { 219struct beacon_data {
@@ -464,14 +450,11 @@ struct ieee80211_sub_if_data {
464 struct ieee80211_if_sta sta; 450 struct ieee80211_if_sta sta;
465 u32 mntr_flags; 451 u32 mntr_flags;
466 } u; 452 } u;
467 int channel_use;
468 int channel_use_raw;
469 453
470#ifdef CONFIG_MAC80211_DEBUGFS 454#ifdef CONFIG_MAC80211_DEBUGFS
471 struct dentry *debugfsdir; 455 struct dentry *debugfsdir;
472 union { 456 union {
473 struct { 457 struct {
474 struct dentry *channel_use;
475 struct dentry *drop_unencrypted; 458 struct dentry *drop_unencrypted;
476 struct dentry *state; 459 struct dentry *state;
477 struct dentry *bssid; 460 struct dentry *bssid;
@@ -490,7 +473,6 @@ struct ieee80211_sub_if_data {
490 struct dentry *num_beacons_sta; 473 struct dentry *num_beacons_sta;
491 } sta; 474 } sta;
492 struct { 475 struct {
493 struct dentry *channel_use;
494 struct dentry *drop_unencrypted; 476 struct dentry *drop_unencrypted;
495 struct dentry *num_sta_ps; 477 struct dentry *num_sta_ps;
496 struct dentry *dtim_count; 478 struct dentry *dtim_count;
@@ -500,12 +482,10 @@ struct ieee80211_sub_if_data {
500 struct dentry *num_buffered_multicast; 482 struct dentry *num_buffered_multicast;
501 } ap; 483 } ap;
502 struct { 484 struct {
503 struct dentry *channel_use;
504 struct dentry *drop_unencrypted; 485 struct dentry *drop_unencrypted;
505 struct dentry *peer; 486 struct dentry *peer;
506 } wds; 487 } wds;
507 struct { 488 struct {
508 struct dentry *channel_use;
509 struct dentry *drop_unencrypted; 489 struct dentry *drop_unencrypted;
510 } vlan; 490 } vlan;
511 struct { 491 struct {
@@ -610,8 +590,8 @@ struct ieee80211_local {
610 struct sta_info *sta_hash[STA_HASH_SIZE]; 590 struct sta_info *sta_hash[STA_HASH_SIZE];
611 struct timer_list sta_cleanup; 591 struct timer_list sta_cleanup;
612 592
613 unsigned long state[NUM_TX_DATA_QUEUES_AMPDU]; 593 unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
614 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU]; 594 struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
615 struct tasklet_struct tx_pending_tasklet; 595 struct tasklet_struct tx_pending_tasklet;
616 596
617 /* number of interfaces with corresponding IFF_ flags */ 597 /* number of interfaces with corresponding IFF_ flags */
@@ -677,9 +657,6 @@ struct ieee80211_local {
677 assoc_led_name[32], radio_led_name[32]; 657 assoc_led_name[32], radio_led_name[32];
678#endif 658#endif
679 659
680 u32 channel_use;
681 u32 channel_use_raw;
682
683#ifdef CONFIG_MAC80211_DEBUGFS 660#ifdef CONFIG_MAC80211_DEBUGFS
684 struct work_struct sta_debugfs_add; 661 struct work_struct sta_debugfs_add;
685#endif 662#endif
@@ -705,8 +682,6 @@ struct ieee80211_local {
705 unsigned int rx_expand_skb_head2; 682 unsigned int rx_expand_skb_head2;
706 unsigned int rx_handlers_fragments; 683 unsigned int rx_handlers_fragments;
707 unsigned int tx_status_drop; 684 unsigned int tx_status_drop;
708 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
709 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
710#define I802_DEBUG_INC(c) (c)++ 685#define I802_DEBUG_INC(c) (c)++
711#else /* CONFIG_MAC80211_DEBUG_COUNTERS */ 686#else /* CONFIG_MAC80211_DEBUG_COUNTERS */
712#define I802_DEBUG_INC(c) do { } while (0) 687#define I802_DEBUG_INC(c) do { } while (0)
@@ -764,8 +739,6 @@ struct ieee80211_local {
764 struct dentry *rx_expand_skb_head2; 739 struct dentry *rx_expand_skb_head2;
765 struct dentry *rx_handlers_fragments; 740 struct dentry *rx_handlers_fragments;
766 struct dentry *tx_status_drop; 741 struct dentry *tx_status_drop;
767 struct dentry *wme_tx_queue;
768 struct dentry *wme_rx_queue;
769#endif 742#endif
770 struct dentry *dot11ACKFailureCount; 743 struct dentry *dot11ACKFailureCount;
771 struct dentry *dot11RTSFailureCount; 744 struct dentry *dot11RTSFailureCount;
@@ -778,6 +751,15 @@ struct ieee80211_local {
778#endif 751#endif
779}; 752};
780 753
754static inline int ieee80211_is_multiqueue(struct ieee80211_local *local)
755{
756#ifdef CONFIG_MAC80211_QOS
757 return netif_is_multiqueue(local->mdev);
758#else
759 return 0;
760#endif
761}
762
781/* this struct represents 802.11n's RA/TID combination */ 763/* this struct represents 802.11n's RA/TID combination */
782struct ieee80211_ra_tid { 764struct ieee80211_ra_tid {
783 u8 ra[ETH_ALEN]; 765 u8 ra[ETH_ALEN];
@@ -809,6 +791,10 @@ struct ieee802_11_elems {
809 u8 *preq; 791 u8 *preq;
810 u8 *prep; 792 u8 *prep;
811 u8 *perr; 793 u8 *perr;
794 u8 *ch_switch_elem;
795 u8 *country_elem;
796 u8 *pwr_constr_elem;
794 u8 *quiet_elem; /* first quiet element */
812 798
813 /* length of them, respectively */ 799 /* length of them, respectively */
814 u8 ssid_len; 800 u8 ssid_len;
@@ -833,6 +819,11 @@ struct ieee802_11_elems {
833 u8 preq_len; 819 u8 preq_len;
834 u8 prep_len; 820 u8 prep_len;
835 u8 perr_len; 821 u8 perr_len;
822 u8 ch_switch_elem_len;
823 u8 country_elem_len;
824 u8 pwr_constr_elem_len;
825 u8 quiet_elem_len;
826 u8 num_of_quiet_elem; /* can be more than one */
836}; 827};
837 828
838static inline struct ieee80211_local *hw_to_local( 829static inline struct ieee80211_local *hw_to_local(
@@ -847,11 +838,6 @@ static inline struct ieee80211_hw *local_to_hw(
847 return &local->hw; 838 return &local->hw;
848} 839}
849 840
850enum ieee80211_link_state_t {
851 IEEE80211_LINK_STATE_XOFF = 0,
852 IEEE80211_LINK_STATE_PENDING,
853};
854
855struct sta_attribute { 841struct sta_attribute {
856 struct attribute attr; 842 struct attribute attr;
857 ssize_t (*show)(const struct sta_info *, char *buf); 843 ssize_t (*show)(const struct sta_info *, char *buf);
@@ -877,29 +863,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
877 863
878/* ieee80211_ioctl.c */ 864/* ieee80211_ioctl.c */
879extern const struct iw_handler_def ieee80211_iw_handler_def; 865extern const struct iw_handler_def ieee80211_iw_handler_def;
880
881
882/* Least common multiple of the used rates (in 100 kbps). This is used to
883 * calculate rate_inv values for each rate so that only integers are needed. */
884#define CHAN_UTIL_RATE_LCM 95040
885/* 1 usec is 1/8 * (95040/10) = 1188 */
886#define CHAN_UTIL_PER_USEC 1188
887/* Amount of bits to shift the result right to scale the total utilization
888 * to values that will not wrap around 32-bit integers. */
889#define CHAN_UTIL_SHIFT 9
890/* Theoretical maximum of channel utilization counter in 10 ms (stat_time=1):
891 * (CHAN_UTIL_PER_USEC * 10000) >> CHAN_UTIL_SHIFT = 23203. So dividing the
892 * raw value with about 23 should give utilization in 10th of a percentage
893 * (1/1000). However, utilization is only estimated and not all intervals
894 * between frames etc. are calculated. 18 seems to give numbers that are closer
895 * to the real maximum. */
896#define CHAN_UTIL_PER_10MS 18
897#define CHAN_UTIL_HDR_LONG (202 * CHAN_UTIL_PER_USEC)
898#define CHAN_UTIL_HDR_SHORT (40 * CHAN_UTIL_PER_USEC)
899
900
901/* ieee80211_ioctl.c */
902int ieee80211_set_freq(struct net_device *dev, int freq); 866int ieee80211_set_freq(struct net_device *dev, int freq);
867
903/* ieee80211_sta.c */ 868/* ieee80211_sta.c */
904void ieee80211_sta_timer(unsigned long data); 869void ieee80211_sta_timer(unsigned long data);
905void ieee80211_sta_work(struct work_struct *work); 870void ieee80211_sta_work(struct work_struct *work);
@@ -912,16 +877,18 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid);
912int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); 877int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len);
913void ieee80211_sta_req_auth(struct net_device *dev, 878void ieee80211_sta_req_auth(struct net_device *dev,
914 struct ieee80211_if_sta *ifsta); 879 struct ieee80211_if_sta *ifsta);
915int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); 880int ieee80211_sta_scan_results(struct net_device *dev,
881 struct iw_request_info *info,
882 char *buf, size_t len);
916ieee80211_rx_result ieee80211_sta_rx_scan( 883ieee80211_rx_result ieee80211_sta_rx_scan(
917 struct net_device *dev, struct sk_buff *skb, 884 struct net_device *dev, struct sk_buff *skb,
918 struct ieee80211_rx_status *rx_status); 885 struct ieee80211_rx_status *rx_status);
919void ieee80211_rx_bss_list_init(struct net_device *dev); 886void ieee80211_rx_bss_list_init(struct net_device *dev);
920void ieee80211_rx_bss_list_deinit(struct net_device *dev); 887void ieee80211_rx_bss_list_deinit(struct net_device *dev);
921int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); 888int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
922struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, 889struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
923 struct sk_buff *skb, u8 *bssid, 890 struct sk_buff *skb, u8 *bssid,
924 u8 *addr); 891 u8 *addr, u64 supp_rates);
925int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); 892int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason);
926int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); 893int ieee80211_sta_disassociate(struct net_device *dev, u16 reason);
927void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 894void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
@@ -940,7 +907,6 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
940 907
941void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, 908void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
942 u16 tid, u16 initiator, u16 reason); 909 u16 tid, u16 initiator, u16 reason);
943void sta_rx_agg_session_timer_expired(unsigned long data);
944void sta_addba_resp_timer_expired(unsigned long data); 910void sta_addba_resp_timer_expired(unsigned long data);
945void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); 911void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
946u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 912u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -988,4 +954,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
988void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 954void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
989 struct ieee80211_hdr *hdr); 955 struct ieee80211_hdr *hdr);
990 956
957#ifdef CONFIG_MAC80211_NOINLINE
958#define debug_noinline noinline
959#else
960#define debug_noinline
961#endif
962
991#endif /* IEEE80211_I_H */ 963#endif /* IEEE80211_I_H */
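Editor's sketch for context: the queues_pending member introduced above replaces the old per-queue state array with an ordinary kernel bitmap sized via BITS_TO_LONGS(IEEE80211_MAX_QUEUES). The fragment below shows how such a bitmap is typically produced and consumed; it is illustrative and does not reproduce the real tx_pending_tasklet.

	#include <linux/bitops.h>

	/* producer: a frame could not be sent, park it and mark the queue */
	static void example_mark_pending(struct ieee80211_local *local, int queue)
	{
		set_bit(queue, local->queues_pending);
		tasklet_schedule(&local->tx_pending_tasklet);
	}

	/* consumer (tasklet body): drain every queue whose bit is set */
	static void example_drain_pending(struct ieee80211_local *local)
	{
		int i;

		for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
			if (!test_and_clear_bit(i, local->queues_pending))
				continue;
			/* re-submit local->pending_packet[i].skb here */
		}
	}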
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06e88a5a036d..eeb16926aa7d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -33,9 +33,8 @@ static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata)
33{ 33{
34 int i; 34 int i;
35 35
36 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 36 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
37 __skb_queue_purge(&sdata->fragments[i].skb_list); 37 __skb_queue_purge(&sdata->fragments[i].skb_list);
38 }
39} 38}
40 39
41/* Must be called with rtnl lock held. */ 40/* Must be called with rtnl lock held. */
@@ -167,9 +166,10 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
167 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | 166 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
168 IEEE80211_AUTH_ALG_SHARED_KEY; 167 IEEE80211_AUTH_ALG_SHARED_KEY;
169 ifsta->flags |= IEEE80211_STA_CREATE_IBSS | 168 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
170 IEEE80211_STA_WMM_ENABLED |
171 IEEE80211_STA_AUTO_BSSID_SEL | 169 IEEE80211_STA_AUTO_BSSID_SEL |
172 IEEE80211_STA_AUTO_CHANNEL_SEL; 170 IEEE80211_STA_AUTO_CHANNEL_SEL;
171 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
172 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
173 173
174 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); 174 msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev);
175 sdata->bss = &msdata->u.ap; 175 sdata->bss = &msdata->u.ap;
@@ -184,9 +184,9 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
184 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | 184 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
185 MONITOR_FLAG_OTHER_BSS; 185 MONITOR_FLAG_OTHER_BSS;
186 break; 186 break;
187 default: 187 case IEEE80211_IF_TYPE_INVALID:
188 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", 188 BUG();
189 dev->name, __func__, type); 189 break;
190 } 190 }
191 ieee80211_debugfs_change_if_type(sdata, oldtype); 191 ieee80211_debugfs_change_if_type(sdata, oldtype);
192} 192}
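Editor's note: the hunk above ties IEEE80211_STA_WMM_ENABLED to ieee80211_num_regular_queues() >= 4 because WMM needs one hardware queue per access category (voice, video, best effort, background). For illustration, a conventional 802.1d priority to access-category table is sketched below; mac80211 keeps its own copy (ieee802_1d_to_ac, referenced later in this patch), so this array is an example rather than a quotation of it.

	/* 0 = AC_VO, 1 = AC_VI, 2 = AC_BE, 3 = AC_BK -- one hw queue each */
	static const int example_1d_to_ac[8] = {
		2,	/* prio 0 -> best effort */
		3,	/* prio 1 -> background  */
		3,	/* prio 2 -> background  */
		2,	/* prio 3 -> best effort */
		1,	/* prio 4 -> video       */
		1,	/* prio 5 -> video       */
		0,	/* prio 6 -> voice       */
		0,	/* prio 7 -> voice       */
	};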
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 220e83be3ef4..6597c779e35a 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -321,8 +321,15 @@ void ieee80211_key_link(struct ieee80211_key *key,
321 * some hardware cannot handle TKIP with QoS, so 321 * some hardware cannot handle TKIP with QoS, so
322 * we indicate whether QoS could be in use. 322 * we indicate whether QoS could be in use.
323 */ 323 */
324 if (sta->flags & WLAN_STA_WME) 324 if (test_sta_flags(sta, WLAN_STA_WME))
325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; 325 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
326
327 /*
328 * This key is for a specific sta interface,
329 * inform the driver that it should try to store
330 * this key as pairwise key.
331 */
332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
326 } else { 333 } else {
327 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 334 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
328 struct sta_info *ap; 335 struct sta_info *ap;
@@ -335,7 +342,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
335 /* same here, the AP could be using QoS */ 342 /* same here, the AP could be using QoS */
336 ap = sta_info_get(key->local, key->sdata->u.sta.bssid); 343 ap = sta_info_get(key->local, key->sdata->u.sta.bssid);
337 if (ap) { 344 if (ap) {
338 if (ap->flags & WLAN_STA_WME) 345 if (test_sta_flags(ap, WLAN_STA_WME))
339 key->conf.flags |= 346 key->conf.flags |=
340 IEEE80211_KEY_FLAG_WMM_STA; 347 IEEE80211_KEY_FLAG_WMM_STA;
341 } 348 }
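Editor's sketch for context: IEEE80211_KEY_FLAG_PAIRWISE, set above for keys bound to a specific station, is a hint to the driver about where to store the key. A hedged sketch of how a driver's key-programming path might use it follows (hypothetical driver code, not taken from this patch; only key->flags and key->keyidx come from the mac80211 key_conf API).

	static void example_program_key(struct ieee80211_key_conf *key)
	{
		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
			/* per-peer key: use a pairwise slot tied to the
			 * station's MAC address */
		} else {
			/* group/default key: use a shared slot indexed
			 * by key->keyidx */
		}
	}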
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index f52c3df1fe9a..425816e0996c 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -16,31 +16,18 @@
16#include <linux/rcupdate.h> 16#include <linux/rcupdate.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18 18
19/* ALG_TKIP 19#define WEP_IV_LEN 4
20 * struct ieee80211_key::key is encoded as a 256-bit (32 byte) data block: 20#define WEP_ICV_LEN 4
21 * Temporal Encryption Key (128 bits) 21#define ALG_TKIP_KEY_LEN 32
22 * Temporal Authenticator Tx MIC Key (64 bits) 22#define ALG_CCMP_KEY_LEN 16
23 * Temporal Authenticator Rx MIC Key (64 bits) 23#define CCMP_HDR_LEN 8
24 */ 24#define CCMP_MIC_LEN 8
25 25#define CCMP_TK_LEN 16
26#define WEP_IV_LEN 4 26#define CCMP_PN_LEN 6
27#define WEP_ICV_LEN 4 27#define TKIP_IV_LEN 8
28 28#define TKIP_ICV_LEN 4
29#define ALG_TKIP_KEY_LEN 32 29
30/* Starting offsets for each key */ 30#define NUM_RX_DATA_QUEUES 17
31#define ALG_TKIP_TEMP_ENCR_KEY 0
32#define ALG_TKIP_TEMP_AUTH_TX_MIC_KEY 16
33#define ALG_TKIP_TEMP_AUTH_RX_MIC_KEY 24
34#define TKIP_IV_LEN 8
35#define TKIP_ICV_LEN 4
36
37#define ALG_CCMP_KEY_LEN 16
38#define CCMP_HDR_LEN 8
39#define CCMP_MIC_LEN 8
40#define CCMP_TK_LEN 16
41#define CCMP_PN_LEN 6
42
43#define NUM_RX_DATA_QUEUES 17
44 31
45struct ieee80211_local; 32struct ieee80211_local;
46struct ieee80211_sub_if_data; 33struct ieee80211_sub_if_data;
@@ -69,6 +56,13 @@ enum ieee80211_internal_key_flags {
69 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), 56 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
70}; 57};
71 58
59struct tkip_ctx {
60 u32 iv32;
61 u16 iv16;
62 u16 p1k[5];
63 int initialized;
64};
65
72struct ieee80211_key { 66struct ieee80211_key {
73 struct ieee80211_local *local; 67 struct ieee80211_local *local;
74 struct ieee80211_sub_if_data *sdata; 68 struct ieee80211_sub_if_data *sdata;
@@ -85,16 +79,10 @@ struct ieee80211_key {
85 union { 79 union {
86 struct { 80 struct {
87 /* last used TSC */ 81 /* last used TSC */
88 u32 iv32; 82 struct tkip_ctx tx;
89 u16 iv16;
90 u16 p1k[5];
91 int tx_initialized;
92 83
93 /* last received RSC */ 84 /* last received RSC */
94 u32 iv32_rx[NUM_RX_DATA_QUEUES]; 85 struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
95 u16 iv16_rx[NUM_RX_DATA_QUEUES];
96 u16 p1k_rx[NUM_RX_DATA_QUEUES][5];
97 int rx_initialized[NUM_RX_DATA_QUEUES];
98 } tkip; 86 } tkip;
99 struct { 87 struct {
100 u8 tx_pn[6]; 88 u8 tx_pn[6];
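Editor's sketch for context: struct tkip_ctx, introduced in the hunk above, folds the previously parallel iv32/iv16/p1k/initialized arrays into one object per direction, with one RX context per receive queue. The fragment below shows how per-queue RX state is then reached (field names taken from the struct above; the phase-1 key derivation itself is only indicated).

	static void example_tkip_rx_update(struct ieee80211_key *key, int queue,
					   u32 iv32, u16 iv16)
	{
		struct tkip_ctx *rx = &key->u.tkip.rx[queue];

		if (!rx->initialized || rx->iv32 != iv32) {
			/* iv32 rolled over or first use: recompute the
			 * phase-1 key into rx->p1k[] before decrypting */
			rx->iv32 = iv32;
			rx->initialized = 1;
		}
		rx->iv16 = iv16;
	}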
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 98c0b5e56ecc..f18cfd727872 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -35,8 +35,6 @@
35#include "debugfs.h" 35#include "debugfs.h"
36#include "debugfs_netdev.h" 36#include "debugfs_netdev.h"
37 37
38#define SUPP_MCS_SET_LEN 16
39
40/* 38/*
41 * For seeing transmitted packets on monitor interfaces 39 * For seeing transmitted packets on monitor interfaces
42 * we have a radiotap header too. 40 * we have a radiotap header too.
@@ -112,7 +110,13 @@ static int ieee80211_master_open(struct net_device *dev)
112 break; 110 break;
113 } 111 }
114 } 112 }
115 return res; 113
114 if (res)
115 return res;
116
117 netif_start_queue(local->mdev);
118
119 return 0;
116} 120}
117 121
118static int ieee80211_master_stop(struct net_device *dev) 122static int ieee80211_master_stop(struct net_device *dev)
@@ -147,9 +151,7 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
147 /* FIX: what would be proper limits for MTU? 151 /* FIX: what would be proper limits for MTU?
148 * This interface uses 802.3 frames. */ 152 * This interface uses 802.3 frames. */
149 if (new_mtu < 256 || 153 if (new_mtu < 256 ||
150 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { 154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
151 printk(KERN_WARNING "%s: invalid MTU %d\n",
152 dev->name, new_mtu);
153 return -EINVAL; 155 return -EINVAL;
154 } 156 }
155 157
@@ -346,6 +348,7 @@ static int ieee80211_open(struct net_device *dev)
346 goto err_del_interface; 348 goto err_del_interface;
347 } 349 }
348 350
351 /* no locking required since STA is not live yet */
349 sta->flags |= WLAN_STA_AUTHORIZED; 352 sta->flags |= WLAN_STA_AUTHORIZED;
350 353
351 res = sta_info_insert(sta); 354 res = sta_info_insert(sta);
@@ -385,8 +388,8 @@ static int ieee80211_open(struct net_device *dev)
385 * yet be effective. Trigger execution of ieee80211_sta_work 388 * yet be effective. Trigger execution of ieee80211_sta_work
386 * to fix this. 389 * to fix this.
387 */ 390 */
388 if(sdata->vif.type == IEEE80211_IF_TYPE_STA || 391 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
389 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 392 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
390 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 393 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
391 queue_work(local->hw.workqueue, &ifsta->work); 394 queue_work(local->hw.workqueue, &ifsta->work);
392 } 395 }
@@ -584,17 +587,19 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
584 587
585 sta = sta_info_get(local, ra); 588 sta = sta_info_get(local, ra);
586 if (!sta) { 589 if (!sta) {
590#ifdef CONFIG_MAC80211_HT_DEBUG
587 printk(KERN_DEBUG "Could not find the station\n"); 591 printk(KERN_DEBUG "Could not find the station\n");
588 rcu_read_unlock(); 592#endif
589 return -ENOENT; 593 ret = -ENOENT;
594 goto exit;
590 } 595 }
591 596
592 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 597 spin_lock_bh(&sta->lock);
593 598
594 /* we have tried too many times, receiver does not want A-MPDU */ 599 /* we have tried too many times, receiver does not want A-MPDU */
595 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { 600 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
596 ret = -EBUSY; 601 ret = -EBUSY;
597 goto start_ba_exit; 602 goto err_unlock_sta;
598 } 603 }
599 604
600 state = &sta->ampdu_mlme.tid_state_tx[tid]; 605 state = &sta->ampdu_mlme.tid_state_tx[tid];
@@ -605,18 +610,20 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
605 "idle on tid %u\n", tid); 610 "idle on tid %u\n", tid);
606#endif /* CONFIG_MAC80211_HT_DEBUG */ 611#endif /* CONFIG_MAC80211_HT_DEBUG */
607 ret = -EAGAIN; 612 ret = -EAGAIN;
608 goto start_ba_exit; 613 goto err_unlock_sta;
609 } 614 }
610 615
611 /* prepare A-MPDU MLME for Tx aggregation */ 616 /* prepare A-MPDU MLME for Tx aggregation */
612 sta->ampdu_mlme.tid_tx[tid] = 617 sta->ampdu_mlme.tid_tx[tid] =
613 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); 618 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
614 if (!sta->ampdu_mlme.tid_tx[tid]) { 619 if (!sta->ampdu_mlme.tid_tx[tid]) {
620#ifdef CONFIG_MAC80211_HT_DEBUG
615 if (net_ratelimit()) 621 if (net_ratelimit())
616 printk(KERN_ERR "allocate tx mlme to tid %d failed\n", 622 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
617 tid); 623 tid);
624#endif
618 ret = -ENOMEM; 625 ret = -ENOMEM;
619 goto start_ba_exit; 626 goto err_unlock_sta;
620 } 627 }
621 /* Tx timer */ 628 /* Tx timer */
622 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 629 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
@@ -639,7 +646,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
639 printk(KERN_DEBUG "BA request denied - queue unavailable for" 646 printk(KERN_DEBUG "BA request denied - queue unavailable for"
640 " tid %d\n", tid); 647 " tid %d\n", tid);
641#endif /* CONFIG_MAC80211_HT_DEBUG */ 648#endif /* CONFIG_MAC80211_HT_DEBUG */
642 goto start_ba_err; 649 goto err_unlock_queue;
643 } 650 }
644 sdata = sta->sdata; 651 sdata = sta->sdata;
645 652
@@ -661,12 +668,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
661 " tid %d\n", tid); 668 " tid %d\n", tid);
662#endif /* CONFIG_MAC80211_HT_DEBUG */ 669#endif /* CONFIG_MAC80211_HT_DEBUG */
663 *state = HT_AGG_STATE_IDLE; 670 *state = HT_AGG_STATE_IDLE;
664 goto start_ba_err; 671 goto err_unlock_queue;
665 } 672 }
666 673
667 /* Will put all the packets in the new SW queue */ 674 /* Will put all the packets in the new SW queue */
668 ieee80211_requeue(local, ieee802_1d_to_ac[tid]); 675 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
669 spin_unlock_bh(&local->mdev->queue_lock); 676 spin_unlock_bh(&local->mdev->queue_lock);
677 spin_unlock_bh(&sta->lock);
670 678
671 /* send an addBA request */ 679 /* send an addBA request */
672 sta->ampdu_mlme.dialog_token_allocator++; 680 sta->ampdu_mlme.dialog_token_allocator++;
@@ -674,25 +682,28 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
674 sta->ampdu_mlme.dialog_token_allocator; 682 sta->ampdu_mlme.dialog_token_allocator;
675 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 683 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
676 684
685
677 ieee80211_send_addba_request(sta->sdata->dev, ra, tid, 686 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
678 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 687 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
679 sta->ampdu_mlme.tid_tx[tid]->ssn, 688 sta->ampdu_mlme.tid_tx[tid]->ssn,
680 0x40, 5000); 689 0x40, 5000);
681
682 /* activate the timer for the recipient's addBA response */ 690 /* activate the timer for the recipient's addBA response */
683 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = 691 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
684 jiffies + ADDBA_RESP_INTERVAL; 692 jiffies + ADDBA_RESP_INTERVAL;
685 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 693 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
694#ifdef CONFIG_MAC80211_HT_DEBUG
686 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); 695 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
687 goto start_ba_exit; 696#endif
697 goto exit;
688 698
689start_ba_err: 699err_unlock_queue:
690 kfree(sta->ampdu_mlme.tid_tx[tid]); 700 kfree(sta->ampdu_mlme.tid_tx[tid]);
691 sta->ampdu_mlme.tid_tx[tid] = NULL; 701 sta->ampdu_mlme.tid_tx[tid] = NULL;
692 spin_unlock_bh(&local->mdev->queue_lock); 702 spin_unlock_bh(&local->mdev->queue_lock);
693 ret = -EBUSY; 703 ret = -EBUSY;
694start_ba_exit: 704err_unlock_sta:
695 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 705 spin_unlock_bh(&sta->lock);
706exit:
696 rcu_read_unlock(); 707 rcu_read_unlock();
697 return ret; 708 return ret;
698} 709}
@@ -720,7 +731,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
720 731
721 /* check if the TID is in aggregation */ 732 /* check if the TID is in aggregation */
722 state = &sta->ampdu_mlme.tid_state_tx[tid]; 733 state = &sta->ampdu_mlme.tid_state_tx[tid];
723 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 734 spin_lock_bh(&sta->lock);
724 735
725 if (*state != HT_AGG_STATE_OPERATIONAL) { 736 if (*state != HT_AGG_STATE_OPERATIONAL) {
726 ret = -ENOENT; 737 ret = -ENOENT;
@@ -750,7 +761,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
750 } 761 }
751 762
752stop_BA_exit: 763stop_BA_exit:
753 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 764 spin_unlock_bh(&sta->lock);
754 rcu_read_unlock(); 765 rcu_read_unlock();
755 return ret; 766 return ret;
756} 767}
@@ -764,8 +775,10 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
764 DECLARE_MAC_BUF(mac); 775 DECLARE_MAC_BUF(mac);
765 776
766 if (tid >= STA_TID_NUM) { 777 if (tid >= STA_TID_NUM) {
778#ifdef CONFIG_MAC80211_HT_DEBUG
767 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 779 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
768 tid, STA_TID_NUM); 780 tid, STA_TID_NUM);
781#endif
769 return; 782 return;
770 } 783 }
771 784
@@ -773,18 +786,22 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
773 sta = sta_info_get(local, ra); 786 sta = sta_info_get(local, ra);
774 if (!sta) { 787 if (!sta) {
775 rcu_read_unlock(); 788 rcu_read_unlock();
789#ifdef CONFIG_MAC80211_HT_DEBUG
776 printk(KERN_DEBUG "Could not find station: %s\n", 790 printk(KERN_DEBUG "Could not find station: %s\n",
777 print_mac(mac, ra)); 791 print_mac(mac, ra));
792#endif
778 return; 793 return;
779 } 794 }
780 795
781 state = &sta->ampdu_mlme.tid_state_tx[tid]; 796 state = &sta->ampdu_mlme.tid_state_tx[tid];
782 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 797 spin_lock_bh(&sta->lock);
783 798
784 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 799 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
800#ifdef CONFIG_MAC80211_HT_DEBUG
785 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", 801 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
786 *state); 802 *state);
787 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 803#endif
804 spin_unlock_bh(&sta->lock);
788 rcu_read_unlock(); 805 rcu_read_unlock();
789 return; 806 return;
790 } 807 }
@@ -794,10 +811,12 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
794 *state |= HT_ADDBA_DRV_READY_MSK; 811 *state |= HT_ADDBA_DRV_READY_MSK;
795 812
796 if (*state == HT_AGG_STATE_OPERATIONAL) { 813 if (*state == HT_AGG_STATE_OPERATIONAL) {
814#ifdef CONFIG_MAC80211_HT_DEBUG
797 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); 815 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
816#endif
798 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 817 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
799 } 818 }
800 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 819 spin_unlock_bh(&sta->lock);
801 rcu_read_unlock(); 820 rcu_read_unlock();
802} 821}
803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); 822EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
@@ -811,8 +830,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
811 DECLARE_MAC_BUF(mac); 830 DECLARE_MAC_BUF(mac);
812 831
813 if (tid >= STA_TID_NUM) { 832 if (tid >= STA_TID_NUM) {
833#ifdef CONFIG_MAC80211_HT_DEBUG
814 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 834 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
815 tid, STA_TID_NUM); 835 tid, STA_TID_NUM);
836#endif
816 return; 837 return;
817 } 838 }
818 839
@@ -824,17 +845,22 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
824 rcu_read_lock(); 845 rcu_read_lock();
825 sta = sta_info_get(local, ra); 846 sta = sta_info_get(local, ra);
826 if (!sta) { 847 if (!sta) {
848#ifdef CONFIG_MAC80211_HT_DEBUG
827 printk(KERN_DEBUG "Could not find station: %s\n", 849 printk(KERN_DEBUG "Could not find station: %s\n",
828 print_mac(mac, ra)); 850 print_mac(mac, ra));
851#endif
829 rcu_read_unlock(); 852 rcu_read_unlock();
830 return; 853 return;
831 } 854 }
832 state = &sta->ampdu_mlme.tid_state_tx[tid]; 855 state = &sta->ampdu_mlme.tid_state_tx[tid];
833 856
834 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 857 /* NOTE: no need to use sta->lock in this state check, as
858 * ieee80211_stop_tx_ba_session will let only
859 * one stop call to pass through per sta/tid */
835 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { 860 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
861#ifdef CONFIG_MAC80211_HT_DEBUG
836 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); 862 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
837 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 863#endif
838 rcu_read_unlock(); 864 rcu_read_unlock();
839 return; 865 return;
840 } 866 }
@@ -857,11 +883,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
857 * ieee80211_wake_queue is not used here as this queue is not 883 * ieee80211_wake_queue is not used here as this queue is not
858 * necessarily stopped */ 884 * necessarily stopped */
859 netif_schedule(local->mdev); 885 netif_schedule(local->mdev);
886 spin_lock_bh(&sta->lock);
860 *state = HT_AGG_STATE_IDLE; 887 *state = HT_AGG_STATE_IDLE;
861 sta->ampdu_mlme.addba_req_num[tid] = 0; 888 sta->ampdu_mlme.addba_req_num[tid] = 0;
862 kfree(sta->ampdu_mlme.tid_tx[tid]); 889 kfree(sta->ampdu_mlme.tid_tx[tid]);
863 sta->ampdu_mlme.tid_tx[tid] = NULL; 890 sta->ampdu_mlme.tid_tx[tid] = NULL;
864 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 891 spin_unlock_bh(&sta->lock);
865 892
866 rcu_read_unlock(); 893 rcu_read_unlock();
867} 894}
@@ -875,9 +902,11 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
875 struct sk_buff *skb = dev_alloc_skb(0); 902 struct sk_buff *skb = dev_alloc_skb(0);
876 903
877 if (unlikely(!skb)) { 904 if (unlikely(!skb)) {
905#ifdef CONFIG_MAC80211_HT_DEBUG
878 if (net_ratelimit()) 906 if (net_ratelimit())
879 printk(KERN_WARNING "%s: Not enough memory, " 907 printk(KERN_WARNING "%s: Not enough memory, "
880 "dropping start BA session", skb->dev->name); 908 "dropping start BA session", skb->dev->name);
909#endif
881 return; 910 return;
882 } 911 }
883 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 912 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -898,9 +927,11 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
898 struct sk_buff *skb = dev_alloc_skb(0); 927 struct sk_buff *skb = dev_alloc_skb(0);
899 928
900 if (unlikely(!skb)) { 929 if (unlikely(!skb)) {
930#ifdef CONFIG_MAC80211_HT_DEBUG
901 if (net_ratelimit()) 931 if (net_ratelimit())
902 printk(KERN_WARNING "%s: Not enough memory, " 932 printk(KERN_WARNING "%s: Not enough memory, "
903 "dropping stop BA session", skb->dev->name); 933 "dropping stop BA session", skb->dev->name);
934#endif
904 return; 935 return;
905 } 936 }
906 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 937 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
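Editor's sketch for context: both *_cb_irqsafe() helpers above rely on the same deferral trick: a zero-length skb acts purely as a message to the tasklet, carrying the RA/TID pair in skb->cb and the message kind in skb->pkt_type. In sketch form (IEEE80211_ADDBA_MSG, local->skb_queue and local->tasklet follow the surrounding code but should be treated as assumptions here):

	static void example_queue_addba_msg(struct ieee80211_local *local,
					    const u8 *ra, u16 tid)
	{
		struct sk_buff *skb = dev_alloc_skb(0);
		struct ieee80211_ra_tid *ra_tid;

		if (unlikely(!skb))
			return;	/* request is simply dropped */

		ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
		memcpy(ra_tid->ra, ra, ETH_ALEN);
		ra_tid->tid = tid;

		skb->pkt_type = IEEE80211_ADDBA_MSG;	/* decoded by the tasklet */
		skb_queue_tail(&local->skb_queue, skb);
		tasklet_schedule(&local->tasklet);
	}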
@@ -967,8 +998,7 @@ void ieee80211_if_setup(struct net_device *dev)
967/* everything else */ 998/* everything else */
968 999
969static int __ieee80211_if_config(struct net_device *dev, 1000static int __ieee80211_if_config(struct net_device *dev,
970 struct sk_buff *beacon, 1001 struct sk_buff *beacon)
971 struct ieee80211_tx_control *control)
972{ 1002{
973 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1003 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
974 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1004 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -986,13 +1016,11 @@ static int __ieee80211_if_config(struct net_device *dev,
986 conf.ssid_len = sdata->u.sta.ssid_len; 1016 conf.ssid_len = sdata->u.sta.ssid_len;
987 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 1017 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
988 conf.beacon = beacon; 1018 conf.beacon = beacon;
989 conf.beacon_control = control;
990 ieee80211_start_mesh(dev); 1019 ieee80211_start_mesh(dev);
991 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 1020 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
992 conf.ssid = sdata->u.ap.ssid; 1021 conf.ssid = sdata->u.ap.ssid;
993 conf.ssid_len = sdata->u.ap.ssid_len; 1022 conf.ssid_len = sdata->u.ap.ssid_len;
994 conf.beacon = beacon; 1023 conf.beacon = beacon;
995 conf.beacon_control = control;
996 } 1024 }
997 return local->ops->config_interface(local_to_hw(local), 1025 return local->ops->config_interface(local_to_hw(local),
998 &sdata->vif, &conf); 1026 &sdata->vif, &conf);
@@ -1005,23 +1033,21 @@ int ieee80211_if_config(struct net_device *dev)
1005 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT && 1033 if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
1006 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) 1034 (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1007 return ieee80211_if_config_beacon(dev); 1035 return ieee80211_if_config_beacon(dev);
1008 return __ieee80211_if_config(dev, NULL, NULL); 1036 return __ieee80211_if_config(dev, NULL);
1009} 1037}
1010 1038
1011int ieee80211_if_config_beacon(struct net_device *dev) 1039int ieee80211_if_config_beacon(struct net_device *dev)
1012{ 1040{
1013 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1041 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1014 struct ieee80211_tx_control control;
1015 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1042 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1016 struct sk_buff *skb; 1043 struct sk_buff *skb;
1017 1044
1018 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) 1045 if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
1019 return 0; 1046 return 0;
1020 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif, 1047 skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
1021 &control);
1022 if (!skb) 1048 if (!skb)
1023 return -ENOMEM; 1049 return -ENOMEM;
1024 return __ieee80211_if_config(dev, skb, &control); 1050 return __ieee80211_if_config(dev, skb);
1025} 1051}
1026 1052
1027int ieee80211_hw_config(struct ieee80211_local *local) 1053int ieee80211_hw_config(struct ieee80211_local *local)
@@ -1068,56 +1094,84 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1068 struct ieee80211_supported_band *sband; 1094 struct ieee80211_supported_band *sband;
1069 struct ieee80211_ht_info ht_conf; 1095 struct ieee80211_ht_info ht_conf;
1070 struct ieee80211_ht_bss_info ht_bss_conf; 1096 struct ieee80211_ht_bss_info ht_bss_conf;
1071 int i;
1072 u32 changed = 0; 1097 u32 changed = 0;
1098 int i;
1099 u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
1100 u8 tx_mcs_set_cap;
1073 1101
1074 sband = local->hw.wiphy->bands[conf->channel->band]; 1102 sband = local->hw.wiphy->bands[conf->channel->band];
1075 1103
1104 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
1105 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
1106
1076 /* HT is not supported */ 1107 /* HT is not supported */
1077 if (!sband->ht_info.ht_supported) { 1108 if (!sband->ht_info.ht_supported) {
1078 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1109 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1079 return 0; 1110 goto out;
1080 } 1111 }
1081 1112
1082 memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info)); 1113 /* disable HT */
1083 memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info)); 1114 if (!enable_ht) {
1084 1115 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
1085 if (enable_ht) {
1086 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1087 changed |= BSS_CHANGED_HT; 1116 changed |= BSS_CHANGED_HT;
1117 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1118 conf->ht_conf.ht_supported = 0;
1119 goto out;
1120 }
1088 1121
1089 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1090 ht_conf.ht_supported = 1;
1091 1122
1092 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; 1123 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
1093 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 1124 changed |= BSS_CHANGED_HT;
1094 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1095 1125
1096 for (i = 0; i < SUPP_MCS_SET_LEN; i++) 1126 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
1097 ht_conf.supp_mcs_set[i] = 1127 ht_conf.ht_supported = 1;
1098 sband->ht_info.supp_mcs_set[i] &
1099 req_ht_cap->supp_mcs_set[i];
1100 1128
1101 ht_bss_conf.primary_channel = req_bss_cap->primary_channel; 1129 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1102 ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 1130 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
1103 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 1131 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
1132 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1133 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1134 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
1104 1135
1105 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor; 1136 ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
1106 ht_conf.ampdu_density = req_ht_cap->ampdu_density; 1137 ht_conf.ampdu_density = req_ht_cap->ampdu_density;
1107 1138
1108 /* if bss configuration changed store the new one */ 1139 /* Bits 96-100 */
1109 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) || 1140 tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
1110 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) { 1141
1111 changed |= BSS_CHANGED_HT; 1142 /* configure suppoerted Tx MCS according to requested MCS
1112 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf)); 1143 * (based in most cases on Rx capabilities of peer) and self
1113 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf)); 1144 * Tx MCS capabilities (as defined by low level driver HW
1114 } 1145 * Tx capabilities) */
1115 } else { 1146 if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
1116 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) 1147 goto check_changed;
1117 changed |= BSS_CHANGED_HT;
1118 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
1119 }
1120 1148
1149 /* Counting from 0, therefore + 1 */
1150 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
1151 max_tx_streams = ((tx_mcs_set_cap &
1152 IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
1153
1154 for (i = 0; i < max_tx_streams; i++)
1155 ht_conf.supp_mcs_set[i] =
1156 sband->ht_info.supp_mcs_set[i] &
1157 req_ht_cap->supp_mcs_set[i];
1158
1159 if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
1160 for (i = IEEE80211_SUPP_MCS_SET_UEQM;
1161 i < IEEE80211_SUPP_MCS_SET_LEN; i++)
1162 ht_conf.supp_mcs_set[i] =
1163 sband->ht_info.supp_mcs_set[i] &
1164 req_ht_cap->supp_mcs_set[i];
1165
1166check_changed:
1167 /* if bss configuration changed store the new one */
1168 if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
1169 memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
1170 changed |= BSS_CHANGED_HT;
1171 memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
1172 memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
1173 }
1174out:
1121 return changed; 1175 return changed;
1122} 1176}
1123 1177
@@ -1148,38 +1202,20 @@ void ieee80211_reset_erp_info(struct net_device *dev)
1148} 1202}
1149 1203
1150void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, 1204void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1151 struct sk_buff *skb, 1205 struct sk_buff *skb)
1152 struct ieee80211_tx_status *status)
1153{ 1206{
1154 struct ieee80211_local *local = hw_to_local(hw); 1207 struct ieee80211_local *local = hw_to_local(hw);
1155 struct ieee80211_tx_status *saved; 1208 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1156 int tmp; 1209 int tmp;
1157 1210
1158 skb->dev = local->mdev; 1211 skb->dev = local->mdev;
1159 saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC);
1160 if (unlikely(!saved)) {
1161 if (net_ratelimit())
1162 printk(KERN_WARNING "%s: Not enough memory, "
1163 "dropping tx status", skb->dev->name);
1164 /* should be dev_kfree_skb_irq, but due to this function being
1165 * named _irqsafe instead of just _irq we can't be sure that
1166 * people won't call it from non-irq contexts */
1167 dev_kfree_skb_any(skb);
1168 return;
1169 }
1170 memcpy(saved, status, sizeof(struct ieee80211_tx_status));
1171 /* copy pointer to saved status into skb->cb for use by tasklet */
1172 memcpy(skb->cb, &saved, sizeof(saved));
1173
1174 skb->pkt_type = IEEE80211_TX_STATUS_MSG; 1212 skb->pkt_type = IEEE80211_TX_STATUS_MSG;
1175 skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ? 1213 skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
1176 &local->skb_queue : &local->skb_queue_unreliable, skb); 1214 &local->skb_queue : &local->skb_queue_unreliable, skb);
1177 tmp = skb_queue_len(&local->skb_queue) + 1215 tmp = skb_queue_len(&local->skb_queue) +
1178 skb_queue_len(&local->skb_queue_unreliable); 1216 skb_queue_len(&local->skb_queue_unreliable);
1179 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && 1217 while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
1180 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 1218 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
1181 memcpy(&saved, skb->cb, sizeof(saved));
1182 kfree(saved);
1183 dev_kfree_skb_irq(skb); 1219 dev_kfree_skb_irq(skb);
1184 tmp--; 1220 tmp--;
1185 I802_DEBUG_INC(local->tx_status_drop); 1221 I802_DEBUG_INC(local->tx_status_drop);
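Editor's sketch for context: the rewritten ieee80211_tx_status_irqsafe() above shows the central idea of this series: per-packet TX control and status now live inside the skb's own control buffer as struct ieee80211_tx_info, so nothing has to be allocated, copied or freed on the status path. A sketch of how a caller reaches that structure (hypothetical driver-side code; IEEE80211_TX_STAT_ACK is assumed to belong to the new ieee80211_tx_info API):

	static void example_report_status(struct ieee80211_hw *hw,
					  struct sk_buff *skb, bool acked)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (acked)
			info->flags |= IEEE80211_TX_STAT_ACK;

		/* hand the skb, with its embedded status, back to mac80211 */
		ieee80211_tx_status_irqsafe(hw, skb);
	}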
@@ -1193,7 +1229,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
1193 struct ieee80211_local *local = (struct ieee80211_local *) data; 1229 struct ieee80211_local *local = (struct ieee80211_local *) data;
1194 struct sk_buff *skb; 1230 struct sk_buff *skb;
1195 struct ieee80211_rx_status rx_status; 1231 struct ieee80211_rx_status rx_status;
1196 struct ieee80211_tx_status *tx_status;
1197 struct ieee80211_ra_tid *ra_tid; 1232 struct ieee80211_ra_tid *ra_tid;
1198 1233
1199 while ((skb = skb_dequeue(&local->skb_queue)) || 1234 while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -1208,12 +1243,8 @@ static void ieee80211_tasklet_handler(unsigned long data)
1208 __ieee80211_rx(local_to_hw(local), skb, &rx_status); 1243 __ieee80211_rx(local_to_hw(local), skb, &rx_status);
1209 break; 1244 break;
1210 case IEEE80211_TX_STATUS_MSG: 1245 case IEEE80211_TX_STATUS_MSG:
1211 /* get pointer to saved status out of skb->cb */
1212 memcpy(&tx_status, skb->cb, sizeof(tx_status));
1213 skb->pkt_type = 0; 1246 skb->pkt_type = 0;
1214 ieee80211_tx_status(local_to_hw(local), 1247 ieee80211_tx_status(local_to_hw(local), skb);
1215 skb, tx_status);
1216 kfree(tx_status);
1217 break; 1248 break;
1218 case IEEE80211_DELBA_MSG: 1249 case IEEE80211_DELBA_MSG:
1219 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 1250 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
@@ -1227,9 +1258,8 @@ static void ieee80211_tasklet_handler(unsigned long data)
1227 ra_tid->ra, ra_tid->tid); 1258 ra_tid->ra, ra_tid->tid);
1228 dev_kfree_skb(skb); 1259 dev_kfree_skb(skb);
1229 break ; 1260 break ;
1230 default: /* should never get here! */ 1261 default:
1231 printk(KERN_ERR "%s: Unknown message type (%d)\n", 1262 WARN_ON(1);
1232 wiphy_name(local->hw.wiphy), skb->pkt_type);
1233 dev_kfree_skb(skb); 1263 dev_kfree_skb(skb);
1234 break; 1264 break;
1235 } 1265 }
@@ -1242,24 +1272,15 @@ static void ieee80211_tasklet_handler(unsigned long data)
1242 * Also, tx_packet_data in cb is restored from tx_control. */ 1272 * Also, tx_packet_data in cb is restored from tx_control. */
1243static void ieee80211_remove_tx_extra(struct ieee80211_local *local, 1273static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 1274 struct ieee80211_key *key,
1245 struct sk_buff *skb, 1275 struct sk_buff *skb)
1246 struct ieee80211_tx_control *control)
1247{ 1276{
1248 int hdrlen, iv_len, mic_len; 1277 int hdrlen, iv_len, mic_len;
1249 struct ieee80211_tx_packet_data *pkt_data; 1278 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1250 1279
1251 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1280 info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
1252 pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex; 1281 IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
1253 pkt_data->flags = 0; 1282 IEEE80211_TX_CTL_REQUEUE |
1254 if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS) 1283 IEEE80211_TX_CTL_EAPOL_FRAME;
1255 pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
1256 if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)
1257 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
1258 if (control->flags & IEEE80211_TXCTL_REQUEUE)
1259 pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
1260 if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME)
1261 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME;
1262 pkt_data->queue = control->queue;
1263 1284
1264 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1285 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1265 1286
@@ -1306,9 +1327,10 @@ no_key:
1306 1327
1307static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, 1328static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1308 struct sta_info *sta, 1329 struct sta_info *sta,
1309 struct sk_buff *skb, 1330 struct sk_buff *skb)
1310 struct ieee80211_tx_status *status)
1311{ 1331{
1332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1333
1312 sta->tx_filtered_count++; 1334 sta->tx_filtered_count++;
1313 1335
1314 /* 1336 /*
@@ -1316,7 +1338,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1316 * packet. If the STA went to power save mode, this will happen 1338 * packet. If the STA went to power save mode, this will happen
1317 * when it wakes up for the next time. 1339 * when it wakes up for the next time.
1318 */ 1340 */
1319 sta->flags |= WLAN_STA_CLEAR_PS_FILT; 1341 set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
1320 1342
1321 /* 1343 /*
1322 * This code races in the following way: 1344 * This code races in the following way:
@@ -1348,84 +1370,72 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1348 * can be unknown, for example with different interrupt status 1370 * can be unknown, for example with different interrupt status
1349 * bits. 1371 * bits.
1350 */ 1372 */
1351 if (sta->flags & WLAN_STA_PS && 1373 if (test_sta_flags(sta, WLAN_STA_PS) &&
1352 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { 1374 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1353 ieee80211_remove_tx_extra(local, sta->key, skb, 1375 ieee80211_remove_tx_extra(local, sta->key, skb);
1354 &status->control);
1355 skb_queue_tail(&sta->tx_filtered, skb); 1376 skb_queue_tail(&sta->tx_filtered, skb);
1356 return; 1377 return;
1357 } 1378 }
1358 1379
1359 if (!(sta->flags & WLAN_STA_PS) && 1380 if (!test_sta_flags(sta, WLAN_STA_PS) &&
1360 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) { 1381 !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
1361 /* Software retry the packet once */ 1382 /* Software retry the packet once */
1362 status->control.flags |= IEEE80211_TXCTL_REQUEUE; 1383 info->flags |= IEEE80211_TX_CTL_REQUEUE;
1363 ieee80211_remove_tx_extra(local, sta->key, skb, 1384 ieee80211_remove_tx_extra(local, sta->key, skb);
1364 &status->control);
1365 dev_queue_xmit(skb); 1385 dev_queue_xmit(skb);
1366 return; 1386 return;
1367 } 1387 }
1368 1388
1389#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1369 if (net_ratelimit()) 1390 if (net_ratelimit())
1370 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 1391 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1371 "queue_len=%d PS=%d @%lu\n", 1392 "queue_len=%d PS=%d @%lu\n",
1372 wiphy_name(local->hw.wiphy), 1393 wiphy_name(local->hw.wiphy),
1373 skb_queue_len(&sta->tx_filtered), 1394 skb_queue_len(&sta->tx_filtered),
1374 !!(sta->flags & WLAN_STA_PS), jiffies); 1395 !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
1396#endif
1375 dev_kfree_skb(skb); 1397 dev_kfree_skb(skb);
1376} 1398}
1377 1399
1378void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 1400void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1379 struct ieee80211_tx_status *status)
1380{ 1401{
1381 struct sk_buff *skb2; 1402 struct sk_buff *skb2;
1382 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1403 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1383 struct ieee80211_local *local = hw_to_local(hw); 1404 struct ieee80211_local *local = hw_to_local(hw);
1405 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1384 u16 frag, type; 1406 u16 frag, type;
1385 struct ieee80211_tx_status_rtap_hdr *rthdr; 1407 struct ieee80211_tx_status_rtap_hdr *rthdr;
1386 struct ieee80211_sub_if_data *sdata; 1408 struct ieee80211_sub_if_data *sdata;
1387 struct net_device *prev_dev = NULL; 1409 struct net_device *prev_dev = NULL;
1388 1410
1389 if (!status) {
1390 printk(KERN_ERR
1391 "%s: ieee80211_tx_status called with NULL status\n",
1392 wiphy_name(local->hw.wiphy));
1393 dev_kfree_skb(skb);
1394 return;
1395 }
1396
1397 rcu_read_lock(); 1411 rcu_read_lock();
1398 1412
1399 if (status->excessive_retries) { 1413 if (info->status.excessive_retries) {
1400 struct sta_info *sta; 1414 struct sta_info *sta;
1401 sta = sta_info_get(local, hdr->addr1); 1415 sta = sta_info_get(local, hdr->addr1);
1402 if (sta) { 1416 if (sta) {
1403 if (sta->flags & WLAN_STA_PS) { 1417 if (test_sta_flags(sta, WLAN_STA_PS)) {
1404 /* 1418 /*
1405 * The STA is in power save mode, so assume 1419 * The STA is in power save mode, so assume
1406 * that this TX packet failed because of that. 1420 * that this TX packet failed because of that.
1407 */ 1421 */
1408 status->excessive_retries = 0; 1422 ieee80211_handle_filtered_frame(local, sta, skb);
1409 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1410 ieee80211_handle_filtered_frame(local, sta,
1411 skb, status);
1412 rcu_read_unlock(); 1423 rcu_read_unlock();
1413 return; 1424 return;
1414 } 1425 }
1415 } 1426 }
1416 } 1427 }
1417 1428
1418 if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) { 1429 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1419 struct sta_info *sta; 1430 struct sta_info *sta;
1420 sta = sta_info_get(local, hdr->addr1); 1431 sta = sta_info_get(local, hdr->addr1);
1421 if (sta) { 1432 if (sta) {
1422 ieee80211_handle_filtered_frame(local, sta, skb, 1433 ieee80211_handle_filtered_frame(local, sta, skb);
1423 status);
1424 rcu_read_unlock(); 1434 rcu_read_unlock();
1425 return; 1435 return;
1426 } 1436 }
1427 } else 1437 } else
1428 rate_control_tx_status(local->mdev, skb, status); 1438 rate_control_tx_status(local->mdev, skb);
1429 1439
1430 rcu_read_unlock(); 1440 rcu_read_unlock();
1431 1441
@@ -1439,14 +1449,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1439 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1449 frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
1440 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; 1450 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
1441 1451
1442 if (status->flags & IEEE80211_TX_STATUS_ACK) { 1452 if (info->flags & IEEE80211_TX_STAT_ACK) {
1443 if (frag == 0) { 1453 if (frag == 0) {
1444 local->dot11TransmittedFrameCount++; 1454 local->dot11TransmittedFrameCount++;
1445 if (is_multicast_ether_addr(hdr->addr1)) 1455 if (is_multicast_ether_addr(hdr->addr1))
1446 local->dot11MulticastTransmittedFrameCount++; 1456 local->dot11MulticastTransmittedFrameCount++;
1447 if (status->retry_count > 0) 1457 if (info->status.retry_count > 0)
1448 local->dot11RetryCount++; 1458 local->dot11RetryCount++;
1449 if (status->retry_count > 1) 1459 if (info->status.retry_count > 1)
1450 local->dot11MultipleRetryCount++; 1460 local->dot11MultipleRetryCount++;
1451 } 1461 }
1452 1462
@@ -1483,7 +1493,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1483 return; 1493 return;
1484 } 1494 }
1485 1495
1486 rthdr = (struct ieee80211_tx_status_rtap_hdr*) 1496 rthdr = (struct ieee80211_tx_status_rtap_hdr *)
1487 skb_push(skb, sizeof(*rthdr)); 1497 skb_push(skb, sizeof(*rthdr));
1488 1498
1489 memset(rthdr, 0, sizeof(*rthdr)); 1499 memset(rthdr, 0, sizeof(*rthdr));
@@ -1492,17 +1502,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
1492 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | 1502 cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
1493 (1 << IEEE80211_RADIOTAP_DATA_RETRIES)); 1503 (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
1494 1504
1495 if (!(status->flags & IEEE80211_TX_STATUS_ACK) && 1505 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
1496 !is_multicast_ether_addr(hdr->addr1)) 1506 !is_multicast_ether_addr(hdr->addr1))
1497 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); 1507 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
1498 1508
1499 if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) && 1509 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
1500 (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) 1510 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
1501 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); 1511 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
1502 else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) 1512 else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
1503 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); 1513 rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
1504 1514
1505 rthdr->data_retries = status->retry_count; 1515 rthdr->data_retries = info->status.retry_count;
1506 1516
1507 /* XXX: is this sufficient for BPF? */ 1517 /* XXX: is this sufficient for BPF? */
1508 skb_set_mac_header(skb, 0); 1518 skb_set_mac_header(skb, 0);
@@ -1652,12 +1662,32 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1652 if (result < 0) 1662 if (result < 0)
1653 return result; 1663 return result;
1654 1664
1665 /*
1666 * We use the number of queues for feature tests (QoS, HT) internally
1667 * so restrict them appropriately.
1668 */
1669#ifdef CONFIG_MAC80211_QOS
1670 if (hw->queues > IEEE80211_MAX_QUEUES)
1671 hw->queues = IEEE80211_MAX_QUEUES;
1672 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
1673 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
1674 if (hw->queues < 4)
1675 hw->ampdu_queues = 0;
1676#else
1677 hw->queues = 1;
1678 hw->ampdu_queues = 0;
1679#endif
1680
1655 /* for now, mdev needs sub_if_data :/ */ 1681 /* for now, mdev needs sub_if_data :/ */
1656 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), 1682 mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
1657 "wmaster%d", ether_setup); 1683 "wmaster%d", ether_setup,
1684 ieee80211_num_queues(hw));
1658 if (!mdev) 1685 if (!mdev)
1659 goto fail_mdev_alloc; 1686 goto fail_mdev_alloc;
1660 1687
1688 if (ieee80211_num_queues(hw) > 1)
1689 mdev->features |= NETIF_F_MULTI_QUEUE;
1690
1661 sdata = IEEE80211_DEV_TO_SUB_IF(mdev); 1691 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1662 mdev->ieee80211_ptr = &sdata->wdev; 1692 mdev->ieee80211_ptr = &sdata->wdev;
1663 sdata->wdev.wiphy = local->hw.wiphy; 1693 sdata->wdev.wiphy = local->hw.wiphy;
@@ -1684,7 +1714,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1684 list_add_tail(&sdata->list, &local->interfaces); 1714 list_add_tail(&sdata->list, &local->interfaces);
1685 1715
1686 name = wiphy_dev(local->hw.wiphy)->driver->name; 1716 name = wiphy_dev(local->hw.wiphy)->driver->name;
1687 local->hw.workqueue = create_singlethread_workqueue(name); 1717 local->hw.workqueue = create_freezeable_workqueue(name);
1688 if (!local->hw.workqueue) { 1718 if (!local->hw.workqueue) {
1689 result = -ENOMEM; 1719 result = -ENOMEM;
1690 goto fail_workqueue; 1720 goto fail_workqueue;
@@ -1700,15 +1730,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1700 1730
1701 debugfs_hw_add(local); 1731 debugfs_hw_add(local);
1702 1732
1703 local->hw.conf.beacon_int = 1000; 1733 if (local->hw.conf.beacon_int < 10)
1734 local->hw.conf.beacon_int = 100;
1704 1735
1705 local->wstats_flags |= local->hw.max_rssi ? 1736 local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
1706 IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID; 1737 IEEE80211_HW_SIGNAL_DB |
1707 local->wstats_flags |= local->hw.max_signal ? 1738 IEEE80211_HW_SIGNAL_DBM) ?
1708 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID; 1739 IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
1709 local->wstats_flags |= local->hw.max_noise ? 1740 local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
1710 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID; 1741 IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
1711 if (local->hw.max_rssi < 0 || local->hw.max_noise < 0) 1742 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
1712 local->wstats_flags |= IW_QUAL_DBM; 1743 local->wstats_flags |= IW_QUAL_DBM;
1713 1744
1714 result = sta_info_start(local); 1745 result = sta_info_start(local);
@@ -1858,7 +1889,9 @@ static int __init ieee80211_init(void)
1858 struct sk_buff *skb; 1889 struct sk_buff *skb;
1859 int ret; 1890 int ret;
1860 1891
1861 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); 1892 BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
1893 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
1894 IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));
1862 1895
1863 ret = rc80211_pid_init(); 1896 ret = rc80211_pid_init();
1864 if (ret) 1897 if (ret)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 697ef67f96b6..b5933b271491 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -315,6 +315,13 @@ struct mesh_table *mesh_table_alloc(int size_order)
315 return newtbl; 315 return newtbl;
316} 316}
317 317
318static void __mesh_table_free(struct mesh_table *tbl)
319{
320 kfree(tbl->hash_buckets);
321 kfree(tbl->hashwlock);
322 kfree(tbl);
323}
324
318void mesh_table_free(struct mesh_table *tbl, bool free_leafs) 325void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
319{ 326{
320 struct hlist_head *mesh_hash; 327 struct hlist_head *mesh_hash;
@@ -330,9 +337,7 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
330 } 337 }
331 spin_unlock(&tbl->hashwlock[i]); 338 spin_unlock(&tbl->hashwlock[i]);
332 } 339 }
333 kfree(tbl->hash_buckets); 340 __mesh_table_free(tbl);
334 kfree(tbl->hashwlock);
335 kfree(tbl);
336} 341}
337 342
338static void ieee80211_mesh_path_timer(unsigned long data) 343static void ieee80211_mesh_path_timer(unsigned long data)
@@ -349,21 +354,16 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
349{ 354{
350 struct mesh_table *newtbl; 355 struct mesh_table *newtbl;
351 struct hlist_head *oldhash; 356 struct hlist_head *oldhash;
352 struct hlist_node *p; 357 struct hlist_node *p, *q;
353 int err = 0;
354 int i; 358 int i;
355 359
356 if (atomic_read(&tbl->entries) 360 if (atomic_read(&tbl->entries)
357 < tbl->mean_chain_len * (tbl->hash_mask + 1)) { 361 < tbl->mean_chain_len * (tbl->hash_mask + 1))
358 err = -EPERM;
359 goto endgrow; 362 goto endgrow;
360 }
361 363
362 newtbl = mesh_table_alloc(tbl->size_order + 1); 364 newtbl = mesh_table_alloc(tbl->size_order + 1);
363 if (!newtbl) { 365 if (!newtbl)
364 err = -ENOMEM;
365 goto endgrow; 366 goto endgrow;
366 }
367 367
368 newtbl->free_node = tbl->free_node; 368 newtbl->free_node = tbl->free_node;
369 newtbl->mean_chain_len = tbl->mean_chain_len; 369 newtbl->mean_chain_len = tbl->mean_chain_len;
@@ -373,13 +373,19 @@ struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
373 oldhash = tbl->hash_buckets; 373 oldhash = tbl->hash_buckets;
374 for (i = 0; i <= tbl->hash_mask; i++) 374 for (i = 0; i <= tbl->hash_mask; i++)
375 hlist_for_each(p, &oldhash[i]) 375 hlist_for_each(p, &oldhash[i])
376 tbl->copy_node(p, newtbl); 376 if (tbl->copy_node(p, newtbl) < 0)
377 goto errcopy;
377 378
379 return newtbl;
380
381errcopy:
382 for (i = 0; i <= newtbl->hash_mask; i++) {
383 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
384 tbl->free_node(p, 0);
385 }
386 __mesh_table_free(tbl);
378endgrow: 387endgrow:
379 if (err) 388 return NULL;
380 return NULL;
381 else
382 return newtbl;
383} 389}
384 390
385/** 391/**
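A small standalone sketch of the grow-with-rollback pattern mesh_table_grow() adopts above: allocate a larger table, copy entries through a fallible copy step, and on any failure throw the partially built table away and return NULL so the caller keeps the old one. The hash table here is a simplified stand-in (integer keys, singly linked buckets), not the mesh table itself.

/* Illustrative table-grow with rollback; data structures are invented. */
#include <stdlib.h>
#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

struct table {
	struct node **buckets;
	unsigned int mask;               /* bucket count - 1, power of two */
};

static struct table *table_alloc(unsigned int order)
{
	struct table *t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;
	t->mask = (1u << order) - 1;
	t->buckets = calloc(t->mask + 1, sizeof(*t->buckets));
	if (!t->buckets) {
		free(t);
		return NULL;
	}
	return t;
}

static void table_free(struct table *t, int free_nodes)
{
	for (unsigned int i = 0; free_nodes && i <= t->mask; i++) {
		struct node *n = t->buckets[i];
		while (n) {
			struct node *next = n->next;
			free(n);
			n = next;
		}
	}
	free(t->buckets);
	free(t);
}

/* May fail: allocates a fresh node in the new table for each old entry. */
static int copy_node(const struct node *old, struct table *newt)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->key = old->key;
	n->next = newt->buckets[old->key & newt->mask];
	newt->buckets[old->key & newt->mask] = n;
	return 0;
}

/* Returns the new table, or NULL on failure (old table left untouched). */
static struct table *table_grow(const struct table *old, unsigned int new_order)
{
	struct table *newt = table_alloc(new_order);
	if (!newt)
		return NULL;

	for (unsigned int i = 0; i <= old->mask; i++)
		for (struct node *n = old->buckets[i]; n; n = n->next)
			if (copy_node(n, newt) < 0) {
				table_free(newt, 1);    /* roll back everything */
				return NULL;
			}
	return newt;
}

int main(void)
{
	struct table *t = table_alloc(2), *bigger;
	struct node *n = malloc(sizeof(*n));

	if (!t || !n)
		return 1;
	n->key = 7;
	n->next = NULL;
	t->buckets[7 & t->mask] = n;

	bigger = table_grow(t, 3);
	printf("grow %s\n", bigger ? "succeeded" : "failed, old table kept");
	if (bigger)
		table_free(bigger, 1);
	table_free(t, 1);
	return 0;
}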
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2e161f6d8288..669eafafe497 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -109,7 +109,7 @@ struct mesh_table {
109 __u32 hash_rnd; /* Used for hash generation */ 109 __u32 hash_rnd; /* Used for hash generation */
110 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ 110 atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
111 void (*free_node) (struct hlist_node *p, bool free_leafs); 111 void (*free_node) (struct hlist_node *p, bool free_leafs);
112 void (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 112 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
113 int size_order; 113 int size_order;
114 int mean_chain_len; 114 int mean_chain_len;
115}; 115};
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index af0cd1e3e213..7fa149e230e6 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -26,7 +26,7 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
26{ 26{
27 if (ae) 27 if (ae)
28 offset += 6; 28 offset += 6;
29 return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset))); 29 return get_unaligned_le32(preq_elem + offset);
30} 30}
31 31
32/* HWMP IE processing macros */ 32/* HWMP IE processing macros */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 99c2d360888e..5f88a2e6ee50 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
159 return -ENOSPC; 159 return -ENOSPC;
160 160
161 err = -ENOMEM;
161 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 162 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
162 if (!new_mpath) { 163 if (!new_mpath)
163 atomic_dec(&sdata->u.sta.mpaths); 164 goto err_path_alloc;
164 err = -ENOMEM; 165
165 goto endadd2;
166 }
167 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 166 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
168 if (!new_node) { 167 if (!new_node)
169 kfree(new_mpath); 168 goto err_node_alloc;
170 atomic_dec(&sdata->u.sta.mpaths);
171 err = -ENOMEM;
172 goto endadd2;
173 }
174 169
175 read_lock(&pathtbl_resize_lock); 170 read_lock(&pathtbl_resize_lock);
176 memcpy(new_mpath->dst, dst, ETH_ALEN); 171 memcpy(new_mpath->dst, dst, ETH_ALEN);
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
189 184
190 spin_lock(&mesh_paths->hashwlock[hash_idx]); 185 spin_lock(&mesh_paths->hashwlock[hash_idx]);
191 186
187 err = -EEXIST;
192 hlist_for_each_entry(node, n, bucket, list) { 188 hlist_for_each_entry(node, n, bucket, list) {
193 mpath = node->mpath; 189 mpath = node->mpath;
194 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) 190 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
195 == 0) { 191 goto err_exists;
196 err = -EEXIST;
197 atomic_dec(&sdata->u.sta.mpaths);
198 kfree(new_node);
199 kfree(new_mpath);
200 goto endadd;
201 }
202 } 192 }
203 193
204 hlist_add_head_rcu(&new_node->list, bucket); 194 hlist_add_head_rcu(&new_node->list, bucket);
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
206 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 196 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
207 grow = 1; 197 grow = 1;
208 198
209endadd:
210 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 199 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
211 read_unlock(&pathtbl_resize_lock); 200 read_unlock(&pathtbl_resize_lock);
212 if (!err && grow) { 201 if (grow) {
213 struct mesh_table *oldtbl, *newtbl; 202 struct mesh_table *oldtbl, *newtbl;
214 203
215 write_lock(&pathtbl_resize_lock); 204 write_lock(&pathtbl_resize_lock);
@@ -217,7 +206,7 @@ endadd:
217 newtbl = mesh_table_grow(mesh_paths); 206 newtbl = mesh_table_grow(mesh_paths);
218 if (!newtbl) { 207 if (!newtbl) {
219 write_unlock(&pathtbl_resize_lock); 208 write_unlock(&pathtbl_resize_lock);
220 return -ENOMEM; 209 return 0;
221 } 210 }
222 rcu_assign_pointer(mesh_paths, newtbl); 211 rcu_assign_pointer(mesh_paths, newtbl);
223 write_unlock(&pathtbl_resize_lock); 212 write_unlock(&pathtbl_resize_lock);
@@ -225,7 +214,16 @@ endadd:
225 synchronize_rcu(); 214 synchronize_rcu();
226 mesh_table_free(oldtbl, false); 215 mesh_table_free(oldtbl, false);
227 } 216 }
228endadd2: 217 return 0;
218
219err_exists:
220 spin_unlock(&mesh_paths->hashwlock[hash_idx]);
221 read_unlock(&pathtbl_resize_lock);
222 kfree(new_node);
223err_node_alloc:
224 kfree(new_mpath);
225err_path_alloc:
226 atomic_dec(&sdata->u.sta.mpaths);
229 return err; 227 return err;
230} 228}
231 229
@@ -264,7 +262,6 @@ void mesh_plink_broken(struct sta_info *sta)
264 } 262 }
265 rcu_read_unlock(); 263 rcu_read_unlock();
266} 264}
267EXPORT_SYMBOL(mesh_plink_broken);
268 265
269/** 266/**
270 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches 267 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
@@ -460,25 +457,28 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
460 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 457 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
461 mpath = node->mpath; 458 mpath = node->mpath;
462 hlist_del_rcu(p); 459 hlist_del_rcu(p);
463 synchronize_rcu();
464 if (free_leafs) 460 if (free_leafs)
465 kfree(mpath); 461 kfree(mpath);
466 kfree(node); 462 kfree(node);
467} 463}
468 464
469static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) 465static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
470{ 466{
471 struct mesh_path *mpath; 467 struct mesh_path *mpath;
472 struct mpath_node *node, *new_node; 468 struct mpath_node *node, *new_node;
473 u32 hash_idx; 469 u32 hash_idx;
474 470
471 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
472 if (new_node == NULL)
473 return -ENOMEM;
474
475 node = hlist_entry(p, struct mpath_node, list); 475 node = hlist_entry(p, struct mpath_node, list);
476 mpath = node->mpath; 476 mpath = node->mpath;
477 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
478 new_node->mpath = mpath; 477 new_node->mpath = mpath;
479 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); 478 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
480 hlist_add_head(&new_node->list, 479 hlist_add_head(&new_node->list,
481 &newtbl->hash_buckets[hash_idx]); 480 &newtbl->hash_buckets[hash_idx]);
481 return 0;
482} 482}
483 483
484int mesh_pathtbl_init(void) 484int mesh_pathtbl_init(void)
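A compact sketch of the goto-based unwinding that the mesh_path_add() rewrite above uses: set err before each step that can fail, jump to a label that releases only what has been acquired so far, and undo the initial reservation (the atomic_add_unless() counter in the real code) last. All names and the failure injection below are invented for the example.

/* Illustrative error-unwind ladder; not the kernel function. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct path  { int dst; };
struct pnode { struct path *p; };

static int refcount;

static int path_add(int dst, int fail_step)
{
	struct path *new_path = NULL;
	struct pnode *new_node = NULL;
	int err;

	refcount++;                      /* reservation taken first */

	err = -ENOMEM;
	new_path = (fail_step == 1) ? NULL : malloc(sizeof(*new_path));
	if (!new_path)
		goto err_path_alloc;

	new_node = (fail_step == 2) ? NULL : malloc(sizeof(*new_node));
	if (!new_node)
		goto err_node_alloc;

	err = -EEXIST;
	if (fail_step == 3)              /* pretend the entry already exists */
		goto err_exists;

	new_path->dst = dst;
	new_node->p = new_path;
	/* ...would be linked into a table here; freed only to keep the demo leak-free... */
	free(new_node);
	free(new_path);
	return 0;

err_exists:
	free(new_node);
err_node_alloc:
	free(new_path);
err_path_alloc:
	refcount--;                      /* undo the reservation last */
	return err;
}

int main(void)
{
	for (int step = 0; step <= 3; step++)
		printf("fail_step=%d -> %d (refcount=%d)\n",
		       step, path_add(1, step), refcount);
	return 0;
}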
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 37f0c2b94ae7..9efeb1f07025 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -79,7 +79,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
79 * 79 *
 80 * @sta: mesh peer link to restart 80 * @sta: mesh peer link to restart
81 * 81 *
82 * Locking: this function must be called holding sta->plink_lock 82 * Locking: this function must be called holding sta->lock
83 */ 83 */
84static inline void mesh_plink_fsm_restart(struct sta_info *sta) 84static inline void mesh_plink_fsm_restart(struct sta_info *sta)
85{ 85{
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags |= WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED;
109 sta->supp_rates[local->hw.conf.channel->band] = rates; 109 sta->supp_rates[local->hw.conf.channel->band] = rates;
110 110
111 return sta; 111 return sta;
@@ -118,7 +118,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
118 * 118 *
119 * All mesh paths with this peer as next hop will be flushed 119 * All mesh paths with this peer as next hop will be flushed
120 * 120 *
121 * Locking: the caller must hold sta->plink_lock 121 * Locking: the caller must hold sta->lock
122 */ 122 */
123static void __mesh_plink_deactivate(struct sta_info *sta) 123static void __mesh_plink_deactivate(struct sta_info *sta)
124{ 124{
@@ -139,9 +139,9 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
139 */ 139 */
140void mesh_plink_deactivate(struct sta_info *sta) 140void mesh_plink_deactivate(struct sta_info *sta)
141{ 141{
142 spin_lock_bh(&sta->plink_lock); 142 spin_lock_bh(&sta->lock);
143 __mesh_plink_deactivate(sta); 143 __mesh_plink_deactivate(sta);
144 spin_unlock_bh(&sta->plink_lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct net_device *dev,
@@ -270,10 +270,10 @@ static void mesh_plink_timer(unsigned long data)
270 */ 270 */
271 sta = (struct sta_info *) data; 271 sta = (struct sta_info *) data;
272 272
273 spin_lock_bh(&sta->plink_lock); 273 spin_lock_bh(&sta->lock);
274 if (sta->ignore_plink_timer) { 274 if (sta->ignore_plink_timer) {
275 sta->ignore_plink_timer = false; 275 sta->ignore_plink_timer = false;
276 spin_unlock_bh(&sta->plink_lock); 276 spin_unlock_bh(&sta->lock);
277 return; 277 return;
278 } 278 }
279 mpl_dbg("Mesh plink timer for %s fired on state %d\n", 279 mpl_dbg("Mesh plink timer for %s fired on state %d\n",
@@ -298,7 +298,7 @@ static void mesh_plink_timer(unsigned long data)
298 rand % sta->plink_timeout; 298 rand % sta->plink_timeout;
299 ++sta->plink_retries; 299 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 300 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->plink_lock); 301 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
303 0, 0); 303 0, 0);
304 break; 304 break;
@@ -311,7 +311,7 @@ static void mesh_plink_timer(unsigned long data)
311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 311 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
312 sta->plink_state = PLINK_HOLDING; 312 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->plink_lock); 314 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid,
316 reason); 316 reason);
317 break; 317 break;
@@ -319,10 +319,10 @@ static void mesh_plink_timer(unsigned long data)
319 /* holding timer */ 319 /* holding timer */
320 del_timer(&sta->plink_timer); 320 del_timer(&sta->plink_timer);
321 mesh_plink_fsm_restart(sta); 321 mesh_plink_fsm_restart(sta);
322 spin_unlock_bh(&sta->plink_lock); 322 spin_unlock_bh(&sta->lock);
323 break; 323 break;
324 default: 324 default:
325 spin_unlock_bh(&sta->plink_lock); 325 spin_unlock_bh(&sta->lock);
326 break; 326 break;
327 } 327 }
328} 328}
@@ -344,16 +344,16 @@ int mesh_plink_open(struct sta_info *sta)
344 DECLARE_MAC_BUF(mac); 344 DECLARE_MAC_BUF(mac);
345#endif 345#endif
346 346
347 spin_lock_bh(&sta->plink_lock); 347 spin_lock_bh(&sta->lock);
348 get_random_bytes(&llid, 2); 348 get_random_bytes(&llid, 2);
349 sta->llid = llid; 349 sta->llid = llid;
350 if (sta->plink_state != PLINK_LISTEN) { 350 if (sta->plink_state != PLINK_LISTEN) {
351 spin_unlock_bh(&sta->plink_lock); 351 spin_unlock_bh(&sta->lock);
352 return -EBUSY; 352 return -EBUSY;
353 } 353 }
354 sta->plink_state = PLINK_OPN_SNT; 354 sta->plink_state = PLINK_OPN_SNT;
355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
356 spin_unlock_bh(&sta->plink_lock); 356 spin_unlock_bh(&sta->lock);
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 357 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 358 print_mac(mac, sta->addr));
359 359
@@ -367,10 +367,10 @@ void mesh_plink_block(struct sta_info *sta)
367 DECLARE_MAC_BUF(mac); 367 DECLARE_MAC_BUF(mac);
368#endif 368#endif
369 369
370 spin_lock_bh(&sta->plink_lock); 370 spin_lock_bh(&sta->lock);
371 __mesh_plink_deactivate(sta); 371 __mesh_plink_deactivate(sta);
372 sta->plink_state = PLINK_BLOCKED; 372 sta->plink_state = PLINK_BLOCKED;
373 spin_unlock_bh(&sta->plink_lock); 373 spin_unlock_bh(&sta->lock);
374} 374}
375 375
376int mesh_plink_close(struct sta_info *sta) 376int mesh_plink_close(struct sta_info *sta)
@@ -383,14 +383,14 @@ int mesh_plink_close(struct sta_info *sta)
383 383
384 mpl_dbg("Mesh plink: closing link with %s\n", 384 mpl_dbg("Mesh plink: closing link with %s\n",
385 print_mac(mac, sta->addr)); 385 print_mac(mac, sta->addr));
386 spin_lock_bh(&sta->plink_lock); 386 spin_lock_bh(&sta->lock);
387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); 387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
388 reason = sta->reason; 388 reason = sta->reason;
389 389
390 if (sta->plink_state == PLINK_LISTEN || 390 if (sta->plink_state == PLINK_LISTEN ||
391 sta->plink_state == PLINK_BLOCKED) { 391 sta->plink_state == PLINK_BLOCKED) {
392 mesh_plink_fsm_restart(sta); 392 mesh_plink_fsm_restart(sta);
393 spin_unlock_bh(&sta->plink_lock); 393 spin_unlock_bh(&sta->lock);
394 return 0; 394 return 0;
395 } else if (sta->plink_state == PLINK_ESTAB) { 395 } else if (sta->plink_state == PLINK_ESTAB) {
396 __mesh_plink_deactivate(sta); 396 __mesh_plink_deactivate(sta);
@@ -402,7 +402,7 @@ int mesh_plink_close(struct sta_info *sta)
402 sta->plink_state = PLINK_HOLDING; 402 sta->plink_state = PLINK_HOLDING;
403 llid = sta->llid; 403 llid = sta->llid;
404 plid = sta->plid; 404 plid = sta->plid;
405 spin_unlock_bh(&sta->plink_lock); 405 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid,
407 plid, reason); 407 plid, reason);
408 return 0; 408 return 0;
@@ -490,7 +490,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
490 /* avoid warning */ 490 /* avoid warning */
491 break; 491 break;
492 } 492 }
493 spin_lock_bh(&sta->plink_lock); 493 spin_lock_bh(&sta->lock);
494 } else if (!sta) { 494 } else if (!sta) {
495 /* ftype == PLINK_OPEN */ 495 /* ftype == PLINK_OPEN */
496 u64 rates; 496 u64 rates;
@@ -512,9 +512,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
512 return; 512 return;
513 } 513 }
514 event = OPN_ACPT; 514 event = OPN_ACPT;
515 spin_lock_bh(&sta->plink_lock); 515 spin_lock_bh(&sta->lock);
516 } else { 516 } else {
517 spin_lock_bh(&sta->plink_lock); 517 spin_lock_bh(&sta->lock);
518 switch (ftype) { 518 switch (ftype) {
519 case PLINK_OPEN: 519 case PLINK_OPEN:
520 if (!mesh_plink_free_count(sdata) || 520 if (!mesh_plink_free_count(sdata) ||
@@ -551,7 +551,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
551 break; 551 break;
552 default: 552 default:
553 mpl_dbg("Mesh plink: unknown frame subtype\n"); 553 mpl_dbg("Mesh plink: unknown frame subtype\n");
554 spin_unlock_bh(&sta->plink_lock); 554 spin_unlock_bh(&sta->lock);
555 rcu_read_unlock(); 555 rcu_read_unlock();
556 return; 556 return;
557 } 557 }
@@ -568,7 +568,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
568 switch (event) { 568 switch (event) {
569 case CLS_ACPT: 569 case CLS_ACPT:
570 mesh_plink_fsm_restart(sta); 570 mesh_plink_fsm_restart(sta);
571 spin_unlock_bh(&sta->plink_lock); 571 spin_unlock_bh(&sta->lock);
572 break; 572 break;
573 case OPN_ACPT: 573 case OPN_ACPT:
574 sta->plink_state = PLINK_OPN_RCVD; 574 sta->plink_state = PLINK_OPN_RCVD;
@@ -576,14 +576,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
576 get_random_bytes(&llid, 2); 576 get_random_bytes(&llid, 2);
577 sta->llid = llid; 577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->plink_lock); 579 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid,
581 0, 0); 581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr,
583 llid, plid, 0); 583 llid, plid, 0);
584 break; 584 break;
585 default: 585 default:
586 spin_unlock_bh(&sta->plink_lock); 586 spin_unlock_bh(&sta->lock);
587 break; 587 break;
588 } 588 }
589 break; 589 break;
@@ -603,7 +603,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
603 sta->ignore_plink_timer = true; 603 sta->ignore_plink_timer = true;
604 604
605 llid = sta->llid; 605 llid = sta->llid;
606 spin_unlock_bh(&sta->plink_lock); 606 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
608 plid, reason); 608 plid, reason);
609 break; 609 break;
@@ -612,7 +612,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
612 sta->plink_state = PLINK_OPN_RCVD; 612 sta->plink_state = PLINK_OPN_RCVD;
613 sta->plid = plid; 613 sta->plid = plid;
614 llid = sta->llid; 614 llid = sta->llid;
615 spin_unlock_bh(&sta->plink_lock); 615 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
617 plid, 0); 617 plid, 0);
618 break; 618 break;
@@ -622,10 +622,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
622 dot11MeshConfirmTimeout(sdata))) 622 dot11MeshConfirmTimeout(sdata)))
623 sta->ignore_plink_timer = true; 623 sta->ignore_plink_timer = true;
624 624
625 spin_unlock_bh(&sta->plink_lock); 625 spin_unlock_bh(&sta->lock);
626 break; 626 break;
627 default: 627 default:
628 spin_unlock_bh(&sta->plink_lock); 628 spin_unlock_bh(&sta->lock);
629 break; 629 break;
630 } 630 }
631 break; 631 break;
@@ -645,13 +645,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
645 sta->ignore_plink_timer = true; 645 sta->ignore_plink_timer = true;
646 646
647 llid = sta->llid; 647 llid = sta->llid;
648 spin_unlock_bh(&sta->plink_lock); 648 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
650 plid, reason); 650 plid, reason);
651 break; 651 break;
652 case OPN_ACPT: 652 case OPN_ACPT:
653 llid = sta->llid; 653 llid = sta->llid;
654 spin_unlock_bh(&sta->plink_lock); 654 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
656 plid, 0); 656 plid, 0);
657 break; 657 break;
@@ -659,12 +659,12 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
659 del_timer(&sta->plink_timer); 659 del_timer(&sta->plink_timer);
660 sta->plink_state = PLINK_ESTAB; 660 sta->plink_state = PLINK_ESTAB;
661 mesh_plink_inc_estab_count(sdata); 661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->plink_lock); 662 spin_unlock_bh(&sta->lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr)); 664 print_mac(mac, sta->addr));
665 break; 665 break;
666 default: 666 default:
667 spin_unlock_bh(&sta->plink_lock); 667 spin_unlock_bh(&sta->lock);
668 break; 668 break;
669 } 669 }
670 break; 670 break;
@@ -684,7 +684,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
684 sta->ignore_plink_timer = true; 684 sta->ignore_plink_timer = true;
685 685
686 llid = sta->llid; 686 llid = sta->llid;
687 spin_unlock_bh(&sta->plink_lock); 687 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
689 plid, reason); 689 plid, reason);
690 break; 690 break;
@@ -692,14 +692,14 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
692 del_timer(&sta->plink_timer); 692 del_timer(&sta->plink_timer);
693 sta->plink_state = PLINK_ESTAB; 693 sta->plink_state = PLINK_ESTAB;
694 mesh_plink_inc_estab_count(sdata); 694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->plink_lock); 695 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 697 print_mac(mac, sta->addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
699 plid, 0); 699 plid, 0);
700 break; 700 break;
701 default: 701 default:
702 spin_unlock_bh(&sta->plink_lock); 702 spin_unlock_bh(&sta->lock);
703 break; 703 break;
704 } 704 }
705 break; 705 break;
@@ -713,18 +713,18 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
713 sta->plink_state = PLINK_HOLDING; 713 sta->plink_state = PLINK_HOLDING;
714 llid = sta->llid; 714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->plink_lock); 716 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
718 plid, reason); 718 plid, reason);
719 break; 719 break;
720 case OPN_ACPT: 720 case OPN_ACPT:
721 llid = sta->llid; 721 llid = sta->llid;
722 spin_unlock_bh(&sta->plink_lock); 722 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid,
724 plid, 0); 724 plid, 0);
725 break; 725 break;
726 default: 726 default:
727 spin_unlock_bh(&sta->plink_lock); 727 spin_unlock_bh(&sta->lock);
728 break; 728 break;
729 } 729 }
730 break; 730 break;
@@ -734,7 +734,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
734 if (del_timer(&sta->plink_timer)) 734 if (del_timer(&sta->plink_timer))
735 sta->ignore_plink_timer = 1; 735 sta->ignore_plink_timer = 1;
736 mesh_plink_fsm_restart(sta); 736 mesh_plink_fsm_restart(sta);
737 spin_unlock_bh(&sta->plink_lock); 737 spin_unlock_bh(&sta->lock);
738 break; 738 break;
739 case OPN_ACPT: 739 case OPN_ACPT:
740 case CNF_ACPT: 740 case CNF_ACPT:
@@ -742,19 +742,19 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
742 case CNF_RJCT: 742 case CNF_RJCT:
743 llid = sta->llid; 743 llid = sta->llid;
744 reason = sta->reason; 744 reason = sta->reason;
745 spin_unlock_bh(&sta->plink_lock); 745 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid,
747 plid, reason); 747 plid, reason);
748 break; 748 break;
749 default: 749 default:
750 spin_unlock_bh(&sta->plink_lock); 750 spin_unlock_bh(&sta->lock);
751 } 751 }
752 break; 752 break;
753 default: 753 default:
754 /* should not get here, PLINK_BLOCKED is dealt with at the 754 /* should not get here, PLINK_BLOCKED is dealt with at the
 755 * beginning of the function 755 * beginning of the function
756 */ 756 */
757 spin_unlock_bh(&sta->plink_lock); 757 spin_unlock_bh(&sta->lock);
758 break; 758 break;
759 } 759 }
760 760
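A user-space sketch of the locking discipline the plink changes above follow with the consolidated sta->lock: update the FSM state and snapshot whatever the frame needs while holding the lock, then drop it before transmitting so the TX path never runs under the lock. A pthread mutex stands in for the kernel spinlock; the states and values are illustrative only.

/* Illustrative lock-then-tx ordering; not the mesh plink FSM itself. */
#include <pthread.h>
#include <stdio.h>

enum plink_state { PLINK_LISTEN, PLINK_OPN_SNT, PLINK_HOLDING };

struct sta {
	pthread_mutex_t lock;            /* single per-station lock */
	enum plink_state state;
	unsigned int llid;
};

static void frame_tx(const char *what, unsigned int llid)
{
	/* Must not be called with sta->lock held. */
	printf("tx %s (llid=%u)\n", what, llid);
}

static int plink_open(struct sta *sta)
{
	unsigned int llid;

	pthread_mutex_lock(&sta->lock);
	if (sta->state != PLINK_LISTEN) {
		pthread_mutex_unlock(&sta->lock);
		return -1;               /* -EBUSY in the real code */
	}
	sta->llid = 42;                  /* get_random_bytes() stand-in */
	sta->state = PLINK_OPN_SNT;
	llid = sta->llid;                /* snapshot what TX needs */
	pthread_mutex_unlock(&sta->lock);

	frame_tx("PLINK_OPEN", llid);    /* outside the lock */
	return 0;
}

int main(void)
{
	struct sta sta = { .lock = PTHREAD_MUTEX_INITIALIZER,
			   .state = PLINK_LISTEN };

	plink_open(&sta);
	printf("second open: %d\n", plink_open(&sta));  /* busy */
	return 0;
}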
diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c
index 0f844f7895f1..1fcdf38cf60c 100644
--- a/net/mac80211/michael.c
+++ b/net/mac80211/michael.c
@@ -6,85 +6,58 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9
10#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/bitops.h>
11#include <asm/unaligned.h>
11 12
12#include "michael.h" 13#include "michael.h"
13 14
14static inline u32 rotr(u32 val, int bits) 15static void michael_block(struct michael_mic_ctx *mctx, u32 val)
15{
16 return (val >> bits) | (val << (32 - bits));
17}
18
19
20static inline u32 rotl(u32 val, int bits)
21{
22 return (val << bits) | (val >> (32 - bits));
23}
24
25
26static inline u32 xswap(u32 val)
27{
28 return ((val & 0xff00ff00) >> 8) | ((val & 0x00ff00ff) << 8);
29}
30
31
32#define michael_block(l, r) \
33do { \
34 r ^= rotl(l, 17); \
35 l += r; \
36 r ^= xswap(l); \
37 l += r; \
38 r ^= rotl(l, 3); \
39 l += r; \
40 r ^= rotr(l, 2); \
41 l += r; \
42} while (0)
43
44
45static inline u32 michael_get32(u8 *data)
46{ 16{
47 return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); 17 mctx->l ^= val;
18 mctx->r ^= rol32(mctx->l, 17);
19 mctx->l += mctx->r;
20 mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
21 ((mctx->l & 0x00ff00ff) << 8);
22 mctx->l += mctx->r;
23 mctx->r ^= rol32(mctx->l, 3);
24 mctx->l += mctx->r;
25 mctx->r ^= ror32(mctx->l, 2);
26 mctx->l += mctx->r;
48} 27}
49 28
50 29static void michael_mic_hdr(struct michael_mic_ctx *mctx,
51static inline void michael_put32(u32 val, u8 *data) 30 const u8 *key, const u8 *da, const u8 *sa, u8 priority)
52{ 31{
53 data[0] = val & 0xff; 32 mctx->l = get_unaligned_le32(key);
54 data[1] = (val >> 8) & 0xff; 33 mctx->r = get_unaligned_le32(key + 4);
55 data[2] = (val >> 16) & 0xff; 34
56 data[3] = (val >> 24) & 0xff; 35 /*
36 * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
37 * calculation, but it is _not_ transmitted
38 */
39 michael_block(mctx, get_unaligned_le32(da));
40 michael_block(mctx, get_unaligned_le16(&da[4]) |
41 (get_unaligned_le16(sa) << 16));
42 michael_block(mctx, get_unaligned_le32(&sa[2]));
43 michael_block(mctx, priority);
57} 44}
58 45
59 46void michael_mic(const u8 *key, const u8 *da, const u8 *sa, u8 priority,
60void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, 47 const u8 *data, size_t data_len, u8 *mic)
61 u8 *data, size_t data_len, u8 *mic)
62{ 48{
63 u32 l, r, val; 49 u32 val;
64 size_t block, blocks, left; 50 size_t block, blocks, left;
51 struct michael_mic_ctx mctx;
65 52
66 l = michael_get32(key); 53 michael_mic_hdr(&mctx, key, da, sa, priority);
67 r = michael_get32(key + 4);
68
69 /* A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
70 * calculation, but it is _not_ transmitted */
71 l ^= michael_get32(da);
72 michael_block(l, r);
73 l ^= da[4] | (da[5] << 8) | (sa[0] << 16) | (sa[1] << 24);
74 michael_block(l, r);
75 l ^= michael_get32(&sa[2]);
76 michael_block(l, r);
77 l ^= priority;
78 michael_block(l, r);
79 54
80 /* Real data */ 55 /* Real data */
81 blocks = data_len / 4; 56 blocks = data_len / 4;
82 left = data_len % 4; 57 left = data_len % 4;
83 58
84 for (block = 0; block < blocks; block++) { 59 for (block = 0; block < blocks; block++)
85 l ^= michael_get32(&data[block * 4]); 60 michael_block(&mctx, get_unaligned_le32(&data[block * 4]));
86 michael_block(l, r);
87 }
88 61
89 /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make 62 /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make
90 * total length a multiple of 4. */ 63 * total length a multiple of 4. */
@@ -94,11 +67,10 @@ void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority,
94 left--; 67 left--;
95 val |= data[blocks * 4 + left]; 68 val |= data[blocks * 4 + left];
96 } 69 }
97 l ^= val;
98 michael_block(l, r);
99 /* last block is zero, so l ^ 0 = l */
100 michael_block(l, r);
101 70
102 michael_put32(l, mic); 71 michael_block(&mctx, val);
103 michael_put32(r, mic + 4); 72 michael_block(&mctx, 0);
73
74 put_unaligned_le32(mctx.l, mic);
75 put_unaligned_le32(mctx.r, mic + 4);
104} 76}
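A self-contained re-creation of the restructured Michael MIC shown above: a two-word context, one block function built from rol32/ror32 and a byte swap, a pseudo header (DA, SA, priority, three zero bytes) that is mixed into the MIC but never transmitted, and 0x5a padding before the two final blocks. The little-endian helpers replace the kernel's unaligned accessors; this is a sketch for reading along, not the kernel module.

/* Standalone Michael MIC following the structure of the new michael.c. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct mic_ctx { uint32_t l, r; };

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void mic_block(struct mic_ctx *m, uint32_t val)
{
	m->l ^= val;
	m->r ^= rol32(m->l, 17);
	m->l += m->r;
	m->r ^= ((m->l & 0xff00ff00) >> 8) | ((m->l & 0x00ff00ff) << 8);
	m->l += m->r;
	m->r ^= rol32(m->l, 3);
	m->l += m->r;
	m->r ^= ror32(m->l, 2);
	m->l += m->r;
}

static void michael_mic(const uint8_t key[8], const uint8_t da[6],
			const uint8_t sa[6], uint8_t priority,
			const uint8_t *data, size_t len, uint8_t mic[8])
{
	struct mic_ctx m = { get_le32(key), get_le32(key + 4) };
	size_t blocks = len / 4, left = len % 4, i;
	uint32_t val;

	/* Pseudo header: DA, SA, priority, three zero bytes. */
	mic_block(&m, get_le32(da));
	mic_block(&m, da[4] | (da[5] << 8) | (sa[0] << 16) | ((uint32_t)sa[1] << 24));
	mic_block(&m, get_le32(sa + 2));
	mic_block(&m, priority);

	/* Real data, 4 bytes at a time, little endian. */
	for (i = 0; i < blocks; i++)
		mic_block(&m, get_le32(data + i * 4));

	/* Pad the 0..3 byte tail with 0x5a and zeros, then run two final blocks. */
	val = 0x5a;
	while (left > 0) {
		val <<= 8;
		left--;
		val |= data[blocks * 4 + left];
	}
	mic_block(&m, val);
	mic_block(&m, 0);

	put_le32(m.l, mic);
	put_le32(m.r, mic + 4);
}

int main(void)
{
	const uint8_t key[8] = { 0 }, da[6] = { 0 }, sa[6] = { 0 };
	uint8_t mic[8];

	michael_mic(key, da, sa, 0, (const uint8_t *)"", 0, mic);
	for (int i = 0; i < 8; i++)
		printf("%02x", mic[i]);
	printf("\n");
	return 0;
}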
diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h
index 2e6aebabeea1..69b4501f13ba 100644
--- a/net/mac80211/michael.h
+++ b/net/mac80211/michael.h
@@ -14,7 +14,11 @@
14 14
15#define MICHAEL_MIC_LEN 8 15#define MICHAEL_MIC_LEN 8
16 16
17void michael_mic(u8 *key, u8 *da, u8 *sa, u8 priority, 17struct michael_mic_ctx {
18 u8 *data, size_t data_len, u8 *mic); 18 u32 l, r;
19};
20
21void michael_mic(const u8 *key, const u8 *da, const u8 *sa, u8 priority,
22 const u8 *data, size_t data_len, u8 *mic);
19 23
20#endif /* MICHAEL_H */ 24#endif /* MICHAEL_H */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4d2b582dd055..4a3bddd206d8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -87,6 +87,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
87 u8 *ssid, size_t ssid_len); 87 u8 *ssid, size_t ssid_len);
88static int ieee80211_sta_config_auth(struct net_device *dev, 88static int ieee80211_sta_config_auth(struct net_device *dev,
89 struct ieee80211_if_sta *ifsta); 89 struct ieee80211_if_sta *ifsta);
90static void sta_rx_agg_session_timer_expired(unsigned long data);
90 91
91 92
92void ieee802_11_parse_elems(u8 *start, size_t len, 93void ieee802_11_parse_elems(u8 *start, size_t len,
@@ -203,6 +204,25 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
203 elems->perr = pos; 204 elems->perr = pos;
204 elems->perr_len = elen; 205 elems->perr_len = elen;
205 break; 206 break;
207 case WLAN_EID_CHANNEL_SWITCH:
208 elems->ch_switch_elem = pos;
209 elems->ch_switch_elem_len = elen;
210 break;
211 case WLAN_EID_QUIET:
212 if (!elems->quiet_elem) {
213 elems->quiet_elem = pos;
214 elems->quiet_elem_len = elen;
215 }
216 elems->num_of_quiet_elem++;
217 break;
218 case WLAN_EID_COUNTRY:
219 elems->country_elem = pos;
220 elems->country_elem_len = elen;
221 break;
222 case WLAN_EID_PWR_CONSTRAINT:
223 elems->pwr_constr_elem = pos;
224 elems->pwr_constr_elem_len = elen;
225 break;
206 default: 226 default:
207 break; 227 break;
208 } 228 }
@@ -256,19 +276,8 @@ static void ieee80211_sta_def_wmm_params(struct net_device *dev,
256 qparam.cw_max = 1023; 276 qparam.cw_max = 1023;
257 qparam.txop = 0; 277 qparam.txop = 0;
258 278
259 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) 279 for (i = 0; i < local_to_hw(local)->queues; i++)
260 local->ops->conf_tx(local_to_hw(local), 280 local->ops->conf_tx(local_to_hw(local), i, &qparam);
261 i + IEEE80211_TX_QUEUE_DATA0,
262 &qparam);
263
264 if (ibss) {
265 /* IBSS uses different parameters for Beacon sending */
266 qparam.cw_min++;
267 qparam.cw_min *= 2;
268 qparam.cw_min--;
269 local->ops->conf_tx(local_to_hw(local),
270 IEEE80211_TX_QUEUE_BEACON, &qparam);
271 }
272 } 281 }
273} 282}
274 283
@@ -282,6 +291,12 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
282 int count; 291 int count;
283 u8 *pos; 292 u8 *pos;
284 293
294 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
295 return;
296
297 if (!wmm_param)
298 return;
299
285 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 300 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
286 return; 301 return;
287 count = wmm_param[6] & 0x0f; 302 count = wmm_param[6] & 0x0f;
@@ -305,29 +320,25 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
305 320
306 switch (aci) { 321 switch (aci) {
307 case 1: 322 case 1:
308 queue = IEEE80211_TX_QUEUE_DATA3; 323 queue = 3;
309 if (acm) { 324 if (acm)
310 local->wmm_acm |= BIT(0) | BIT(3); 325 local->wmm_acm |= BIT(0) | BIT(3);
311 }
312 break; 326 break;
313 case 2: 327 case 2:
314 queue = IEEE80211_TX_QUEUE_DATA1; 328 queue = 1;
315 if (acm) { 329 if (acm)
316 local->wmm_acm |= BIT(4) | BIT(5); 330 local->wmm_acm |= BIT(4) | BIT(5);
317 }
318 break; 331 break;
319 case 3: 332 case 3:
320 queue = IEEE80211_TX_QUEUE_DATA0; 333 queue = 0;
321 if (acm) { 334 if (acm)
322 local->wmm_acm |= BIT(6) | BIT(7); 335 local->wmm_acm |= BIT(6) | BIT(7);
323 }
324 break; 336 break;
325 case 0: 337 case 0:
326 default: 338 default:
327 queue = IEEE80211_TX_QUEUE_DATA2; 339 queue = 2;
328 if (acm) { 340 if (acm)
329 local->wmm_acm |= BIT(1) | BIT(2); 341 local->wmm_acm |= BIT(1) | BIT(2);
330 }
331 break; 342 break;
332 } 343 }
333 344
@@ -335,7 +346,7 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
335 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 346 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
336 params.cw_min = ecw2cw(pos[1] & 0x0f); 347 params.cw_min = ecw2cw(pos[1] & 0x0f);
337 params.txop = pos[2] | (pos[3] << 8); 348 params.txop = pos[2] | (pos[3] << 8);
338#ifdef CONFIG_MAC80211_DEBUG 349#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
339 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 350 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
340 "cWmin=%d cWmax=%d txop=%d\n", 351 "cWmin=%d cWmax=%d txop=%d\n",
341 dev->name, queue, aci, acm, params.aifs, params.cw_min, 352 dev->name, queue, aci, acm, params.aifs, params.cw_min,
@@ -360,6 +371,7 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
360 u32 changed = 0; 371 u32 changed = 0;
361 372
362 if (use_protection != bss_conf->use_cts_prot) { 373 if (use_protection != bss_conf->use_cts_prot) {
374#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
363 if (net_ratelimit()) { 375 if (net_ratelimit()) {
364 printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" 376 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
365 "%s)\n", 377 "%s)\n",
@@ -367,11 +379,13 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
367 use_protection ? "enabled" : "disabled", 379 use_protection ? "enabled" : "disabled",
368 print_mac(mac, ifsta->bssid)); 380 print_mac(mac, ifsta->bssid));
369 } 381 }
382#endif
370 bss_conf->use_cts_prot = use_protection; 383 bss_conf->use_cts_prot = use_protection;
371 changed |= BSS_CHANGED_ERP_CTS_PROT; 384 changed |= BSS_CHANGED_ERP_CTS_PROT;
372 } 385 }
373 386
374 if (use_short_preamble != bss_conf->use_short_preamble) { 387 if (use_short_preamble != bss_conf->use_short_preamble) {
388#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
375 if (net_ratelimit()) { 389 if (net_ratelimit()) {
376 printk(KERN_DEBUG "%s: switched to %s barker preamble" 390 printk(KERN_DEBUG "%s: switched to %s barker preamble"
377 " (BSSID=%s)\n", 391 " (BSSID=%s)\n",
@@ -379,6 +393,7 @@ static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
379 use_short_preamble ? "short" : "long", 393 use_short_preamble ? "short" : "long",
380 print_mac(mac, ifsta->bssid)); 394 print_mac(mac, ifsta->bssid));
381 } 395 }
396#endif
382 bss_conf->use_short_preamble = use_short_preamble; 397 bss_conf->use_short_preamble = use_short_preamble;
383 changed |= BSS_CHANGED_ERP_PREAMBLE; 398 changed |= BSS_CHANGED_ERP_PREAMBLE;
384 } 399 }
@@ -586,7 +601,7 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
586 int encrypt) 601 int encrypt)
587{ 602{
588 struct ieee80211_sub_if_data *sdata; 603 struct ieee80211_sub_if_data *sdata;
589 struct ieee80211_tx_packet_data *pkt_data; 604 struct ieee80211_tx_info *info;
590 605
591 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 606 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
592 skb->dev = sdata->local->mdev; 607 skb->dev = sdata->local->mdev;
@@ -594,11 +609,11 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
594 skb_set_network_header(skb, 0); 609 skb_set_network_header(skb, 0);
595 skb_set_transport_header(skb, 0); 610 skb_set_transport_header(skb, 0);
596 611
597 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 612 info = IEEE80211_SKB_CB(skb);
598 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 613 memset(info, 0, sizeof(struct ieee80211_tx_info));
599 pkt_data->ifindex = sdata->dev->ifindex; 614 info->control.ifindex = sdata->dev->ifindex;
600 if (!encrypt) 615 if (!encrypt)
601 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; 616 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
602 617
603 dev_queue_xmit(skb); 618 dev_queue_xmit(skb);
604} 619}
@@ -727,9 +742,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
727 if (bss) { 742 if (bss) {
728 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 743 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
729 capab |= WLAN_CAPABILITY_PRIVACY; 744 capab |= WLAN_CAPABILITY_PRIVACY;
730 if (bss->wmm_ie) { 745 if (bss->wmm_ie)
731 wmm = 1; 746 wmm = 1;
732 }
733 747
734 /* get all rates supported by the device and the AP as 748 /* get all rates supported by the device and the AP as
735 * some APs don't like getting a superset of their rates 749 * some APs don't like getting a superset of their rates
@@ -737,6 +751,10 @@ static void ieee80211_send_assoc(struct net_device *dev,
737 * b-only mode) */ 751 * b-only mode) */
738 rates_len = ieee80211_compatible_rates(bss, sband, &rates); 752 rates_len = ieee80211_compatible_rates(bss, sband, &rates);
739 753
754 if ((bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
755 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
756 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
757
740 ieee80211_rx_bss_put(dev, bss); 758 ieee80211_rx_bss_put(dev, bss);
741 } else { 759 } else {
742 rates = ~0; 760 rates = ~0;
@@ -804,6 +822,26 @@ static void ieee80211_send_assoc(struct net_device *dev,
804 } 822 }
805 } 823 }
806 824
825 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
826 /* 1. power capabilities */
827 pos = skb_put(skb, 4);
828 *pos++ = WLAN_EID_PWR_CAPABILITY;
829 *pos++ = 2;
830 *pos++ = 0; /* min tx power */
831 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
832
833 /* 2. supported channels */
834 /* TODO: get this in reg domain format */
835 pos = skb_put(skb, 2 * sband->n_channels + 2);
836 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
837 *pos++ = 2 * sband->n_channels;
838 for (i = 0; i < sband->n_channels; i++) {
839 *pos++ = ieee80211_frequency_to_channel(
840 sband->channels[i].center_freq);
841 *pos++ = 1; /* one channel in the subband*/
842 }
843 }
844
807 if (ifsta->extra_ie) { 845 if (ifsta->extra_ie) {
808 pos = skb_put(skb, ifsta->extra_ie_len); 846 pos = skb_put(skb, ifsta->extra_ie_len);
809 memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len); 847 memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len);
@@ -821,9 +859,32 @@ static void ieee80211_send_assoc(struct net_device *dev,
821 *pos++ = 1; /* WME ver */ 859 *pos++ = 1; /* WME ver */
822 *pos++ = 0; 860 *pos++ = 0;
823 } 861 }
862
824 /* wmm support is a must to HT */ 863 /* wmm support is a must to HT */
825 if (wmm && sband->ht_info.ht_supported) { 864 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
826 __le16 tmp = cpu_to_le16(sband->ht_info.cap); 865 sband->ht_info.ht_supported && bss->ht_add_ie) {
866 struct ieee80211_ht_addt_info *ht_add_info =
867 (struct ieee80211_ht_addt_info *)bss->ht_add_ie;
868 u16 cap = sband->ht_info.cap;
869 __le16 tmp;
870 u32 flags = local->hw.conf.channel->flags;
871
872 switch (ht_add_info->ht_param & IEEE80211_HT_IE_CHA_SEC_OFFSET) {
873 case IEEE80211_HT_IE_CHA_SEC_ABOVE:
874 if (flags & IEEE80211_CHAN_NO_FAT_ABOVE) {
875 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
876 cap &= ~IEEE80211_HT_CAP_SGI_40;
877 }
878 break;
879 case IEEE80211_HT_IE_CHA_SEC_BELOW:
880 if (flags & IEEE80211_CHAN_NO_FAT_BELOW) {
881 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH;
882 cap &= ~IEEE80211_HT_CAP_SGI_40;
883 }
884 break;
885 }
886
887 tmp = cpu_to_le16(cap);
827 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); 888 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
828 *pos++ = WLAN_EID_HT_CAPABILITY; 889 *pos++ = WLAN_EID_HT_CAPABILITY;
829 *pos++ = sizeof(struct ieee80211_ht_cap); 890 *pos++ = sizeof(struct ieee80211_ht_cap);
@@ -1118,14 +1179,10 @@ static void ieee80211_auth_challenge(struct net_device *dev,
1118 u8 *pos; 1179 u8 *pos;
1119 struct ieee802_11_elems elems; 1180 struct ieee802_11_elems elems;
1120 1181
1121 printk(KERN_DEBUG "%s: replying to auth challenge\n", dev->name);
1122 pos = mgmt->u.auth.variable; 1182 pos = mgmt->u.auth.variable;
1123 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1183 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1124 if (!elems.challenge) { 1184 if (!elems.challenge)
1125 printk(KERN_DEBUG "%s: no challenge IE in shared key auth "
1126 "frame\n", dev->name);
1127 return; 1185 return;
1128 }
1129 ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, 1186 ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2,
1130 elems.challenge_len + 2, 1); 1187 elems.challenge_len + 2, 1);
1131} 1188}
@@ -1141,8 +1198,8 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1141 struct ieee80211_mgmt *mgmt; 1198 struct ieee80211_mgmt *mgmt;
1142 u16 capab; 1199 u16 capab;
1143 1200
1144 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1201 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1145 sizeof(mgmt->u.action.u.addba_resp)); 1202
1146 if (!skb) { 1203 if (!skb) {
1147 printk(KERN_DEBUG "%s: failed to allocate buffer " 1204 printk(KERN_DEBUG "%s: failed to allocate buffer "
1148 "for addba resp frame\n", dev->name); 1205 "for addba resp frame\n", dev->name);
@@ -1190,9 +1247,7 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1190 struct ieee80211_mgmt *mgmt; 1247 struct ieee80211_mgmt *mgmt;
1191 u16 capab; 1248 u16 capab;
1192 1249
1193 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1250 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1194 sizeof(mgmt->u.action.u.addba_req));
1195
1196 1251
1197 if (!skb) { 1252 if (!skb) {
1198 printk(KERN_ERR "%s: failed to allocate buffer " 1253 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1293,7 +1348,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1293 1348
1294 1349
1295 /* examine state machine */ 1350 /* examine state machine */
1296 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1351 spin_lock_bh(&sta->lock);
1297 1352
1298 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 1353 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
1299#ifdef CONFIG_MAC80211_HT_DEBUG 1354#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -1309,9 +1364,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1309 sta->ampdu_mlme.tid_rx[tid] = 1364 sta->ampdu_mlme.tid_rx[tid] =
1310 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); 1365 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
1311 if (!sta->ampdu_mlme.tid_rx[tid]) { 1366 if (!sta->ampdu_mlme.tid_rx[tid]) {
1367#ifdef CONFIG_MAC80211_HT_DEBUG
1312 if (net_ratelimit()) 1368 if (net_ratelimit())
1313 printk(KERN_ERR "allocate rx mlme to tid %d failed\n", 1369 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
1314 tid); 1370 tid);
1371#endif
1315 goto end; 1372 goto end;
1316 } 1373 }
1317 /* rx timer */ 1374 /* rx timer */
@@ -1327,9 +1384,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1327 tid_agg_rx->reorder_buf = 1384 tid_agg_rx->reorder_buf =
1328 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); 1385 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
1329 if (!tid_agg_rx->reorder_buf) { 1386 if (!tid_agg_rx->reorder_buf) {
1387#ifdef CONFIG_MAC80211_HT_DEBUG
1330 if (net_ratelimit()) 1388 if (net_ratelimit())
1331 printk(KERN_ERR "can not allocate reordering buffer " 1389 printk(KERN_ERR "can not allocate reordering buffer "
1332 "to tid %d\n", tid); 1390 "to tid %d\n", tid);
1391#endif
1333 kfree(sta->ampdu_mlme.tid_rx[tid]); 1392 kfree(sta->ampdu_mlme.tid_rx[tid]);
1334 goto end; 1393 goto end;
1335 } 1394 }
@@ -1360,7 +1419,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1360 tid_agg_rx->stored_mpdu_num = 0; 1419 tid_agg_rx->stored_mpdu_num = 0;
1361 status = WLAN_STATUS_SUCCESS; 1420 status = WLAN_STATUS_SUCCESS;
1362end: 1421end:
1363 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1422 spin_unlock_bh(&sta->lock);
1364 1423
1365end_no_lock: 1424end_no_lock:
1366 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, 1425 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
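Several hunks in this file, starting with the two above, retire the separate ampdu_mlme.ampdu_rx / ampdu_tx spinlocks in favour of the single per-station sta->lock. A minimal sketch of the resulting pattern follows; it is a kernel-context fragment, not a standalone program, and demo_sta is a cut-down stand-in in which only the lock and a per-TID RX state array are meant to echo the real struct sta_info:

#include <linux/spinlock.h>
#include <linux/types.h>

/* cut-down stand-in for struct sta_info; only the lock and the TID state
 * array echo the real structure, everything else is omitted */
struct demo_sta {
	spinlock_t lock;	/* one lock for all per-station aggregation state */
	u8 tid_state_rx[16];	/* assumed: one RX aggregation state per TID */
};

/* serialize an RX-side state change under the per-station lock; the _bh
 * variant matches the softirq/BH context these mlme paths run in */
static void demo_set_rx_state(struct demo_sta *sta, int tid, u8 state)
{
	spin_lock_bh(&sta->lock);
	sta->tid_state_rx[tid] = state;
	spin_unlock_bh(&sta->lock);
}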
@@ -1392,18 +1451,16 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1392 1451
1393 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1452 state = &sta->ampdu_mlme.tid_state_tx[tid];
1394 1453
1395 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1454 spin_lock_bh(&sta->lock);
1396 1455
1397 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1456 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1398 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1457 spin_unlock_bh(&sta->lock);
1399 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
1400 "%d\n", *state);
1401 goto addba_resp_exit; 1458 goto addba_resp_exit;
1402 } 1459 }
1403 1460
1404 if (mgmt->u.action.u.addba_resp.dialog_token != 1461 if (mgmt->u.action.u.addba_resp.dialog_token !=
1405 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 1462 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
1406 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1463 spin_unlock_bh(&sta->lock);
1407#ifdef CONFIG_MAC80211_HT_DEBUG 1464#ifdef CONFIG_MAC80211_HT_DEBUG
1408 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 1465 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1409#endif /* CONFIG_MAC80211_HT_DEBUG */ 1466#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -1416,26 +1473,18 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1416#endif /* CONFIG_MAC80211_HT_DEBUG */ 1473#endif /* CONFIG_MAC80211_HT_DEBUG */
1417 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 1474 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1418 == WLAN_STATUS_SUCCESS) { 1475 == WLAN_STATUS_SUCCESS) {
1419 if (*state & HT_ADDBA_RECEIVED_MSK)
1420 printk(KERN_DEBUG "double addBA response\n");
1421
1422 *state |= HT_ADDBA_RECEIVED_MSK; 1476 *state |= HT_ADDBA_RECEIVED_MSK;
1423 sta->ampdu_mlme.addba_req_num[tid] = 0; 1477 sta->ampdu_mlme.addba_req_num[tid] = 0;
1424 1478
1425 if (*state == HT_AGG_STATE_OPERATIONAL) { 1479 if (*state == HT_AGG_STATE_OPERATIONAL)
1426 printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
1427 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 1480 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1428 }
1429 1481
1430 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1482 spin_unlock_bh(&sta->lock);
1431 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
1432 } else { 1483 } else {
1433 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
1434
1435 sta->ampdu_mlme.addba_req_num[tid]++; 1484 sta->ampdu_mlme.addba_req_num[tid]++;
1436 /* this will allow the state check in stop_BA_session */ 1485 /* this will allow the state check in stop_BA_session */
1437 *state = HT_AGG_STATE_OPERATIONAL; 1486 *state = HT_AGG_STATE_OPERATIONAL;
1438 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1487 spin_unlock_bh(&sta->lock);
1439 ieee80211_stop_tx_ba_session(hw, sta->addr, tid, 1488 ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
1440 WLAN_BACK_INITIATOR); 1489 WLAN_BACK_INITIATOR);
1441 } 1490 }
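On the TX side, the hunk above shows the accepted addBA response ORing HT_ADDBA_RECEIVED_MSK into the TID state and waking the queue only once the state compares equal to HT_AGG_STATE_OPERATIONAL, which suggests OPERATIONAL is simply the union of the prerequisite bits. A standalone toy of that bit-accumulation idiom, with made-up mask values that do not claim to match mac80211's definitions:

#include <stdio.h>

/* hypothetical bit assignments -- not mac80211's real values */
#define DEMO_ADDBA_REQUESTED	0x01	/* we sent an addBA request */
#define DEMO_DRV_READY		0x02	/* driver acknowledged the session */
#define DEMO_ADDBA_RECEIVED	0x04	/* peer answered with success */
#define DEMO_OPERATIONAL	(DEMO_ADDBA_REQUESTED | DEMO_DRV_READY | \
				 DEMO_ADDBA_RECEIVED)

int main(void)
{
	unsigned int state = DEMO_ADDBA_REQUESTED | DEMO_DRV_READY;

	state |= DEMO_ADDBA_RECEIVED;		/* addBA response accepted */
	if (state == DEMO_OPERATIONAL)
		printf("aggregation operational, wake the TID queue\n");
	return 0;
}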
@@ -1454,8 +1503,7 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1454 struct ieee80211_mgmt *mgmt; 1503 struct ieee80211_mgmt *mgmt;
1455 u16 params; 1504 u16 params;
1456 1505
1457 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 + 1506 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1458 sizeof(mgmt->u.action.u.delba));
1459 1507
1460 if (!skb) { 1508 if (!skb) {
1461 printk(KERN_ERR "%s: failed to allocate buffer " 1509 printk(KERN_ERR "%s: failed to allocate buffer "
@@ -1506,17 +1554,17 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1506 } 1554 }
1507 1555
1508 /* check if TID is in operational state */ 1556 /* check if TID is in operational state */
1509 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 1557 spin_lock_bh(&sta->lock);
1510 if (sta->ampdu_mlme.tid_state_rx[tid] 1558 if (sta->ampdu_mlme.tid_state_rx[tid]
1511 != HT_AGG_STATE_OPERATIONAL) { 1559 != HT_AGG_STATE_OPERATIONAL) {
1512 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1560 spin_unlock_bh(&sta->lock);
1513 rcu_read_unlock(); 1561 rcu_read_unlock();
1514 return; 1562 return;
1515 } 1563 }
1516 sta->ampdu_mlme.tid_state_rx[tid] = 1564 sta->ampdu_mlme.tid_state_rx[tid] =
1517 HT_AGG_STATE_REQ_STOP_BA_MSK | 1565 HT_AGG_STATE_REQ_STOP_BA_MSK |
1518 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 1566 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1519 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); 1567 spin_unlock_bh(&sta->lock);
1520 1568
1521 /* stop HW Rx aggregation. ampdu_action existence 1569 /* stop HW Rx aggregation. ampdu_action existence
1522 * already verified in session init so we add the BUG_ON */ 1570 * already verified in session init so we add the BUG_ON */
@@ -1531,7 +1579,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1531 ra, tid, NULL); 1579 ra, tid, NULL);
1532 if (ret) 1580 if (ret)
1533 printk(KERN_DEBUG "HW problem - can not stop rx " 1581 printk(KERN_DEBUG "HW problem - can not stop rx "
1534 "aggergation for tid %d\n", tid); 1582 "aggregation for tid %d\n", tid);
1535 1583
1536 /* shutdown timer has not expired */ 1584 /* shutdown timer has not expired */
1537 if (initiator != WLAN_BACK_TIMER) 1585 if (initiator != WLAN_BACK_TIMER)
@@ -1593,10 +1641,10 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1593 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1641 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1594 WLAN_BACK_INITIATOR, 0); 1642 WLAN_BACK_INITIATOR, 0);
1595 else { /* WLAN_BACK_RECIPIENT */ 1643 else { /* WLAN_BACK_RECIPIENT */
1596 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1644 spin_lock_bh(&sta->lock);
1597 sta->ampdu_mlme.tid_state_tx[tid] = 1645 sta->ampdu_mlme.tid_state_tx[tid] =
1598 HT_AGG_STATE_OPERATIONAL; 1646 HT_AGG_STATE_OPERATIONAL;
1599 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1647 spin_unlock_bh(&sta->lock);
1600 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, 1648 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1601 WLAN_BACK_RECIPIENT); 1649 WLAN_BACK_RECIPIENT);
1602 } 1650 }
@@ -1633,20 +1681,24 @@ void sta_addba_resp_timer_expired(unsigned long data)
1633 1681
1634 state = &sta->ampdu_mlme.tid_state_tx[tid]; 1682 state = &sta->ampdu_mlme.tid_state_tx[tid];
1635 /* check if the TID waits for addBA response */ 1683 /* check if the TID waits for addBA response */
1636 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 1684 spin_lock_bh(&sta->lock);
1637 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 1685 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1638 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1686 spin_unlock_bh(&sta->lock);
1639 *state = HT_AGG_STATE_IDLE; 1687 *state = HT_AGG_STATE_IDLE;
1688#ifdef CONFIG_MAC80211_HT_DEBUG
1640 printk(KERN_DEBUG "timer expired on tid %d but we are not " 1689 printk(KERN_DEBUG "timer expired on tid %d but we are not "
1641 "expecting addBA response there", tid); 1690 "expecting addBA response there", tid);
1691#endif
1642 goto timer_expired_exit; 1692 goto timer_expired_exit;
1643 } 1693 }
1644 1694
1695#ifdef CONFIG_MAC80211_HT_DEBUG
1645 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); 1696 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1697#endif
1646 1698
1647 /* go through the state check in stop_BA_session */ 1699 /* go through the state check in stop_BA_session */
1648 *state = HT_AGG_STATE_OPERATIONAL; 1700 *state = HT_AGG_STATE_OPERATIONAL;
1649 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 1701 spin_unlock_bh(&sta->lock);
1650 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, 1702 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1651 WLAN_BACK_INITIATOR); 1703 WLAN_BACK_INITIATOR);
1652 1704
@@ -1659,7 +1711,7 @@ timer_expired_exit:
1659 * resetting it after each frame that arrives from the originator. 1711 * resetting it after each frame that arrives from the originator.
1660 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 1712 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
1661 */ 1713 */
1662void sta_rx_agg_session_timer_expired(unsigned long data) 1714static void sta_rx_agg_session_timer_expired(unsigned long data)
1663{ 1715{
1664 /* not an elegant detour, but there is no choice as the timer passes 1716 /* not an elegant detour, but there is no choice as the timer passes
1665 * only one argument, and various sta_info are needed here, so init 1717 * only one argument, and various sta_info are needed here, so init
@@ -1670,7 +1722,9 @@ void sta_rx_agg_session_timer_expired(unsigned long data)
1670 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 1722 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
1671 timer_to_tid[0]); 1723 timer_to_tid[0]);
1672 1724
1725#ifdef CONFIG_MAC80211_HT_DEBUG
1673 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 1726 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1727#endif
1674 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 1728 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1675 (u16)*ptid, WLAN_BACK_TIMER, 1729 (u16)*ptid, WLAN_BACK_TIMER,
1676 WLAN_REASON_QSTA_TIMEOUT); 1730 WLAN_REASON_QSTA_TIMEOUT);
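The in-code comment above calls the timer argument handling an inelegant detour: the callback receives a single unsigned long, so the code stores the TID value itself in a per-TID byte inside the station entry and walks back to the owning structure with container_of(). A self-contained illustration of that trick; the structure and field names are invented for the example, and the local container_of is just the same pointer arithmetic the kernel macro performs:

#include <stdio.h>
#include <stddef.h>

/* same pointer arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_sta {
	int some_state;			/* placeholder for the real fields */
	unsigned char timer_to_tid[16];	/* one cookie byte per TID */
};

static void timer_cb(unsigned long data)
{
	unsigned char *ptid = (unsigned char *)data;
	/* the stored value is the TID itself, so stepping back *ptid
	 * entries lands on element 0 of the array inside the owner */
	unsigned char *base = ptid - *ptid;
	struct demo_sta *sta = container_of(base, struct demo_sta,
					    timer_to_tid[0]);

	printf("timer fired for tid %d on sta at %p\n",
	       (int)*ptid, (void *)sta);
}

int main(void)
{
	struct demo_sta sta = { .some_state = 1 };
	int tid = 3;

	sta.timer_to_tid[tid] = (unsigned char)tid;	/* TID as the cookie */
	timer_cb((unsigned long)&sta.timer_to_tid[tid]);
	return 0;
}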
@@ -1690,6 +1744,71 @@ void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
1690 } 1744 }
1691} 1745}
1692 1746
1747static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1748 struct ieee80211_msrment_ie *request_ie,
1749 const u8 *da, const u8 *bssid,
1750 u8 dialog_token)
1751{
1752 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1753 struct sk_buff *skb;
1754 struct ieee80211_mgmt *msr_report;
1755
1756 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
1757 sizeof(struct ieee80211_msrment_ie));
1758
1759 if (!skb) {
1760 printk(KERN_ERR "%s: failed to allocate buffer for "
1761 "measurement report frame\n", dev->name);
1762 return;
1763 }
1764
1765 skb_reserve(skb, local->hw.extra_tx_headroom);
1766 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
1767 memset(msr_report, 0, 24);
1768 memcpy(msr_report->da, da, ETH_ALEN);
1769 memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN);
1770 memcpy(msr_report->bssid, bssid, ETH_ALEN);
1771 msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1772 IEEE80211_STYPE_ACTION);
1773
1774 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
1775 msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
1776 msr_report->u.action.u.measurement.action_code =
1777 WLAN_ACTION_SPCT_MSR_RPRT;
1778 msr_report->u.action.u.measurement.dialog_token = dialog_token;
1779
1780 msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
1781 msr_report->u.action.u.measurement.length =
1782 sizeof(struct ieee80211_msrment_ie);
1783
1784 memset(&msr_report->u.action.u.measurement.msr_elem, 0,
1785 sizeof(struct ieee80211_msrment_ie));
1786 msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
1787 msr_report->u.action.u.measurement.msr_elem.mode |=
1788 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
1789 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
1790
1791 ieee80211_sta_tx(dev, skb, 0);
1792}
1793
1794static void ieee80211_sta_process_measurement_req(struct net_device *dev,
1795 struct ieee80211_mgmt *mgmt,
1796 size_t len)
1797{
 1798 /*
 1799 * Ignoring a measurement request is a spec violation.
 1800 * Mandatory measurements must be reported; optional
 1801 * measurements might be refused or reported as incapable.
 1802 * For now just refuse.
 1803 * TODO: Answer basic measurement as unmeasured
 1804 */
1805 ieee80211_send_refuse_measurement_request(dev,
1806 &mgmt->u.action.u.measurement.msr_elem,
1807 mgmt->sa, mgmt->bssid,
1808 mgmt->u.action.u.measurement.dialog_token);
1809}
1810
1811
1693static void ieee80211_rx_mgmt_auth(struct net_device *dev, 1812static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1694 struct ieee80211_if_sta *ifsta, 1813 struct ieee80211_if_sta *ifsta,
1695 struct ieee80211_mgmt *mgmt, 1814 struct ieee80211_mgmt *mgmt,
@@ -1700,73 +1819,41 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1700 DECLARE_MAC_BUF(mac); 1819 DECLARE_MAC_BUF(mac);
1701 1820
1702 if (ifsta->state != IEEE80211_AUTHENTICATE && 1821 if (ifsta->state != IEEE80211_AUTHENTICATE &&
1703 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { 1822 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1704 printk(KERN_DEBUG "%s: authentication frame received from "
1705 "%s, but not in authenticate state - ignored\n",
1706 dev->name, print_mac(mac, mgmt->sa));
1707 return; 1823 return;
1708 }
1709 1824
1710 if (len < 24 + 6) { 1825 if (len < 24 + 6)
1711 printk(KERN_DEBUG "%s: too short (%zd) authentication frame "
1712 "received from %s - ignored\n",
1713 dev->name, len, print_mac(mac, mgmt->sa));
1714 return; 1826 return;
1715 }
1716 1827
1717 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1828 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
1718 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1829 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1719 printk(KERN_DEBUG "%s: authentication frame received from "
1720 "unknown AP (SA=%s BSSID=%s) - "
1721 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1722 print_mac(mac, mgmt->bssid));
1723 return; 1830 return;
1724 }
1725 1831
1726 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1832 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
1727 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) { 1833 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1728 printk(KERN_DEBUG "%s: authentication frame received from "
1729 "unknown BSSID (SA=%s BSSID=%s) - "
1730 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1731 print_mac(mac, mgmt->bssid));
1732 return; 1834 return;
1733 }
1734 1835
1735 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 1836 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1736 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 1837 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1737 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1838 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1738 1839
1739 printk(KERN_DEBUG "%s: RX authentication from %s (alg=%d "
1740 "transaction=%d status=%d)\n",
1741 dev->name, print_mac(mac, mgmt->sa), auth_alg,
1742 auth_transaction, status_code);
1743
1744 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1840 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
1745 /* IEEE 802.11 standard does not require authentication in IBSS 1841 /*
1842 * IEEE 802.11 standard does not require authentication in IBSS
1746 * networks and most implementations do not seem to use it. 1843 * networks and most implementations do not seem to use it.
1747 * However, try to reply to authentication attempts if someone 1844 * However, try to reply to authentication attempts if someone
1748 * has actually implemented this. 1845 * has actually implemented this.
1749 * TODO: Could implement shared key authentication. */ 1846 */
1750 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) { 1847 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1751 printk(KERN_DEBUG "%s: unexpected IBSS authentication "
1752 "frame (alg=%d transaction=%d)\n",
1753 dev->name, auth_alg, auth_transaction);
1754 return; 1848 return;
1755 }
1756 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); 1849 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0);
1757 } 1850 }
1758 1851
1759 if (auth_alg != ifsta->auth_alg || 1852 if (auth_alg != ifsta->auth_alg ||
1760 auth_transaction != ifsta->auth_transaction) { 1853 auth_transaction != ifsta->auth_transaction)
1761 printk(KERN_DEBUG "%s: unexpected authentication frame "
1762 "(alg=%d transaction=%d)\n",
1763 dev->name, auth_alg, auth_transaction);
1764 return; 1854 return;
1765 }
1766 1855
1767 if (status_code != WLAN_STATUS_SUCCESS) { 1856 if (status_code != WLAN_STATUS_SUCCESS) {
1768 printk(KERN_DEBUG "%s: AP denied authentication (auth_alg=%d "
1769 "code=%d)\n", dev->name, ifsta->auth_alg, status_code);
1770 if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { 1857 if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
1771 u8 algs[3]; 1858 u8 algs[3];
1772 const int num_algs = ARRAY_SIZE(algs); 1859 const int num_algs = ARRAY_SIZE(algs);
@@ -1795,9 +1882,6 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1795 !ieee80211_sta_wep_configured(dev)) 1882 !ieee80211_sta_wep_configured(dev))
1796 continue; 1883 continue;
1797 ifsta->auth_alg = algs[pos]; 1884 ifsta->auth_alg = algs[pos];
1798 printk(KERN_DEBUG "%s: set auth_alg=%d for "
1799 "next try\n",
1800 dev->name, ifsta->auth_alg);
1801 break; 1885 break;
1802 } 1886 }
1803 } 1887 }
@@ -1827,30 +1911,16 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1827 u16 reason_code; 1911 u16 reason_code;
1828 DECLARE_MAC_BUF(mac); 1912 DECLARE_MAC_BUF(mac);
1829 1913
1830 if (len < 24 + 2) { 1914 if (len < 24 + 2)
1831 printk(KERN_DEBUG "%s: too short (%zd) deauthentication frame "
1832 "received from %s - ignored\n",
1833 dev->name, len, print_mac(mac, mgmt->sa));
1834 return; 1915 return;
1835 }
1836 1916
1837 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1917 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN))
1838 printk(KERN_DEBUG "%s: deauthentication frame received from "
1839 "unknown AP (SA=%s BSSID=%s) - "
1840 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1841 print_mac(mac, mgmt->bssid));
1842 return; 1918 return;
1843 }
1844 1919
1845 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1920 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1846 1921
1847 printk(KERN_DEBUG "%s: RX deauthentication from %s" 1922 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1848 " (reason=%d)\n",
1849 dev->name, print_mac(mac, mgmt->sa), reason_code);
1850
1851 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) {
1852 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1923 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name);
1853 }
1854 1924
1855 if (ifsta->state == IEEE80211_AUTHENTICATE || 1925 if (ifsta->state == IEEE80211_AUTHENTICATE ||
1856 ifsta->state == IEEE80211_ASSOCIATE || 1926 ifsta->state == IEEE80211_ASSOCIATE ||
@@ -1873,27 +1943,14 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
1873 u16 reason_code; 1943 u16 reason_code;
1874 DECLARE_MAC_BUF(mac); 1944 DECLARE_MAC_BUF(mac);
1875 1945
1876 if (len < 24 + 2) { 1946 if (len < 24 + 2)
1877 printk(KERN_DEBUG "%s: too short (%zd) disassociation frame "
1878 "received from %s - ignored\n",
1879 dev->name, len, print_mac(mac, mgmt->sa));
1880 return; 1947 return;
1881 }
1882 1948
1883 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1949 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN))
1884 printk(KERN_DEBUG "%s: disassociation frame received from "
1885 "unknown AP (SA=%s BSSID=%s) - "
1886 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1887 print_mac(mac, mgmt->bssid));
1888 return; 1950 return;
1889 }
1890 1951
1891 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1952 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1892 1953
1893 printk(KERN_DEBUG "%s: RX disassociation from %s"
1894 " (reason=%d)\n",
1895 dev->name, print_mac(mac, mgmt->sa), reason_code);
1896
1897 if (ifsta->flags & IEEE80211_STA_ASSOCIATED) 1954 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
1898 printk(KERN_DEBUG "%s: disassociated\n", dev->name); 1955 printk(KERN_DEBUG "%s: disassociated\n", dev->name);
1899 1956
@@ -1929,27 +1986,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1929 /* AssocResp and ReassocResp have identical structure, so process both 1986 /* AssocResp and ReassocResp have identical structure, so process both
1930 * of them in this function. */ 1987 * of them in this function. */
1931 1988
1932 if (ifsta->state != IEEE80211_ASSOCIATE) { 1989 if (ifsta->state != IEEE80211_ASSOCIATE)
1933 printk(KERN_DEBUG "%s: association frame received from "
1934 "%s, but not in associate state - ignored\n",
1935 dev->name, print_mac(mac, mgmt->sa));
1936 return; 1990 return;
1937 }
1938 1991
1939 if (len < 24 + 6) { 1992 if (len < 24 + 6)
1940 printk(KERN_DEBUG "%s: too short (%zd) association frame "
1941 "received from %s - ignored\n",
1942 dev->name, len, print_mac(mac, mgmt->sa));
1943 return; 1993 return;
1944 }
1945 1994
1946 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { 1995 if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1947 printk(KERN_DEBUG "%s: association frame received from "
1948 "unknown AP (SA=%s BSSID=%s) - "
1949 "ignored\n", dev->name, print_mac(mac, mgmt->sa),
1950 print_mac(mac, mgmt->bssid));
1951 return; 1996 return;
1952 }
1953 1997
1954 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1998 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1955 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 1999 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -2013,8 +2057,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2013 local->hw.conf.channel->center_freq, 2057 local->hw.conf.channel->center_freq,
2014 ifsta->ssid, ifsta->ssid_len); 2058 ifsta->ssid, ifsta->ssid_len);
2015 if (bss) { 2059 if (bss) {
2016 sta->last_rssi = bss->rssi;
2017 sta->last_signal = bss->signal; 2060 sta->last_signal = bss->signal;
2061 sta->last_qual = bss->qual;
2018 sta->last_noise = bss->noise; 2062 sta->last_noise = bss->noise;
2019 ieee80211_rx_bss_put(dev, bss); 2063 ieee80211_rx_bss_put(dev, bss);
2020 } 2064 }
@@ -2038,8 +2082,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2038 * to between the sta_info_alloc() and sta_info_insert() above. 2082 * to between the sta_info_alloc() and sta_info_insert() above.
2039 */ 2083 */
2040 2084
2041 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP | 2085 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
2042 WLAN_STA_AUTHORIZED; 2086 WLAN_STA_AUTHORIZED);
2043 2087
2044 rates = 0; 2088 rates = 0;
2045 basic_rates = 0; 2089 basic_rates = 0;
@@ -2083,7 +2127,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2083 else 2127 else
2084 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 2128 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
2085 2129
2086 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) { 2130 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
2131 (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2087 struct ieee80211_ht_bss_info bss_info; 2132 struct ieee80211_ht_bss_info bss_info;
2088 ieee80211_ht_cap_ie_to_ht_info( 2133 ieee80211_ht_cap_ie_to_ht_info(
2089 (struct ieee80211_ht_cap *) 2134 (struct ieee80211_ht_cap *)
@@ -2096,8 +2141,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2096 2141
2097 rate_control_rate_init(sta, local); 2142 rate_control_rate_init(sta, local);
2098 2143
2099 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { 2144 if (elems.wmm_param) {
2100 sta->flags |= WLAN_STA_WME; 2145 set_sta_flags(sta, WLAN_STA_WME);
2101 rcu_read_unlock(); 2146 rcu_read_unlock();
2102 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2147 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2103 elems.wmm_param_len); 2148 elems.wmm_param_len);
@@ -2281,6 +2326,7 @@ static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
2281 kfree(bss->rsn_ie); 2326 kfree(bss->rsn_ie);
2282 kfree(bss->wmm_ie); 2327 kfree(bss->wmm_ie);
2283 kfree(bss->ht_ie); 2328 kfree(bss->ht_ie);
2329 kfree(bss->ht_add_ie);
2284 kfree(bss_mesh_id(bss)); 2330 kfree(bss_mesh_id(bss));
2285 kfree(bss_mesh_cfg(bss)); 2331 kfree(bss_mesh_cfg(bss));
2286 kfree(bss); 2332 kfree(bss);
@@ -2331,7 +2377,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2331 int res, rates, i, j; 2377 int res, rates, i, j;
2332 struct sk_buff *skb; 2378 struct sk_buff *skb;
2333 struct ieee80211_mgmt *mgmt; 2379 struct ieee80211_mgmt *mgmt;
2334 struct ieee80211_tx_control control; 2380 struct ieee80211_tx_info *control;
2335 struct rate_selection ratesel; 2381 struct rate_selection ratesel;
2336 u8 *pos; 2382 u8 *pos;
2337 struct ieee80211_sub_if_data *sdata; 2383 struct ieee80211_sub_if_data *sdata;
@@ -2382,6 +2428,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2382 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 2428 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2383 mgmt->u.beacon.beacon_int = 2429 mgmt->u.beacon.beacon_int =
2384 cpu_to_le16(local->hw.conf.beacon_int); 2430 cpu_to_le16(local->hw.conf.beacon_int);
2431 mgmt->u.beacon.timestamp = cpu_to_le64(bss->timestamp);
2385 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability); 2432 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2386 2433
2387 pos = skb_put(skb, 2 + ifsta->ssid_len); 2434 pos = skb_put(skb, 2 + ifsta->ssid_len);
@@ -2419,21 +2466,22 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2419 memcpy(pos, &bss->supp_rates[8], rates); 2466 memcpy(pos, &bss->supp_rates[8], rates);
2420 } 2467 }
2421 2468
2422 memset(&control, 0, sizeof(control)); 2469 control = IEEE80211_SKB_CB(skb);
2470
2423 rate_control_get_rate(dev, sband, skb, &ratesel); 2471 rate_control_get_rate(dev, sband, skb, &ratesel);
2424 if (!ratesel.rate) { 2472 if (ratesel.rate_idx < 0) {
2425 printk(KERN_DEBUG "%s: Failed to determine TX rate " 2473 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2426 "for IBSS beacon\n", dev->name); 2474 "for IBSS beacon\n", dev->name);
2427 break; 2475 break;
2428 } 2476 }
2429 control.vif = &sdata->vif; 2477 control->control.vif = &sdata->vif;
2430 control.tx_rate = ratesel.rate; 2478 control->tx_rate_idx = ratesel.rate_idx;
2431 if (sdata->bss_conf.use_short_preamble && 2479 if (sdata->bss_conf.use_short_preamble &&
2432 ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 2480 sband->bitrates[ratesel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE)
2433 control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 2481 control->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
2434 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; 2482 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2435 control.flags |= IEEE80211_TXCTL_NO_ACK; 2483 control->flags |= IEEE80211_TX_CTL_NO_ACK;
2436 control.retry_limit = 1; 2484 control->control.retry_limit = 1;
2437 2485
2438 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC); 2486 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2439 if (ifsta->probe_resp) { 2487 if (ifsta->probe_resp) {
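The IBSS-beacon hunk above stops filling a stack-allocated struct ieee80211_tx_control and instead annotates the skb through IEEE80211_SKB_CB(skb), which in this series resolves to the struct ieee80211_tx_info kept in the skb's control buffer, and it switches rate selection from a rate pointer to an index into sband->bitrates. A standalone sketch of the underlying idea: per-packet metadata living in a fixed scratch area reached through a cast macro; all names here are demo stand-ins, not the real mac80211 types:

#include <stdio.h>
#include <string.h>

/* toy stand-ins: an sk_buff carries a fixed scratch area (cb) per packet,
 * and IEEE80211_SKB_CB() is essentially a cast of that area */
struct demo_skb {
	char cb[48];
};

struct demo_tx_info {
	unsigned int flags;
	int tx_rate_idx;	/* index into the band's bitrate table */
};

#define DEMO_SKB_CB(skb)	((struct demo_tx_info *)(skb)->cb)
#define DEMO_TX_CTL_NO_ACK	0x1

int main(void)
{
	struct demo_skb skb;

	memset(skb.cb, 0, sizeof(skb.cb));
	DEMO_SKB_CB(&skb)->tx_rate_idx = 2;		/* chosen rate index */
	DEMO_SKB_CB(&skb)->flags |= DEMO_TX_CTL_NO_ACK;	/* beacons are not acked */

	printf("rate_idx=%d flags=%#x\n",
	       DEMO_SKB_CB(&skb)->tx_rate_idx, DEMO_SKB_CB(&skb)->flags);
	return 0;
}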
@@ -2448,8 +2496,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2448 } 2496 }
2449 2497
2450 if (local->ops->beacon_update && 2498 if (local->ops->beacon_update &&
2451 local->ops->beacon_update(local_to_hw(local), 2499 local->ops->beacon_update(local_to_hw(local), skb) == 0) {
2452 skb, &control) == 0) {
2453 printk(KERN_DEBUG "%s: Configured IBSS beacon " 2500 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2454 "template\n", dev->name); 2501 "template\n", dev->name);
2455 skb = NULL; 2502 skb = NULL;
@@ -2525,11 +2572,10 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2525 struct ieee80211_mgmt *mgmt, 2572 struct ieee80211_mgmt *mgmt,
2526 size_t len, 2573 size_t len,
2527 struct ieee80211_rx_status *rx_status, 2574 struct ieee80211_rx_status *rx_status,
2575 struct ieee802_11_elems *elems,
2528 int beacon) 2576 int beacon)
2529{ 2577{
2530 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2578 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2531 struct ieee802_11_elems elems;
2532 size_t baselen;
2533 int freq, clen; 2579 int freq, clen;
2534 struct ieee80211_sta_bss *bss; 2580 struct ieee80211_sta_bss *bss;
2535 struct sta_info *sta; 2581 struct sta_info *sta;
@@ -2542,35 +2588,24 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2542 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) 2588 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
2543 return; /* ignore ProbeResp to foreign address */ 2589 return; /* ignore ProbeResp to foreign address */
2544 2590
2545#if 0
2546 printk(KERN_DEBUG "%s: RX %s from %s to %s\n",
2547 dev->name, beacon ? "Beacon" : "Probe Response",
2548 print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da));
2549#endif
2550
2551 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2552 if (baselen > len)
2553 return;
2554
2555 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); 2591 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2556 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2557 2592
2558 if (ieee80211_vif_is_mesh(&sdata->vif) && elems.mesh_id && 2593 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
2559 elems.mesh_config && mesh_matches_local(&elems, dev)) { 2594 elems->mesh_config && mesh_matches_local(elems, dev)) {
2560 u64 rates = ieee80211_sta_get_rates(local, &elems, 2595 u64 rates = ieee80211_sta_get_rates(local, elems,
2561 rx_status->band); 2596 rx_status->band);
2562 2597
2563 mesh_neighbour_update(mgmt->sa, rates, dev, 2598 mesh_neighbour_update(mgmt->sa, rates, dev,
2564 mesh_peer_accepts_plinks(&elems, dev)); 2599 mesh_peer_accepts_plinks(elems, dev));
2565 } 2600 }
2566 2601
2567 rcu_read_lock(); 2602 rcu_read_lock();
2568 2603
2569 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && 2604 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
2570 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && 2605 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
2571 (sta = sta_info_get(local, mgmt->sa))) { 2606 (sta = sta_info_get(local, mgmt->sa))) {
2572 u64 prev_rates; 2607 u64 prev_rates;
2573 u64 supp_rates = ieee80211_sta_get_rates(local, &elems, 2608 u64 supp_rates = ieee80211_sta_get_rates(local, elems,
2574 rx_status->band); 2609 rx_status->band);
2575 2610
2576 prev_rates = sta->supp_rates[rx_status->band]; 2611 prev_rates = sta->supp_rates[rx_status->band];
@@ -2582,21 +2617,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2582 sta->supp_rates[rx_status->band] = 2617 sta->supp_rates[rx_status->band] =
2583 sdata->u.sta.supp_rates_bits[rx_status->band]; 2618 sdata->u.sta.supp_rates_bits[rx_status->band];
2584 } 2619 }
2585 if (sta->supp_rates[rx_status->band] != prev_rates) {
2586 printk(KERN_DEBUG "%s: updated supp_rates set for "
2587 "%s based on beacon info (0x%llx & 0x%llx -> "
2588 "0x%llx)\n",
2589 dev->name, print_mac(mac, sta->addr),
2590 (unsigned long long) prev_rates,
2591 (unsigned long long) supp_rates,
2592 (unsigned long long) sta->supp_rates[rx_status->band]);
2593 }
2594 } 2620 }
2595 2621
2596 rcu_read_unlock(); 2622 rcu_read_unlock();
2597 2623
2598 if (elems.ds_params && elems.ds_params_len == 1) 2624 if (elems->ds_params && elems->ds_params_len == 1)
2599 freq = ieee80211_channel_to_frequency(elems.ds_params[0]); 2625 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
2600 else 2626 else
2601 freq = rx_status->freq; 2627 freq = rx_status->freq;
2602 2628
@@ -2606,23 +2632,23 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2606 return; 2632 return;
2607 2633
2608#ifdef CONFIG_MAC80211_MESH 2634#ifdef CONFIG_MAC80211_MESH
2609 if (elems.mesh_config) 2635 if (elems->mesh_config)
2610 bss = ieee80211_rx_mesh_bss_get(dev, elems.mesh_id, 2636 bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id,
2611 elems.mesh_id_len, elems.mesh_config, freq); 2637 elems->mesh_id_len, elems->mesh_config, freq);
2612 else 2638 else
2613#endif 2639#endif
2614 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, 2640 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
2615 elems.ssid, elems.ssid_len); 2641 elems->ssid, elems->ssid_len);
2616 if (!bss) { 2642 if (!bss) {
2617#ifdef CONFIG_MAC80211_MESH 2643#ifdef CONFIG_MAC80211_MESH
2618 if (elems.mesh_config) 2644 if (elems->mesh_config)
2619 bss = ieee80211_rx_mesh_bss_add(dev, elems.mesh_id, 2645 bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id,
2620 elems.mesh_id_len, elems.mesh_config, 2646 elems->mesh_id_len, elems->mesh_config,
2621 elems.mesh_config_len, freq); 2647 elems->mesh_config_len, freq);
2622 else 2648 else
2623#endif 2649#endif
2624 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, 2650 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
2625 elems.ssid, elems.ssid_len); 2651 elems->ssid, elems->ssid_len);
2626 if (!bss) 2652 if (!bss)
2627 return; 2653 return;
2628 } else { 2654 } else {
@@ -2635,46 +2661,66 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2635 } 2661 }
2636 2662
2637 /* save the ERP value so that it is available at association time */ 2663 /* save the ERP value so that it is available at association time */
2638 if (elems.erp_info && elems.erp_info_len >= 1) { 2664 if (elems->erp_info && elems->erp_info_len >= 1) {
2639 bss->erp_value = elems.erp_info[0]; 2665 bss->erp_value = elems->erp_info[0];
2640 bss->has_erp_value = 1; 2666 bss->has_erp_value = 1;
2641 } 2667 }
2642 2668
2643 if (elems.ht_cap_elem && 2669 if (elems->ht_cap_elem &&
2644 (!bss->ht_ie || bss->ht_ie_len != elems.ht_cap_elem_len || 2670 (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len ||
2645 memcmp(bss->ht_ie, elems.ht_cap_elem, elems.ht_cap_elem_len))) { 2671 memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) {
2646 kfree(bss->ht_ie); 2672 kfree(bss->ht_ie);
2647 bss->ht_ie = kmalloc(elems.ht_cap_elem_len + 2, GFP_ATOMIC); 2673 bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
2648 if (bss->ht_ie) { 2674 if (bss->ht_ie) {
2649 memcpy(bss->ht_ie, elems.ht_cap_elem - 2, 2675 memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
2650 elems.ht_cap_elem_len + 2); 2676 elems->ht_cap_elem_len + 2);
2651 bss->ht_ie_len = elems.ht_cap_elem_len + 2; 2677 bss->ht_ie_len = elems->ht_cap_elem_len + 2;
2652 } else 2678 } else
2653 bss->ht_ie_len = 0; 2679 bss->ht_ie_len = 0;
2654 } else if (!elems.ht_cap_elem && bss->ht_ie) { 2680 } else if (!elems->ht_cap_elem && bss->ht_ie) {
2655 kfree(bss->ht_ie); 2681 kfree(bss->ht_ie);
2656 bss->ht_ie = NULL; 2682 bss->ht_ie = NULL;
2657 bss->ht_ie_len = 0; 2683 bss->ht_ie_len = 0;
2658 } 2684 }
2659 2685
2686 if (elems->ht_info_elem &&
2687 (!bss->ht_add_ie ||
2688 bss->ht_add_ie_len != elems->ht_info_elem_len ||
2689 memcmp(bss->ht_add_ie, elems->ht_info_elem,
2690 elems->ht_info_elem_len))) {
2691 kfree(bss->ht_add_ie);
2692 bss->ht_add_ie =
2693 kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
2694 if (bss->ht_add_ie) {
2695 memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
2696 elems->ht_info_elem_len + 2);
2697 bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
2698 } else
2699 bss->ht_add_ie_len = 0;
2700 } else if (!elems->ht_info_elem && bss->ht_add_ie) {
2701 kfree(bss->ht_add_ie);
2702 bss->ht_add_ie = NULL;
2703 bss->ht_add_ie_len = 0;
2704 }
2705
2660 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 2706 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
2661 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 2707 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
2662 2708
2663 bss->supp_rates_len = 0; 2709 bss->supp_rates_len = 0;
2664 if (elems.supp_rates) { 2710 if (elems->supp_rates) {
2665 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 2711 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2666 if (clen > elems.supp_rates_len) 2712 if (clen > elems->supp_rates_len)
2667 clen = elems.supp_rates_len; 2713 clen = elems->supp_rates_len;
2668 memcpy(&bss->supp_rates[bss->supp_rates_len], elems.supp_rates, 2714 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
2669 clen); 2715 clen);
2670 bss->supp_rates_len += clen; 2716 bss->supp_rates_len += clen;
2671 } 2717 }
2672 if (elems.ext_supp_rates) { 2718 if (elems->ext_supp_rates) {
2673 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 2719 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2674 if (clen > elems.ext_supp_rates_len) 2720 if (clen > elems->ext_supp_rates_len)
2675 clen = elems.ext_supp_rates_len; 2721 clen = elems->ext_supp_rates_len;
2676 memcpy(&bss->supp_rates[bss->supp_rates_len], 2722 memcpy(&bss->supp_rates[bss->supp_rates_len],
2677 elems.ext_supp_rates, clen); 2723 elems->ext_supp_rates, clen);
2678 bss->supp_rates_len += clen; 2724 bss->supp_rates_len += clen;
2679 } 2725 }
2680 2726
@@ -2682,9 +2728,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2682 2728
2683 bss->timestamp = beacon_timestamp; 2729 bss->timestamp = beacon_timestamp;
2684 bss->last_update = jiffies; 2730 bss->last_update = jiffies;
2685 bss->rssi = rx_status->ssi;
2686 bss->signal = rx_status->signal; 2731 bss->signal = rx_status->signal;
2687 bss->noise = rx_status->noise; 2732 bss->noise = rx_status->noise;
2733 bss->qual = rx_status->qual;
2688 if (!beacon && !bss->probe_resp) 2734 if (!beacon && !bss->probe_resp)
2689 bss->probe_resp = true; 2735 bss->probe_resp = true;
2690 2736
@@ -2698,33 +2744,33 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2698 return; 2744 return;
2699 } 2745 }
2700 2746
2701 if (elems.wpa && 2747 if (elems->wpa &&
2702 (!bss->wpa_ie || bss->wpa_ie_len != elems.wpa_len || 2748 (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len ||
2703 memcmp(bss->wpa_ie, elems.wpa, elems.wpa_len))) { 2749 memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) {
2704 kfree(bss->wpa_ie); 2750 kfree(bss->wpa_ie);
2705 bss->wpa_ie = kmalloc(elems.wpa_len + 2, GFP_ATOMIC); 2751 bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
2706 if (bss->wpa_ie) { 2752 if (bss->wpa_ie) {
2707 memcpy(bss->wpa_ie, elems.wpa - 2, elems.wpa_len + 2); 2753 memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
2708 bss->wpa_ie_len = elems.wpa_len + 2; 2754 bss->wpa_ie_len = elems->wpa_len + 2;
2709 } else 2755 } else
2710 bss->wpa_ie_len = 0; 2756 bss->wpa_ie_len = 0;
2711 } else if (!elems.wpa && bss->wpa_ie) { 2757 } else if (!elems->wpa && bss->wpa_ie) {
2712 kfree(bss->wpa_ie); 2758 kfree(bss->wpa_ie);
2713 bss->wpa_ie = NULL; 2759 bss->wpa_ie = NULL;
2714 bss->wpa_ie_len = 0; 2760 bss->wpa_ie_len = 0;
2715 } 2761 }
2716 2762
2717 if (elems.rsn && 2763 if (elems->rsn &&
2718 (!bss->rsn_ie || bss->rsn_ie_len != elems.rsn_len || 2764 (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
2719 memcmp(bss->rsn_ie, elems.rsn, elems.rsn_len))) { 2765 memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
2720 kfree(bss->rsn_ie); 2766 kfree(bss->rsn_ie);
2721 bss->rsn_ie = kmalloc(elems.rsn_len + 2, GFP_ATOMIC); 2767 bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
2722 if (bss->rsn_ie) { 2768 if (bss->rsn_ie) {
2723 memcpy(bss->rsn_ie, elems.rsn - 2, elems.rsn_len + 2); 2769 memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
2724 bss->rsn_ie_len = elems.rsn_len + 2; 2770 bss->rsn_ie_len = elems->rsn_len + 2;
2725 } else 2771 } else
2726 bss->rsn_ie_len = 0; 2772 bss->rsn_ie_len = 0;
2727 } else if (!elems.rsn && bss->rsn_ie) { 2773 } else if (!elems->rsn && bss->rsn_ie) {
2728 kfree(bss->rsn_ie); 2774 kfree(bss->rsn_ie);
2729 bss->rsn_ie = NULL; 2775 bss->rsn_ie = NULL;
2730 bss->rsn_ie_len = 0; 2776 bss->rsn_ie_len = 0;
@@ -2744,20 +2790,21 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2744 * inclusion of the WMM Parameters in beacons, however, is optional. 2790 * inclusion of the WMM Parameters in beacons, however, is optional.
2745 */ 2791 */
2746 2792
2747 if (elems.wmm_param && 2793 if (elems->wmm_param &&
2748 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_param_len || 2794 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
2749 memcmp(bss->wmm_ie, elems.wmm_param, elems.wmm_param_len))) { 2795 memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
2750 kfree(bss->wmm_ie); 2796 kfree(bss->wmm_ie);
2751 bss->wmm_ie = kmalloc(elems.wmm_param_len + 2, GFP_ATOMIC); 2797 bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
2752 if (bss->wmm_ie) { 2798 if (bss->wmm_ie) {
2753 memcpy(bss->wmm_ie, elems.wmm_param - 2, 2799 memcpy(bss->wmm_ie, elems->wmm_param - 2,
2754 elems.wmm_param_len + 2); 2800 elems->wmm_param_len + 2);
2755 bss->wmm_ie_len = elems.wmm_param_len + 2; 2801 bss->wmm_ie_len = elems->wmm_param_len + 2;
2756 } else 2802 } else
2757 bss->wmm_ie_len = 0; 2803 bss->wmm_ie_len = 0;
2758 } else if (elems.wmm_info && 2804 } else if (elems->wmm_info &&
2759 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_info_len || 2805 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
2760 memcmp(bss->wmm_ie, elems.wmm_info, elems.wmm_info_len))) { 2806 memcmp(bss->wmm_ie, elems->wmm_info,
2807 elems->wmm_info_len))) {
2761 /* As for certain AP's Fifth bit is not set in WMM IE in 2808 /* As for certain AP's Fifth bit is not set in WMM IE in
2762 * beacon frames.So while parsing the beacon frame the 2809 * beacon frames.So while parsing the beacon frame the
2763 * wmm_info structure is used instead of wmm_param. 2810 * wmm_info structure is used instead of wmm_param.
@@ -2767,14 +2814,14 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2767 * n-band association. 2814 * n-band association.
2768 */ 2815 */
2769 kfree(bss->wmm_ie); 2816 kfree(bss->wmm_ie);
2770 bss->wmm_ie = kmalloc(elems.wmm_info_len + 2, GFP_ATOMIC); 2817 bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
2771 if (bss->wmm_ie) { 2818 if (bss->wmm_ie) {
2772 memcpy(bss->wmm_ie, elems.wmm_info - 2, 2819 memcpy(bss->wmm_ie, elems->wmm_info - 2,
2773 elems.wmm_info_len + 2); 2820 elems->wmm_info_len + 2);
2774 bss->wmm_ie_len = elems.wmm_info_len + 2; 2821 bss->wmm_ie_len = elems->wmm_info_len + 2;
2775 } else 2822 } else
2776 bss->wmm_ie_len = 0; 2823 bss->wmm_ie_len = 0;
2777 } else if (!elems.wmm_param && !elems.wmm_info && bss->wmm_ie) { 2824 } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
2778 kfree(bss->wmm_ie); 2825 kfree(bss->wmm_ie);
2779 bss->wmm_ie = NULL; 2826 bss->wmm_ie = NULL;
2780 bss->wmm_ie_len = 0; 2827 bss->wmm_ie_len = 0;
@@ -2785,8 +2832,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2785 !local->sta_sw_scanning && !local->sta_hw_scanning && 2832 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2786 bss->capability & WLAN_CAPABILITY_IBSS && 2833 bss->capability & WLAN_CAPABILITY_IBSS &&
2787 bss->freq == local->oper_channel->center_freq && 2834 bss->freq == local->oper_channel->center_freq &&
2788 elems.ssid_len == sdata->u.sta.ssid_len && 2835 elems->ssid_len == sdata->u.sta.ssid_len &&
2789 memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) { 2836 memcmp(elems->ssid, sdata->u.sta.ssid,
2837 sdata->u.sta.ssid_len) == 0) {
2790 if (rx_status->flag & RX_FLAG_TSFT) { 2838 if (rx_status->flag & RX_FLAG_TSFT) {
2791 /* in order for correct IBSS merging we need mactime 2839 /* in order for correct IBSS merging we need mactime
2792 * 2840 *
@@ -2824,14 +2872,14 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2824#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2872#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2825 if (beacon_timestamp > rx_timestamp) { 2873 if (beacon_timestamp > rx_timestamp) {
2826#ifndef CONFIG_MAC80211_IBSS_DEBUG 2874#ifndef CONFIG_MAC80211_IBSS_DEBUG
2827 if (net_ratelimit()) 2875 printk(KERN_DEBUG "%s: beacon TSF higher than "
2876 "local TSF - IBSS merge with BSSID %s\n",
2877 dev->name, print_mac(mac, mgmt->bssid));
2828#endif 2878#endif
2829 printk(KERN_DEBUG "%s: beacon TSF higher than "
2830 "local TSF - IBSS merge with BSSID %s\n",
2831 dev->name, print_mac(mac, mgmt->bssid));
2832 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); 2879 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss);
2833 ieee80211_ibss_add_sta(dev, NULL, 2880 ieee80211_ibss_add_sta(dev, NULL,
2834 mgmt->bssid, mgmt->sa); 2881 mgmt->bssid, mgmt->sa,
2882 BIT(rx_status->rate_idx));
2835 } 2883 }
2836 } 2884 }
2837 2885
@@ -2844,7 +2892,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
2844 size_t len, 2892 size_t len,
2845 struct ieee80211_rx_status *rx_status) 2893 struct ieee80211_rx_status *rx_status)
2846{ 2894{
2847 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 0); 2895 size_t baselen;
2896 struct ieee802_11_elems elems;
2897
2898 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
2899 if (baselen > len)
2900 return;
2901
2902 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2903 &elems);
2904
2905 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0);
2848} 2906}
2849 2907
2850 2908
@@ -2861,7 +2919,14 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2861 struct ieee80211_conf *conf = &local->hw.conf; 2919 struct ieee80211_conf *conf = &local->hw.conf;
2862 u32 changed = 0; 2920 u32 changed = 0;
2863 2921
2864 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, 1); 2922 /* Process beacon from the current BSS */
2923 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2924 if (baselen > len)
2925 return;
2926
2927 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2928
2929 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1);
2865 2930
2866 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2931 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2867 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2932 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
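The two hunks above lift the baselen computation and the ieee802_11_parse_elems() call out of ieee80211_rx_bss_info() and into the probe-response and beacon handlers, so the information elements are parsed once and handed down as *elems. The guard itself is plain bounds arithmetic: the variable part of the frame begins at a fixed offset, and anything shorter is discarded before parsing. A standalone sketch of that check, using an illustrative layout rather than the real struct ieee80211_mgmt:

#include <stddef.h>
#include <stdio.h>

/* illustrative layout: a fixed header followed by variable-length IEs */
struct demo_beacon {
	unsigned char header[24];	/* 802.11 MAC header */
	unsigned char fixed[12];	/* timestamp + beacon_int + capab_info */
	unsigned char variable[];	/* information elements start here */
};

static int demo_parse(const struct demo_beacon *b, size_t len)
{
	size_t baselen = offsetof(struct demo_beacon, variable);

	if (baselen > len) {		/* frame too short to reach the IEs */
		printf("frame of %zu bytes dropped\n", len);
		return -1;
	}

	/* parse IEs in b->variable, length len - baselen ... */
	printf("parsing %zu bytes of IEs\n", len - baselen);
	return 0;
}

int main(void)
{
	struct demo_beacon b = { {0}, {0} };

	demo_parse(&b, sizeof(b) + 10);	/* pretend 10 bytes of IEs followed */
	demo_parse(&b, 8);		/* truncated frame: rejected */
	return 0;
}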
@@ -2872,17 +2937,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2872 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 2937 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
2873 return; 2938 return;
2874 2939
2875 /* Process beacon from the current BSS */ 2940 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2876 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2941 elems.wmm_param_len);
2877 if (baselen > len)
2878 return;
2879
2880 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2881
2882 if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
2883 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
2884 elems.wmm_param_len);
2885 }
2886 2942
2887 /* Do not send changes to driver if we are scanning. This removes 2943 /* Do not send changes to driver if we are scanning. This removes
2888 * requirement that driver's bss_info_changed function needs to be 2944 * requirement that driver's bss_info_changed function needs to be
@@ -2959,11 +3015,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2959 pos = mgmt->u.probe_req.variable; 3015 pos = mgmt->u.probe_req.variable;
2960 if (pos[0] != WLAN_EID_SSID || 3016 if (pos[0] != WLAN_EID_SSID ||
2961 pos + 2 + pos[1] > end) { 3017 pos + 2 + pos[1] > end) {
2962 if (net_ratelimit()) { 3018#ifdef CONFIG_MAC80211_IBSS_DEBUG
2963 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 3019 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
2964 "from %s\n", 3020 "from %s\n",
2965 dev->name, print_mac(mac, mgmt->sa)); 3021 dev->name, print_mac(mac, mgmt->sa));
2966 } 3022#endif
2967 return; 3023 return;
2968 } 3024 }
2969 if (pos[1] != 0 && 3025 if (pos[1] != 0 &&
@@ -2994,11 +3050,24 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
2994 struct ieee80211_rx_status *rx_status) 3050 struct ieee80211_rx_status *rx_status)
2995{ 3051{
2996 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3052 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3053 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2997 3054
2998 if (len < IEEE80211_MIN_ACTION_SIZE) 3055 if (len < IEEE80211_MIN_ACTION_SIZE)
2999 return; 3056 return;
3000 3057
3001 switch (mgmt->u.action.category) { 3058 switch (mgmt->u.action.category) {
3059 case WLAN_CATEGORY_SPECTRUM_MGMT:
3060 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
3061 break;
3062 switch (mgmt->u.action.u.chan_switch.action_code) {
3063 case WLAN_ACTION_SPCT_MSR_REQ:
3064 if (len < (IEEE80211_MIN_ACTION_SIZE +
3065 sizeof(mgmt->u.action.u.measurement)))
3066 break;
3067 ieee80211_sta_process_measurement_req(dev, mgmt, len);
3068 break;
3069 }
3070 break;
3002 case WLAN_CATEGORY_BACK: 3071 case WLAN_CATEGORY_BACK:
3003 switch (mgmt->u.action.u.addba_req.action_code) { 3072 switch (mgmt->u.action.u.addba_req.action_code) {
3004 case WLAN_ACTION_ADDBA_REQ: 3073 case WLAN_ACTION_ADDBA_REQ:
@@ -3019,11 +3088,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
3019 break; 3088 break;
3020 ieee80211_sta_process_delba(dev, mgmt, len); 3089 ieee80211_sta_process_delba(dev, mgmt, len);
3021 break; 3090 break;
3022 default:
3023 if (net_ratelimit())
3024 printk(KERN_DEBUG "%s: Rx unknown A-MPDU action\n",
3025 dev->name);
3026 break;
3027 } 3091 }
3028 break; 3092 break;
3029 case PLINK_CATEGORY: 3093 case PLINK_CATEGORY:
@@ -3034,11 +3098,6 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
3034 if (ieee80211_vif_is_mesh(&sdata->vif)) 3098 if (ieee80211_vif_is_mesh(&sdata->vif))
3035 mesh_rx_path_sel_frame(dev, mgmt, len); 3099 mesh_rx_path_sel_frame(dev, mgmt, len);
3036 break; 3100 break;
3037 default:
3038 if (net_ratelimit())
3039 printk(KERN_DEBUG "%s: Rx unknown action frame - "
3040 "category=%d\n", dev->name, mgmt->u.action.category);
3041 break;
3042 } 3101 }
3043} 3102}
3044 3103
@@ -3074,11 +3133,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3074 skb_queue_tail(&ifsta->skb_queue, skb); 3133 skb_queue_tail(&ifsta->skb_queue, skb);
3075 queue_work(local->hw.workqueue, &ifsta->work); 3134 queue_work(local->hw.workqueue, &ifsta->work);
3076 return; 3135 return;
3077 default:
3078 printk(KERN_DEBUG "%s: received unknown management frame - "
3079 "stype=%d\n", dev->name,
3080 (fc & IEEE80211_FCTL_STYPE) >> 4);
3081 break;
3082 } 3136 }
3083 3137
3084 fail: 3138 fail:
@@ -3142,33 +3196,32 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
3142 struct ieee80211_rx_status *rx_status) 3196 struct ieee80211_rx_status *rx_status)
3143{ 3197{
3144 struct ieee80211_mgmt *mgmt; 3198 struct ieee80211_mgmt *mgmt;
3145 u16 fc; 3199 __le16 fc;
3146 3200
3147 if (skb->len < 2) 3201 if (skb->len < 2)
3148 return RX_DROP_UNUSABLE; 3202 return RX_DROP_UNUSABLE;
3149 3203
3150 mgmt = (struct ieee80211_mgmt *) skb->data; 3204 mgmt = (struct ieee80211_mgmt *) skb->data;
3151 fc = le16_to_cpu(mgmt->frame_control); 3205 fc = mgmt->frame_control;
3152 3206
3153 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 3207 if (ieee80211_is_ctl(fc))
3154 return RX_CONTINUE; 3208 return RX_CONTINUE;
3155 3209
3156 if (skb->len < 24) 3210 if (skb->len < 24)
3157 return RX_DROP_MONITOR; 3211 return RX_DROP_MONITOR;
3158 3212
3159 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 3213 if (ieee80211_is_probe_resp(fc)) {
3160 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { 3214 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
3161 ieee80211_rx_mgmt_probe_resp(dev, mgmt, 3215 dev_kfree_skb(skb);
3162 skb->len, rx_status); 3216 return RX_QUEUED;
3163 dev_kfree_skb(skb);
3164 return RX_QUEUED;
3165 } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) {
3166 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len,
3167 rx_status);
3168 dev_kfree_skb(skb);
3169 return RX_QUEUED;
3170 }
3171 } 3217 }
3218
3219 if (ieee80211_is_beacon(fc)) {
3220 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
3221 dev_kfree_skb(skb);
3222 return RX_QUEUED;
3223 }
3224
3172 return RX_CONTINUE; 3225 return RX_CONTINUE;
3173} 3226}
3174 3227
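The scan path above keeps frame_control as __le16 and tests it with ieee80211_is_ctl(), ieee80211_is_probe_resp() and ieee80211_is_beacon() instead of byte-swapping and masking by hand. The usual implementation of such helpers (assumed here, not quoted from the header) compares against constants that are themselves wrapped in cpu_to_le16(), so the per-frame check needs no runtime swap. A host-order sketch of the same mask-and-compare idea, with mask values matching the 802.11 frame-control layout:

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE	0x000c	/* frame type field */
#define FCTL_STYPE	0x00f0	/* frame subtype field */
#define FTYPE_MGMT	0x0000
#define FTYPE_CTL	0x0004
#define STYPE_BEACON	0x0080

/* host-order analogue of ieee80211_is_beacon(): mask type+subtype, compare */
static int is_beacon(uint16_t fc)
{
	return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | STYPE_BEACON);
}

static int is_ctl(uint16_t fc)
{
	return (fc & FCTL_FTYPE) == FTYPE_CTL;
}

int main(void)
{
	uint16_t beacon_fc = FTYPE_MGMT | STYPE_BEACON;

	printf("beacon? %d  ctl? %d\n", is_beacon(beacon_fc), is_ctl(beacon_fc));
	return 0;
}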
@@ -3208,8 +3261,10 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
3208 spin_lock_irqsave(&local->sta_lock, flags); 3261 spin_lock_irqsave(&local->sta_lock, flags);
3209 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 3262 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
3210 if (time_after(jiffies, sta->last_rx + exp_time)) { 3263 if (time_after(jiffies, sta->last_rx + exp_time)) {
3264#ifdef CONFIG_MAC80211_IBSS_DEBUG
3211 printk(KERN_DEBUG "%s: expiring inactive STA %s\n", 3265 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
3212 dev->name, print_mac(mac, sta->addr)); 3266 dev->name, print_mac(mac, sta->addr));
3267#endif
3213 __sta_info_unlink(&sta); 3268 __sta_info_unlink(&sta);
3214 if (sta) 3269 if (sta)
3215 list_add(&sta->list, &tmp_list); 3270 list_add(&sta->list, &tmp_list);
@@ -3292,13 +3347,10 @@ void ieee80211_sta_work(struct work_struct *work)
3292 if (local->sta_sw_scanning || local->sta_hw_scanning) 3347 if (local->sta_sw_scanning || local->sta_hw_scanning)
3293 return; 3348 return;
3294 3349
3295 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 3350 if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA &&
3296 sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 3351 sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
3297 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) { 3352 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
3298 printk(KERN_DEBUG "%s: ieee80211_sta_work: non-STA interface "
3299 "(type=%d)\n", dev->name, sdata->vif.type);
3300 return; 3353 return;
3301 }
3302 ifsta = &sdata->u.sta; 3354 ifsta = &sdata->u.sta;
3303 3355
3304 while ((skb = skb_dequeue(&ifsta->skb_queue))) 3356 while ((skb = skb_dequeue(&ifsta->skb_queue)))
@@ -3352,8 +3404,7 @@ void ieee80211_sta_work(struct work_struct *work)
3352 break; 3404 break;
3353#endif 3405#endif
3354 default: 3406 default:
3355 printk(KERN_DEBUG "ieee80211_sta_work: Unknown state %d\n", 3407 WARN_ON(1);
3356 ifsta->state);
3357 break; 3408 break;
3358 } 3409 }
3359 3410
@@ -3388,8 +3439,6 @@ static void ieee80211_sta_reset_auth(struct net_device *dev,
3388 ifsta->auth_alg = WLAN_AUTH_LEAP; 3439 ifsta->auth_alg = WLAN_AUTH_LEAP;
3389 else 3440 else
3390 ifsta->auth_alg = WLAN_AUTH_OPEN; 3441 ifsta->auth_alg = WLAN_AUTH_OPEN;
3391 printk(KERN_DEBUG "%s: Initial auth_alg=%d\n", dev->name,
3392 ifsta->auth_alg);
3393 ifsta->auth_transaction = -1; 3442 ifsta->auth_transaction = -1;
3394 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 3443 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
3395 ifsta->auth_tries = ifsta->assoc_tries = 0; 3444 ifsta->auth_tries = ifsta->assoc_tries = 0;
@@ -3478,9 +3527,9 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
3478 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) 3527 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3479 continue; 3528 continue;
3480 3529
3481 if (!selected || top_rssi < bss->rssi) { 3530 if (!selected || top_rssi < bss->signal) {
3482 selected = bss; 3531 selected = bss;
3483 top_rssi = bss->rssi; 3532 top_rssi = bss->signal;
3484 } 3533 }
3485 } 3534 }
3486 if (selected) 3535 if (selected)
@@ -3553,14 +3602,16 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3553 sband = local->hw.wiphy->bands[bss->band]; 3602 sband = local->hw.wiphy->bands[bss->band];
3554 3603
3555 if (local->hw.conf.beacon_int == 0) 3604 if (local->hw.conf.beacon_int == 0)
3556 local->hw.conf.beacon_int = 10000; 3605 local->hw.conf.beacon_int = 100;
3557 bss->beacon_int = local->hw.conf.beacon_int; 3606 bss->beacon_int = local->hw.conf.beacon_int;
3558 bss->last_update = jiffies; 3607 bss->last_update = jiffies;
3559 bss->capability = WLAN_CAPABILITY_IBSS; 3608 bss->capability = WLAN_CAPABILITY_IBSS;
3560 if (sdata->default_key) { 3609
3610 if (sdata->default_key)
3561 bss->capability |= WLAN_CAPABILITY_PRIVACY; 3611 bss->capability |= WLAN_CAPABILITY_PRIVACY;
3562 } else 3612 else
3563 sdata->drop_unencrypted = 0; 3613 sdata->drop_unencrypted = 0;
3614
3564 bss->supp_rates_len = sband->n_bitrates; 3615 bss->supp_rates_len = sband->n_bitrates;
3565 pos = bss->supp_rates; 3616 pos = bss->supp_rates;
3566 for (i = 0; i < sband->n_bitrates; i++) { 3617 for (i = 0; i < sband->n_bitrates; i++) {
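The default in the hunk above drops from 10000 to 100, presumably because hw.conf.beacon_int is meant to be in 802.11 time units (TUs) rather than microseconds; 1 TU is 1024 us, so the conventional default of 100 TU comes to roughly 102 ms. A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned int tu_us = 1024;	/* one 802.11 time unit, in us */
	const unsigned int beacon_int_tu = 100;	/* default used in the hunk above */

	printf("beacon interval = %u us (~%.1f ms)\n",
	       beacon_int_tu * tu_us, beacon_int_tu * tu_us / 1000.0);
	return 0;
}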
@@ -3744,7 +3795,7 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
3744{ 3795{
3745 struct sk_buff *skb; 3796 struct sk_buff *skb;
3746 struct ieee80211_hdr *nullfunc; 3797 struct ieee80211_hdr *nullfunc;
3747 u16 fc; 3798 __le16 fc;
3748 3799
3749 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 3800 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
3750 if (!skb) { 3801 if (!skb) {
@@ -3756,11 +3807,11 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
3756 3807
3757 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 3808 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
3758 memset(nullfunc, 0, 24); 3809 memset(nullfunc, 0, 24);
3759 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 3810 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
3760 IEEE80211_FCTL_TODS; 3811 IEEE80211_FCTL_TODS);
3761 if (powersave) 3812 if (powersave)
3762 fc |= IEEE80211_FCTL_PM; 3813 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
3763 nullfunc->frame_control = cpu_to_le16(fc); 3814 nullfunc->frame_control = fc;
3764 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); 3815 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
3765 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 3816 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
3766 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); 3817 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
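[Note] The nullfunc change above keeps the frame control word in little-endian form (__le16) from the start instead of converting once at the end. Below is a small, self-contained sketch of the same composition; the constant values mirror include/linux/ieee80211.h, and the cpu_to_le16() here is a no-op stand-in that assumes a little-endian host:

/* Sketch: compose a little-endian nullfunc frame_control the way the
 * patched ieee80211_send_nullfunc() does. */
#include <stdio.h>
#include <stdint.h>

#define IEEE80211_FTYPE_DATA      0x0008
#define IEEE80211_STYPE_NULLFUNC  0x0048
#define IEEE80211_FCTL_TODS       0x0100
#define IEEE80211_FCTL_PM         0x1000

static uint16_t cpu_to_le16(uint16_t v)
{
        return v;       /* stand-in: assumes a little-endian host */
}

int main(void)
{
        int powersave = 1;
        uint16_t fc;

        fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
                         IEEE80211_FCTL_TODS);
        if (powersave)
                fc |= cpu_to_le16(IEEE80211_FCTL_PM);

        printf("frame_control = 0x%04x\n", fc);  /* prints 0x1148 */
        return 0;
}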
@@ -4054,6 +4105,7 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
4054 4105
4055static char * 4106static char *
4056ieee80211_sta_scan_result(struct net_device *dev, 4107ieee80211_sta_scan_result(struct net_device *dev,
4108 struct iw_request_info *info,
4057 struct ieee80211_sta_bss *bss, 4109 struct ieee80211_sta_bss *bss,
4058 char *current_ev, char *end_buf) 4110 char *current_ev, char *end_buf)
4059{ 4111{
@@ -4068,7 +4120,7 @@ ieee80211_sta_scan_result(struct net_device *dev,
4068 iwe.cmd = SIOCGIWAP; 4120 iwe.cmd = SIOCGIWAP;
4069 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 4121 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
4070 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); 4122 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
4071 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4123 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4072 IW_EV_ADDR_LEN); 4124 IW_EV_ADDR_LEN);
4073 4125
4074 memset(&iwe, 0, sizeof(iwe)); 4126 memset(&iwe, 0, sizeof(iwe));
@@ -4076,13 +4128,13 @@ ieee80211_sta_scan_result(struct net_device *dev,
4076 if (bss_mesh_cfg(bss)) { 4128 if (bss_mesh_cfg(bss)) {
4077 iwe.u.data.length = bss_mesh_id_len(bss); 4129 iwe.u.data.length = bss_mesh_id_len(bss);
4078 iwe.u.data.flags = 1; 4130 iwe.u.data.flags = 1;
4079 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, 4131 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4080 bss_mesh_id(bss)); 4132 &iwe, bss_mesh_id(bss));
4081 } else { 4133 } else {
4082 iwe.u.data.length = bss->ssid_len; 4134 iwe.u.data.length = bss->ssid_len;
4083 iwe.u.data.flags = 1; 4135 iwe.u.data.flags = 1;
4084 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, 4136 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4085 bss->ssid); 4137 &iwe, bss->ssid);
4086 } 4138 }
4087 4139
4088 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) 4140 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
@@ -4095,30 +4147,30 @@ ieee80211_sta_scan_result(struct net_device *dev,
4095 iwe.u.mode = IW_MODE_MASTER; 4147 iwe.u.mode = IW_MODE_MASTER;
4096 else 4148 else
4097 iwe.u.mode = IW_MODE_ADHOC; 4149 iwe.u.mode = IW_MODE_ADHOC;
4098 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4150 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
4099 IW_EV_UINT_LEN); 4151 &iwe, IW_EV_UINT_LEN);
4100 } 4152 }
4101 4153
4102 memset(&iwe, 0, sizeof(iwe)); 4154 memset(&iwe, 0, sizeof(iwe));
4103 iwe.cmd = SIOCGIWFREQ; 4155 iwe.cmd = SIOCGIWFREQ;
4104 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); 4156 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
4105 iwe.u.freq.e = 0; 4157 iwe.u.freq.e = 0;
4106 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4158 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4107 IW_EV_FREQ_LEN); 4159 IW_EV_FREQ_LEN);
4108 4160
4109 memset(&iwe, 0, sizeof(iwe)); 4161 memset(&iwe, 0, sizeof(iwe));
4110 iwe.cmd = SIOCGIWFREQ; 4162 iwe.cmd = SIOCGIWFREQ;
4111 iwe.u.freq.m = bss->freq; 4163 iwe.u.freq.m = bss->freq;
4112 iwe.u.freq.e = 6; 4164 iwe.u.freq.e = 6;
4113 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4165 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4114 IW_EV_FREQ_LEN); 4166 IW_EV_FREQ_LEN);
4115 memset(&iwe, 0, sizeof(iwe)); 4167 memset(&iwe, 0, sizeof(iwe));
4116 iwe.cmd = IWEVQUAL; 4168 iwe.cmd = IWEVQUAL;
4117 iwe.u.qual.qual = bss->signal; 4169 iwe.u.qual.qual = bss->qual;
4118 iwe.u.qual.level = bss->rssi; 4170 iwe.u.qual.level = bss->signal;
4119 iwe.u.qual.noise = bss->noise; 4171 iwe.u.qual.noise = bss->noise;
4120 iwe.u.qual.updated = local->wstats_flags; 4172 iwe.u.qual.updated = local->wstats_flags;
4121 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 4173 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4122 IW_EV_QUAL_LEN); 4174 IW_EV_QUAL_LEN);
4123 4175
4124 memset(&iwe, 0, sizeof(iwe)); 4176 memset(&iwe, 0, sizeof(iwe));
@@ -4128,27 +4180,36 @@ ieee80211_sta_scan_result(struct net_device *dev,
4128 else 4180 else
4129 iwe.u.data.flags = IW_ENCODE_DISABLED; 4181 iwe.u.data.flags = IW_ENCODE_DISABLED;
4130 iwe.u.data.length = 0; 4182 iwe.u.data.length = 0;
4131 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ""); 4183 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4184 &iwe, "");
4132 4185
4133 if (bss && bss->wpa_ie) { 4186 if (bss && bss->wpa_ie) {
4134 memset(&iwe, 0, sizeof(iwe)); 4187 memset(&iwe, 0, sizeof(iwe));
4135 iwe.cmd = IWEVGENIE; 4188 iwe.cmd = IWEVGENIE;
4136 iwe.u.data.length = bss->wpa_ie_len; 4189 iwe.u.data.length = bss->wpa_ie_len;
4137 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, 4190 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4138 bss->wpa_ie); 4191 &iwe, bss->wpa_ie);
4139 } 4192 }
4140 4193
4141 if (bss && bss->rsn_ie) { 4194 if (bss && bss->rsn_ie) {
4142 memset(&iwe, 0, sizeof(iwe)); 4195 memset(&iwe, 0, sizeof(iwe));
4143 iwe.cmd = IWEVGENIE; 4196 iwe.cmd = IWEVGENIE;
4144 iwe.u.data.length = bss->rsn_ie_len; 4197 iwe.u.data.length = bss->rsn_ie_len;
4145 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, 4198 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4146 bss->rsn_ie); 4199 &iwe, bss->rsn_ie);
4200 }
4201
4202 if (bss && bss->ht_ie) {
4203 memset(&iwe, 0, sizeof(iwe));
4204 iwe.cmd = IWEVGENIE;
4205 iwe.u.data.length = bss->ht_ie_len;
4206 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4207 &iwe, bss->ht_ie);
4147 } 4208 }
4148 4209
4149 if (bss && bss->supp_rates_len > 0) { 4210 if (bss && bss->supp_rates_len > 0) {
4150 /* display all supported rates in readable format */ 4211 /* display all supported rates in readable format */
4151 char *p = current_ev + IW_EV_LCP_LEN; 4212 char *p = current_ev + iwe_stream_lcp_len(info);
4152 int i; 4213 int i;
4153 4214
4154 memset(&iwe, 0, sizeof(iwe)); 4215 memset(&iwe, 0, sizeof(iwe));
@@ -4159,7 +4220,7 @@ ieee80211_sta_scan_result(struct net_device *dev,
4159 for (i = 0; i < bss->supp_rates_len; i++) { 4220 for (i = 0; i < bss->supp_rates_len; i++) {
4160 iwe.u.bitrate.value = ((bss->supp_rates[i] & 4221 iwe.u.bitrate.value = ((bss->supp_rates[i] &
4161 0x7f) * 500000); 4222 0x7f) * 500000);
4162 p = iwe_stream_add_value(current_ev, p, 4223 p = iwe_stream_add_value(info, current_ev, p,
4163 end_buf, &iwe, IW_EV_PARAM_LEN); 4224 end_buf, &iwe, IW_EV_PARAM_LEN);
4164 } 4225 }
4165 current_ev = p; 4226 current_ev = p;
@@ -4173,8 +4234,16 @@ ieee80211_sta_scan_result(struct net_device *dev,
4173 iwe.cmd = IWEVCUSTOM; 4234 iwe.cmd = IWEVCUSTOM;
4174 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); 4235 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
4175 iwe.u.data.length = strlen(buf); 4236 iwe.u.data.length = strlen(buf);
4176 current_ev = iwe_stream_add_point(current_ev, end_buf, 4237 current_ev = iwe_stream_add_point(info, current_ev,
4238 end_buf,
4177 &iwe, buf); 4239 &iwe, buf);
4240 memset(&iwe, 0, sizeof(iwe));
4241 iwe.cmd = IWEVCUSTOM;
4242 sprintf(buf, " Last beacon: %dms ago",
4243 jiffies_to_msecs(jiffies - bss->last_update));
4244 iwe.u.data.length = strlen(buf);
4245 current_ev = iwe_stream_add_point(info, current_ev,
4246 end_buf, &iwe, buf);
4178 kfree(buf); 4247 kfree(buf);
4179 } 4248 }
4180 } 4249 }
@@ -4188,31 +4257,36 @@ ieee80211_sta_scan_result(struct net_device *dev,
4188 iwe.cmd = IWEVCUSTOM; 4257 iwe.cmd = IWEVCUSTOM;
4189 sprintf(buf, "Mesh network (version %d)", cfg[0]); 4258 sprintf(buf, "Mesh network (version %d)", cfg[0]);
4190 iwe.u.data.length = strlen(buf); 4259 iwe.u.data.length = strlen(buf);
4191 current_ev = iwe_stream_add_point(current_ev, end_buf, 4260 current_ev = iwe_stream_add_point(info, current_ev,
4261 end_buf,
4192 &iwe, buf); 4262 &iwe, buf);
4193 sprintf(buf, "Path Selection Protocol ID: " 4263 sprintf(buf, "Path Selection Protocol ID: "
4194 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], 4264 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
4195 cfg[4]); 4265 cfg[4]);
4196 iwe.u.data.length = strlen(buf); 4266 iwe.u.data.length = strlen(buf);
4197 current_ev = iwe_stream_add_point(current_ev, end_buf, 4267 current_ev = iwe_stream_add_point(info, current_ev,
4268 end_buf,
4198 &iwe, buf); 4269 &iwe, buf);
4199 sprintf(buf, "Path Selection Metric ID: " 4270 sprintf(buf, "Path Selection Metric ID: "
4200 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], 4271 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
4201 cfg[8]); 4272 cfg[8]);
4202 iwe.u.data.length = strlen(buf); 4273 iwe.u.data.length = strlen(buf);
4203 current_ev = iwe_stream_add_point(current_ev, end_buf, 4274 current_ev = iwe_stream_add_point(info, current_ev,
4275 end_buf,
4204 &iwe, buf); 4276 &iwe, buf);
4205 sprintf(buf, "Congestion Control Mode ID: " 4277 sprintf(buf, "Congestion Control Mode ID: "
4206 "0x%02X%02X%02X%02X", cfg[9], cfg[10], 4278 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
4207 cfg[11], cfg[12]); 4279 cfg[11], cfg[12]);
4208 iwe.u.data.length = strlen(buf); 4280 iwe.u.data.length = strlen(buf);
4209 current_ev = iwe_stream_add_point(current_ev, end_buf, 4281 current_ev = iwe_stream_add_point(info, current_ev,
4282 end_buf,
4210 &iwe, buf); 4283 &iwe, buf);
4211 sprintf(buf, "Channel Precedence: " 4284 sprintf(buf, "Channel Precedence: "
4212 "0x%02X%02X%02X%02X", cfg[13], cfg[14], 4285 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
4213 cfg[15], cfg[16]); 4286 cfg[15], cfg[16]);
4214 iwe.u.data.length = strlen(buf); 4287 iwe.u.data.length = strlen(buf);
4215 current_ev = iwe_stream_add_point(current_ev, end_buf, 4288 current_ev = iwe_stream_add_point(info, current_ev,
4289 end_buf,
4216 &iwe, buf); 4290 &iwe, buf);
4217 kfree(buf); 4291 kfree(buf);
4218 } 4292 }
@@ -4222,7 +4296,9 @@ ieee80211_sta_scan_result(struct net_device *dev,
4222} 4296}
4223 4297
4224 4298
4225int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len) 4299int ieee80211_sta_scan_results(struct net_device *dev,
4300 struct iw_request_info *info,
4301 char *buf, size_t len)
4226{ 4302{
4227 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 4303 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4228 char *current_ev = buf; 4304 char *current_ev = buf;
@@ -4235,8 +4311,8 @@ int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len)
4235 spin_unlock_bh(&local->sta_bss_lock); 4311 spin_unlock_bh(&local->sta_bss_lock);
4236 return -E2BIG; 4312 return -E2BIG;
4237 } 4313 }
4238 current_ev = ieee80211_sta_scan_result(dev, bss, current_ev, 4314 current_ev = ieee80211_sta_scan_result(dev, info, bss,
4239 end_buf); 4315 current_ev, end_buf);
4240 } 4316 }
4241 spin_unlock_bh(&local->sta_bss_lock); 4317 spin_unlock_bh(&local->sta_bss_lock);
4242 return current_ev - buf; 4318 return current_ev - buf;
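[Note] ieee80211_sta_scan_results() above walks the BSS list and appends wext events to a caller-supplied buffer through the iwe_stream helpers (which now take the iw_request_info argument), bailing out with -E2BIG once the buffer cannot hold the next record. The snippet below models only that cursor-and-bound pattern with hypothetical helpers; it is not the wext/iwe_stream API:

/* Simplified model: append variable-size records to a caller buffer,
 * stop with -E2BIG when full, else return bytes used (current_ev - buf). */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static char *add_record(char *cur, char *end, const char *text)
{
        size_t len = strlen(text) + 1;

        if (cur + len > end)
                return NULL;            /* would overflow: caller maps to -E2BIG */
        memcpy(cur, text, len);
        return cur + len;
}

static int fill_results(char *buf, size_t len)
{
        char *cur = buf, *end = buf + len;
        const char *fake_bss[] = { "bssid=00:11:22:33:44:55", "ssid=example" };
        size_t i;

        for (i = 0; i < sizeof(fake_bss) / sizeof(fake_bss[0]); i++) {
                char *next = add_record(cur, end, fake_bss[i]);
                if (!next)
                        return -E2BIG;
                cur = next;
        }
        return (int)(cur - buf);
}

int main(void)
{
        char buf[64];

        printf("used %d bytes\n", fill_results(buf, sizeof(buf)));
        printf("tiny buffer -> %d\n", fill_results(buf, 8));
        return 0;
}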
@@ -4247,6 +4323,7 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4247{ 4323{
4248 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 4324 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4249 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4325 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4326
4250 kfree(ifsta->extra_ie); 4327 kfree(ifsta->extra_ie);
4251 if (len == 0) { 4328 if (len == 0) {
4252 ifsta->extra_ie = NULL; 4329 ifsta->extra_ie = NULL;
@@ -4264,14 +4341,15 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4264} 4341}
4265 4342
4266 4343
4267struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, 4344struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4268 struct sk_buff *skb, u8 *bssid, 4345 struct sk_buff *skb, u8 *bssid,
4269 u8 *addr) 4346 u8 *addr, u64 supp_rates)
4270{ 4347{
4271 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 4348 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4272 struct sta_info *sta; 4349 struct sta_info *sta;
4273 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 4350 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4274 DECLARE_MAC_BUF(mac); 4351 DECLARE_MAC_BUF(mac);
4352 int band = local->hw.conf.channel->band;
4275 4353
4276 /* TODO: Could consider removing the least recently used entry and 4354 /* TODO: Could consider removing the least recently used entry and
4277 * allow new one to be added. */ 4355 * allow new one to be added. */
@@ -4283,17 +4361,24 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
4283 return NULL; 4361 return NULL;
4284 } 4362 }
4285 4363
4364 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid))
4365 return NULL;
4366
4367#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
4286 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", 4368 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
4287 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); 4369 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
4370#endif
4288 4371
4289 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 4372 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
4290 if (!sta) 4373 if (!sta)
4291 return NULL; 4374 return NULL;
4292 4375
4293 sta->flags |= WLAN_STA_AUTHORIZED; 4376 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
4294 4377
4295 sta->supp_rates[local->hw.conf.channel->band] = 4378 if (supp_rates)
4296 sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band]; 4379 sta->supp_rates[band] = supp_rates;
4380 else
4381 sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
4297 4382
4298 rate_control_rate_init(sta, local); 4383 rate_control_rate_init(sta, local);
4299 4384
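[Note] ieee80211_ibss_add_sta() above gains a supp_rates argument and falls back to the interface's per-band defaults when none are supplied; supported rates are tracked as a bitmask indexed by bitrate position, which is exactly what rate_supported() in rate.h tests. A toy illustration with made-up rate sets:

/* Sketch of the supp_rates fallback added above: prefer the rates the
 * peer advertised, else the interface defaults; then test membership the
 * way rate_supported() does (bit index == bitrate index). */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1ULL << (n))

int main(void)
{
        uint64_t default_rates = BIT(0) | BIT(1) | BIT(2) | BIT(3); /* illustrative */
        uint64_t peer_rates    = BIT(0) | BIT(1);                   /* peer-advertised */
        uint64_t supp_rates, sta_rates;
        int idx;

        supp_rates = peer_rates;        /* set to 0 to exercise the fallback */
        sta_rates = supp_rates ? supp_rates : default_rates;

        for (idx = 0; idx < 4; idx++)
                printf("rate index %d supported: %s\n",
                       idx, (sta_rates & BIT(idx)) ? "yes" : "no");
        return 0;
}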
@@ -4309,7 +4394,7 @@ int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason)
4309 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 4394 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4310 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4395 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4311 4396
4312 printk(KERN_DEBUG "%s: deauthenticate(reason=%d)\n", 4397 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
4313 dev->name, reason); 4398 dev->name, reason);
4314 4399
4315 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 4400 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
@@ -4327,7 +4412,7 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
4327 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 4412 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4328 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4413 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4329 4414
4330 printk(KERN_DEBUG "%s: disassociate(reason=%d)\n", 4415 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
4331 dev->name, reason); 4416 dev->name, reason);
4332 4417
4333 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 4418 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 841df93807fc..0388c090dfe9 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -176,20 +176,24 @@ void rate_control_get_rate(struct net_device *dev,
176 rcu_read_lock(); 176 rcu_read_lock();
177 sta = sta_info_get(local, hdr->addr1); 177 sta = sta_info_get(local, hdr->addr1);
178 178
179 memset(sel, 0, sizeof(struct rate_selection)); 179 sel->rate_idx = -1;
180 sel->nonerp_idx = -1;
181 sel->probe_idx = -1;
180 182
181 ref->ops->get_rate(ref->priv, dev, sband, skb, sel); 183 ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
182 184
185 BUG_ON(sel->rate_idx < 0);
186
183 /* Select a non-ERP backup rate. */ 187 /* Select a non-ERP backup rate. */
184 if (!sel->nonerp) { 188 if (sel->nonerp_idx < 0) {
185 for (i = 0; i < sband->n_bitrates; i++) { 189 for (i = 0; i < sband->n_bitrates; i++) {
186 struct ieee80211_rate *rate = &sband->bitrates[i]; 190 struct ieee80211_rate *rate = &sband->bitrates[i];
187 if (sel->rate->bitrate < rate->bitrate) 191 if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate)
188 break; 192 break;
189 193
190 if (rate_supported(sta, sband->band, i) && 194 if (rate_supported(sta, sband->band, i) &&
191 !(rate->flags & IEEE80211_RATE_ERP_G)) 195 !(rate->flags & IEEE80211_RATE_ERP_G))
192 sel->nonerp = rate; 196 sel->nonerp_idx = i;
193 } 197 }
194 } 198 }
195 199
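[Note] rate_control_get_rate() above now works with bitrate indices: rate_idx/nonerp_idx/probe_idx start at -1, the algorithm must fill rate_idx, and the non-ERP fallback scans the band's (ascending) bitrate table for the fastest non-ERP rate not above the selected one. A standalone sketch of that fallback loop over an invented table:

/* Sketch of the index-based non-ERP fallback: pick the highest non-ERP
 * rate whose bitrate does not exceed the already-selected rate.
 * The bitrate table and flag bit are made up for illustration. */
#include <stdio.h>

#define RATE_ERP_G 0x1

struct bitrate { int bitrate; int flags; };     /* bitrate in 100 kbit/s */

static const struct bitrate band[] = {
        { 10, 0 }, { 20, 0 }, { 55, 0 }, { 60, RATE_ERP_G },
        { 90, RATE_ERP_G }, { 110, 0 }, { 120, RATE_ERP_G },
};

int main(void)
{
        int n = sizeof(band) / sizeof(band[0]);
        int rate_idx = 6;       /* pretend the algorithm selected 12 Mbit/s */
        int nonerp_idx = -1;    /* -1 sentinel, as in struct rate_selection */
        int i;

        for (i = 0; i < n; i++) {
                if (band[rate_idx].bitrate < band[i].bitrate)
                        break;                  /* table is sorted ascending */
                if (!(band[i].flags & RATE_ERP_G))
                        nonerp_idx = i;         /* keep the fastest non-ERP hit */
        }
        printf("selected idx %d (bitrate %d x 100 kbit/s), non-ERP fallback idx %d\n",
               rate_idx, band[rate_idx].bitrate, nonerp_idx);
        return 0;
}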
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 5b45f33cb766..ede7ab56f65b 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -19,22 +19,22 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "sta_info.h" 20#include "sta_info.h"
21 21
22/* TODO: kdoc */ 22/**
23 * struct rate_selection - rate selection for rate control algos
24 * @rate: selected transmission rate index
25 * @nonerp: Non-ERP rate to use instead if ERP cannot be used
26 * @probe: rate for probing (or -1)
27 *
28 */
23struct rate_selection { 29struct rate_selection {
24 /* Selected transmission rate */ 30 s8 rate_idx, nonerp_idx, probe_idx;
25 struct ieee80211_rate *rate;
26 /* Non-ERP rate to use if mac80211 decides it cannot use an ERP rate */
27 struct ieee80211_rate *nonerp;
28 /* probe with this rate, or NULL for no probing */
29 struct ieee80211_rate *probe;
30}; 31};
31 32
32struct rate_control_ops { 33struct rate_control_ops {
33 struct module *module; 34 struct module *module;
34 const char *name; 35 const char *name;
35 void (*tx_status)(void *priv, struct net_device *dev, 36 void (*tx_status)(void *priv, struct net_device *dev,
36 struct sk_buff *skb, 37 struct sk_buff *skb);
37 struct ieee80211_tx_status *status);
38 void (*get_rate)(void *priv, struct net_device *dev, 38 void (*get_rate)(void *priv, struct net_device *dev,
39 struct ieee80211_supported_band *band, 39 struct ieee80211_supported_band *band,
40 struct sk_buff *skb, 40 struct sk_buff *skb,
@@ -76,13 +76,12 @@ struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
76void rate_control_put(struct rate_control_ref *ref); 76void rate_control_put(struct rate_control_ref *ref);
77 77
78static inline void rate_control_tx_status(struct net_device *dev, 78static inline void rate_control_tx_status(struct net_device *dev,
79 struct sk_buff *skb, 79 struct sk_buff *skb)
80 struct ieee80211_tx_status *status)
81{ 80{
82 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 81 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
83 struct rate_control_ref *ref = local->rate_ctrl; 82 struct rate_control_ref *ref = local->rate_ctrl;
84 83
85 ref->ops->tx_status(ref->priv, dev, skb, status); 84 ref->ops->tx_status(ref->priv, dev, skb);
86} 85}
87 86
88 87
@@ -138,7 +137,7 @@ static inline int rate_supported(struct sta_info *sta,
138 return (sta == NULL || sta->supp_rates[band] & BIT(index)); 137 return (sta == NULL || sta->supp_rates[band] & BIT(index));
139} 138}
140 139
141static inline int 140static inline s8
142rate_lowest_index(struct ieee80211_local *local, 141rate_lowest_index(struct ieee80211_local *local,
143 struct ieee80211_supported_band *sband, 142 struct ieee80211_supported_band *sband,
144 struct sta_info *sta) 143 struct sta_info *sta)
@@ -155,14 +154,6 @@ rate_lowest_index(struct ieee80211_local *local,
155 return 0; 154 return 0;
156} 155}
157 156
158static inline struct ieee80211_rate *
159rate_lowest(struct ieee80211_local *local,
160 struct ieee80211_supported_band *sband,
161 struct sta_info *sta)
162{
163 return &sband->bitrates[rate_lowest_index(local, sband, sta)];
164}
165
166 157
167/* functions for rate control related to a device */ 158/* functions for rate control related to a device */
168int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 159int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
@@ -171,9 +162,7 @@ void rate_control_deinitialize(struct ieee80211_local *local);
171 162
172 163
173/* Rate control algorithms */ 164/* Rate control algorithms */
174#if defined(RC80211_PID_COMPILE) || \ 165#ifdef CONFIG_MAC80211_RC_PID
175 (defined(CONFIG_MAC80211_RC_PID) && \
176 !defined(CONFIG_MAC80211_RC_PID_MODULE))
177extern int rc80211_pid_init(void); 166extern int rc80211_pid_init(void);
178extern void rc80211_pid_exit(void); 167extern void rc80211_pid_exit(void);
179#else 168#else
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 04afc13ed825..2078803d3581 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -61,7 +61,7 @@ enum rc_pid_event_type {
61union rc_pid_event_data { 61union rc_pid_event_data {
62 /* RC_PID_EVENT_TX_STATUS */ 62 /* RC_PID_EVENT_TX_STATUS */
63 struct { 63 struct {
64 struct ieee80211_tx_status tx_status; 64 struct ieee80211_tx_info tx_status;
65 }; 65 };
66 /* RC_PID_EVENT_TYPE_RATE_CHANGE */ 66 /* RC_PID_EVENT_TYPE_RATE_CHANGE */
67 /* RC_PID_EVENT_TYPE_TX_RATE */ 67 /* RC_PID_EVENT_TYPE_TX_RATE */
@@ -158,7 +158,7 @@ struct rc_pid_debugfs_entries {
158}; 158};
159 159
160void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, 160void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
161 struct ieee80211_tx_status *stat); 161 struct ieee80211_tx_info *stat);
162 162
163void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, 163void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
164 int index, int rate); 164 int index, int rate);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a849b745bdb5..62388f8e9024 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -237,8 +237,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
237} 237}
238 238
239static void rate_control_pid_tx_status(void *priv, struct net_device *dev, 239static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
240 struct sk_buff *skb, 240 struct sk_buff *skb)
241 struct ieee80211_tx_status *status)
242{ 241{
243 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 242 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
244 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 243 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -248,6 +247,7 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
248 struct rc_pid_sta_info *spinfo; 247 struct rc_pid_sta_info *spinfo;
249 unsigned long period; 248 unsigned long period;
250 struct ieee80211_supported_band *sband; 249 struct ieee80211_supported_band *sband;
250 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
251 251
252 rcu_read_lock(); 252 rcu_read_lock();
253 253
@@ -266,28 +266,28 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
266 266
267 /* Ignore all frames that were sent with a different rate than the rate 267 /* Ignore all frames that were sent with a different rate than the rate
268 * we currently advise mac80211 to use. */ 268 * we currently advise mac80211 to use. */
269 if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx]) 269 if (info->tx_rate_idx != sta->txrate_idx)
270 goto unlock; 270 goto unlock;
271 271
272 spinfo = sta->rate_ctrl_priv; 272 spinfo = sta->rate_ctrl_priv;
273 spinfo->tx_num_xmit++; 273 spinfo->tx_num_xmit++;
274 274
275#ifdef CONFIG_MAC80211_DEBUGFS 275#ifdef CONFIG_MAC80211_DEBUGFS
276 rate_control_pid_event_tx_status(&spinfo->events, status); 276 rate_control_pid_event_tx_status(&spinfo->events, info);
277#endif 277#endif
278 278
279 /* We count frames that totally failed to be transmitted as two bad 279 /* We count frames that totally failed to be transmitted as two bad
280 * frames, those that made it out but had some retries as one good and 280 * frames, those that made it out but had some retries as one good and
281 * one bad frame. */ 281 * one bad frame. */
282 if (status->excessive_retries) { 282 if (info->status.excessive_retries) {
283 spinfo->tx_num_failed += 2; 283 spinfo->tx_num_failed += 2;
284 spinfo->tx_num_xmit++; 284 spinfo->tx_num_xmit++;
285 } else if (status->retry_count) { 285 } else if (info->status.retry_count) {
286 spinfo->tx_num_failed++; 286 spinfo->tx_num_failed++;
287 spinfo->tx_num_xmit++; 287 spinfo->tx_num_xmit++;
288 } 288 }
289 289
290 if (status->excessive_retries) { 290 if (info->status.excessive_retries) {
291 sta->tx_retry_failed++; 291 sta->tx_retry_failed++;
292 sta->tx_num_consecutive_failures++; 292 sta->tx_num_consecutive_failures++;
293 sta->tx_num_mpdu_fail++; 293 sta->tx_num_mpdu_fail++;
@@ -295,8 +295,8 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
295 sta->tx_num_consecutive_failures = 0; 295 sta->tx_num_consecutive_failures = 0;
296 sta->tx_num_mpdu_ok++; 296 sta->tx_num_mpdu_ok++;
297 } 297 }
298 sta->tx_retry_count += status->retry_count; 298 sta->tx_retry_count += info->status.retry_count;
299 sta->tx_num_mpdu_fail += status->retry_count; 299 sta->tx_num_mpdu_fail += info->status.retry_count;
300 300
301 /* Update PID controller state. */ 301 /* Update PID controller state. */
302 period = (HZ * pinfo->sampling_period + 500) / 1000; 302 period = (HZ * pinfo->sampling_period + 500) / 1000;
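[Note] The tx_status path above now reads its status from the skb control buffer (IEEE80211_SKB_CB) but keeps the same bookkeeping: a frame dropped after excessive retries counts as two bad transmissions, a retried frame as one good and one bad. A small sketch of that accounting with a stand-in status structure:

/* Sketch of the per-sample accounting in rate_control_pid_tx_status():
 * excessive retries -> 2 bad, some retries -> 1 good + 1 bad.
 * The status struct is a stand-in for the fields read via IEEE80211_SKB_CB(). */
#include <stdio.h>

struct fake_tx_status { int excessive_retries; int retry_count; };

int main(void)
{
        struct fake_tx_status frames[] = {
                { 0, 0 },       /* clean transmission */
                { 0, 2 },       /* needed retries */
                { 1, 7 },       /* gave up after max retries */
        };
        unsigned int tx_num_xmit = 0, tx_num_failed = 0;
        size_t i;

        for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
                tx_num_xmit++;
                if (frames[i].excessive_retries) {
                        tx_num_failed += 2;
                        tx_num_xmit++;
                } else if (frames[i].retry_count) {
                        tx_num_failed++;
                        tx_num_xmit++;
                }
        }
        /* the PID controller later feeds this failure ratio back in */
        printf("xmit=%u failed=%u (failure ratio %u%%)\n",
               tx_num_xmit, tx_num_failed, 100 * tx_num_failed / tx_num_xmit);
        return 0;
}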
@@ -330,7 +330,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
330 fc = le16_to_cpu(hdr->frame_control); 330 fc = le16_to_cpu(hdr->frame_control);
331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
332 is_multicast_ether_addr(hdr->addr1) || !sta) { 332 is_multicast_ether_addr(hdr->addr1) || !sta) {
333 sel->rate = rate_lowest(local, sband, sta); 333 sel->rate_idx = rate_lowest_index(local, sband, sta);
334 rcu_read_unlock(); 334 rcu_read_unlock();
335 return; 335 return;
336 } 336 }
@@ -349,7 +349,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
349 349
350 rcu_read_unlock(); 350 rcu_read_unlock();
351 351
352 sel->rate = &sband->bitrates[rateidx]; 352 sel->rate_idx = rateidx;
353 353
354#ifdef CONFIG_MAC80211_DEBUGFS 354#ifdef CONFIG_MAC80211_DEBUGFS
355 rate_control_pid_event_tx_rate( 355 rate_control_pid_event_tx_rate(
@@ -540,11 +540,6 @@ static struct rate_control_ops mac80211_rcpid = {
540#endif 540#endif
541}; 541};
542 542
543MODULE_DESCRIPTION("PID controller based rate control algorithm");
544MODULE_AUTHOR("Stefano Brivio");
545MODULE_AUTHOR("Mattias Nissler");
546MODULE_LICENSE("GPL");
547
548int __init rc80211_pid_init(void) 543int __init rc80211_pid_init(void)
549{ 544{
550 return ieee80211_rate_control_register(&mac80211_rcpid); 545 return ieee80211_rate_control_register(&mac80211_rcpid);
@@ -554,8 +549,3 @@ void rc80211_pid_exit(void)
554{ 549{
555 ieee80211_rate_control_unregister(&mac80211_rcpid); 550 ieee80211_rate_control_unregister(&mac80211_rcpid);
556} 551}
557
558#ifdef CONFIG_MAC80211_RC_PID_MODULE
559module_init(rc80211_pid_init);
560module_exit(rc80211_pid_exit);
561#endif
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index ff5c380f3c13..8121d3bc6835 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -39,11 +39,11 @@ static void rate_control_pid_event(struct rc_pid_event_buffer *buf,
39} 39}
40 40
41void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, 41void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
42 struct ieee80211_tx_status *stat) 42 struct ieee80211_tx_info *stat)
43{ 43{
44 union rc_pid_event_data evd; 44 union rc_pid_event_data evd;
45 45
46 memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_status)); 46 memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info));
47 rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd); 47 rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd);
48} 48}
49 49
@@ -167,8 +167,8 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
167 switch (ev->type) { 167 switch (ev->type) {
168 case RC_PID_EVENT_TYPE_TX_STATUS: 168 case RC_PID_EVENT_TYPE_TX_STATUS:
169 p += snprintf(pb + p, length - p, "tx_status %u %u", 169 p += snprintf(pb + p, length - p, "tx_status %u %u",
170 ev->data.tx_status.excessive_retries, 170 ev->data.tx_status.status.excessive_retries,
171 ev->data.tx_status.retry_count); 171 ev->data.tx_status.status.retry_count);
172 break; 172 break;
173 case RC_PID_EVENT_TYPE_RATE_CHANGE: 173 case RC_PID_EVENT_TYPE_RATE_CHANGE:
174 p += snprintf(pb + p, length - p, "rate_change %d %d", 174 p += snprintf(pb + p, length - p, "rate_change %d %d",
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0941e5d6a522..6a88e8f9bff0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -61,22 +61,147 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status,
61 int present_fcs_len, 61 int present_fcs_len,
62 int radiotap_len) 62 int radiotap_len)
63{ 63{
64 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 64 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
65 65
66 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 66 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
67 return 1; 67 return 1;
68 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) 68 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
69 return 1; 69 return 1;
70 if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == 70 if (ieee80211_is_ctl(hdr->frame_control) &&
71 cpu_to_le16(IEEE80211_FTYPE_CTL)) && 71 !ieee80211_is_pspoll(hdr->frame_control) &&
72 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) != 72 !ieee80211_is_back_req(hdr->frame_control))
73 cpu_to_le16(IEEE80211_STYPE_PSPOLL)) &&
74 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
75 cpu_to_le16(IEEE80211_STYPE_BACK_REQ)))
76 return 1; 73 return 1;
77 return 0; 74 return 0;
78} 75}
79 76
77static int
78ieee80211_rx_radiotap_len(struct ieee80211_local *local,
79 struct ieee80211_rx_status *status)
80{
81 int len;
82
83 /* always present fields */
84 len = sizeof(struct ieee80211_radiotap_header) + 9;
85
86 if (status->flag & RX_FLAG_TSFT)
87 len += 8;
88 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB ||
89 local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
90 len += 1;
91 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
92 len += 1;
93
94 if (len & 1) /* padding for RX_FLAGS if necessary */
95 len++;
96
97 /* make sure radiotap starts at a naturally aligned address */
98 if (len % 8)
99 len = roundup(len, 8);
100
101 return len;
102}
103
104/**
105 * ieee80211_add_rx_radiotap_header - add radiotap header
106 *
107 * add a radiotap header containing all the fields which the hardware provided.
108 */
109static void
110ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
111 struct sk_buff *skb,
112 struct ieee80211_rx_status *status,
113 struct ieee80211_rate *rate,
114 int rtap_len)
115{
116 struct ieee80211_radiotap_header *rthdr;
117 unsigned char *pos;
118
119 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
120 memset(rthdr, 0, rtap_len);
121
122 /* radiotap header, set always present flags */
123 rthdr->it_present =
124 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
125 (1 << IEEE80211_RADIOTAP_RATE) |
126 (1 << IEEE80211_RADIOTAP_CHANNEL) |
127 (1 << IEEE80211_RADIOTAP_ANTENNA) |
128 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
129 rthdr->it_len = cpu_to_le16(rtap_len);
130
131 pos = (unsigned char *)(rthdr+1);
132
133 /* the order of the following fields is important */
134
135 /* IEEE80211_RADIOTAP_TSFT */
136 if (status->flag & RX_FLAG_TSFT) {
137 *(__le64 *)pos = cpu_to_le64(status->mactime);
138 rthdr->it_present |=
139 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
140 pos += 8;
141 }
142
143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 pos++;
147
148 /* IEEE80211_RADIOTAP_RATE */
149 *pos = rate->bitrate / 5;
150 pos++;
151
152 /* IEEE80211_RADIOTAP_CHANNEL */
153 *(__le16 *)pos = cpu_to_le16(status->freq);
154 pos += 2;
155 if (status->band == IEEE80211_BAND_5GHZ)
156 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
157 IEEE80211_CHAN_5GHZ);
158 else
159 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN |
160 IEEE80211_CHAN_2GHZ);
161 pos += 2;
162
163 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
164 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
165 *pos = status->signal;
166 rthdr->it_present |=
167 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
168 pos++;
169 }
170
171 /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
172 if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
173 *pos = status->noise;
174 rthdr->it_present |=
175 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
176 pos++;
177 }
178
179 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
180
181 /* IEEE80211_RADIOTAP_ANTENNA */
182 *pos = status->antenna;
183 pos++;
184
185 /* IEEE80211_RADIOTAP_DB_ANTSIGNAL */
186 if (local->hw.flags & IEEE80211_HW_SIGNAL_DB) {
187 *pos = status->signal;
188 rthdr->it_present |=
189 cpu_to_le32(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL);
190 pos++;
191 }
192
193 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
194
195 /* IEEE80211_RADIOTAP_RX_FLAGS */
196 /* ensure 2 byte alignment for the 2 byte field as required */
197 if ((pos - (unsigned char *)rthdr) & 1)
198 pos++;
199 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
200 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
201 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
202 pos += 2;
203}
204
80/* 205/*
81 * This function copies a received frame to all monitor interfaces and 206 * This function copies a received frame to all monitor interfaces and
82 * returns a cleaned-up SKB that no longer includes the FCS nor the 207 * returns a cleaned-up SKB that no longer includes the FCS nor the
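[Note] ieee80211_rx_radiotap_len() above sizes the monitor-mode header from what the hardware reports: the 8-byte radiotap header plus 9 always-present bytes (flags, rate, channel freq+flags, antenna, RX flags), optionally 8 for TSFT and 1 each for signal and noise, then padding and a roundup to 8. The sketch below reproduces just that arithmetic with local stand-in flag bits (they are not the kernel's hw.flags / RX_FLAG_* values):

/* Sketch of the radiotap length computation introduced above. */
#include <stdio.h>

#define HAS_TSFT        0x1
#define HAS_SIGNAL      0x2     /* dB or dBm antenna signal */
#define HAS_NOISE       0x4     /* dBm antenna noise */

#define RADIOTAP_HDR_LEN 8

static int radiotap_len(unsigned int flags)
{
        int len = RADIOTAP_HDR_LEN + 9;         /* always-present fields */

        if (flags & HAS_TSFT)
                len += 8;
        if (flags & HAS_SIGNAL)
                len += 1;
        if (flags & HAS_NOISE)
                len += 1;

        if (len & 1)                    /* keep the 2-byte RX_FLAGS field aligned */
                len++;
        if (len % 8)                    /* roundup, mirroring the patch's alignment fixup */
                len = (len + 7) & ~7;
        return len;
}

int main(void)
{
        printf("bare minimum:      %d bytes\n", radiotap_len(0));
        printf("tsft+signal+noise: %d bytes\n",
               radiotap_len(HAS_TSFT | HAS_SIGNAL | HAS_NOISE));
        return 0;
}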
@@ -89,17 +214,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
89{ 214{
90 struct ieee80211_sub_if_data *sdata; 215 struct ieee80211_sub_if_data *sdata;
91 int needed_headroom = 0; 216 int needed_headroom = 0;
92 struct ieee80211_radiotap_header *rthdr;
93 __le64 *rttsft = NULL;
94 struct ieee80211_rtap_fixed_data {
95 u8 flags;
96 u8 rate;
97 __le16 chan_freq;
98 __le16 chan_flags;
99 u8 antsignal;
100 u8 padding_for_rxflags;
101 __le16 rx_flags;
102 } __attribute__ ((packed)) *rtfixed;
103 struct sk_buff *skb, *skb2; 217 struct sk_buff *skb, *skb2;
104 struct net_device *prev_dev = NULL; 218 struct net_device *prev_dev = NULL;
105 int present_fcs_len = 0; 219 int present_fcs_len = 0;
@@ -116,8 +230,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
116 if (status->flag & RX_FLAG_RADIOTAP) 230 if (status->flag & RX_FLAG_RADIOTAP)
117 rtap_len = ieee80211_get_radiotap_len(origskb->data); 231 rtap_len = ieee80211_get_radiotap_len(origskb->data);
118 else 232 else
119 /* room for radiotap header, always present fields and TSFT */ 233 /* room for the radiotap header based on driver features */
120 needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8; 234 needed_headroom = ieee80211_rx_radiotap_len(local, status);
121 235
122 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 236 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
123 present_fcs_len = FCS_LEN; 237 present_fcs_len = FCS_LEN;
@@ -163,55 +277,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
163 } 277 }
164 278
165 /* if necessary, prepend radiotap information */ 279 /* if necessary, prepend radiotap information */
166 if (!(status->flag & RX_FLAG_RADIOTAP)) { 280 if (!(status->flag & RX_FLAG_RADIOTAP))
167 rtfixed = (void *) skb_push(skb, sizeof(*rtfixed)); 281 ieee80211_add_rx_radiotap_header(local, skb, status, rate,
168 rtap_len = sizeof(*rthdr) + sizeof(*rtfixed); 282 needed_headroom);
169 if (status->flag & RX_FLAG_TSFT) {
170 rttsft = (void *) skb_push(skb, sizeof(*rttsft));
171 rtap_len += 8;
172 }
173 rthdr = (void *) skb_push(skb, sizeof(*rthdr));
174 memset(rthdr, 0, sizeof(*rthdr));
175 memset(rtfixed, 0, sizeof(*rtfixed));
176 rthdr->it_present =
177 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
178 (1 << IEEE80211_RADIOTAP_RATE) |
179 (1 << IEEE80211_RADIOTAP_CHANNEL) |
180 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |
181 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
182 rtfixed->flags = 0;
183 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
184 rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS;
185
186 if (rttsft) {
187 *rttsft = cpu_to_le64(status->mactime);
188 rthdr->it_present |=
189 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
190 }
191
192 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
193 rtfixed->rx_flags = 0;
194 if (status->flag &
195 (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
196 rtfixed->rx_flags |=
197 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
198
199 rtfixed->rate = rate->bitrate / 5;
200
201 rtfixed->chan_freq = cpu_to_le16(status->freq);
202
203 if (status->band == IEEE80211_BAND_5GHZ)
204 rtfixed->chan_flags =
205 cpu_to_le16(IEEE80211_CHAN_OFDM |
206 IEEE80211_CHAN_5GHZ);
207 else
208 rtfixed->chan_flags =
209 cpu_to_le16(IEEE80211_CHAN_DYN |
210 IEEE80211_CHAN_2GHZ);
211
212 rtfixed->antsignal = status->ssi;
213 rthdr->it_len = cpu_to_le16(rtap_len);
214 }
215 283
216 skb_reset_mac_header(skb); 284 skb_reset_mac_header(skb);
217 skb->ip_summed = CHECKSUM_UNNECESSARY; 285 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -275,11 +343,6 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
275 } 343 }
276 } 344 }
277 345
278 I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
279 /* only a debug counter, sta might not be assigned properly yet */
280 if (rx->sta)
281 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
282
283 rx->queue = tid; 346 rx->queue = tid;
284 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 347 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
285 * For now, set skb->priority to 0 for other cases. */ 348 * For now, set skb->priority to 0 for other cases. */
@@ -321,51 +384,9 @@ static void ieee80211_verify_ip_alignment(struct ieee80211_rx_data *rx)
321} 384}
322 385
323 386
324static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
325 struct sk_buff *skb,
326 struct ieee80211_rx_status *status,
327 struct ieee80211_rate *rate)
328{
329 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
330 u32 load = 0, hdrtime;
331
332 /* Estimate total channel use caused by this frame */
333
334 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
335 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
336
337 if (status->band == IEEE80211_BAND_5GHZ ||
338 (status->band == IEEE80211_BAND_5GHZ &&
339 rate->flags & IEEE80211_RATE_ERP_G))
340 hdrtime = CHAN_UTIL_HDR_SHORT;
341 else
342 hdrtime = CHAN_UTIL_HDR_LONG;
343
344 load = hdrtime;
345 if (!is_multicast_ether_addr(hdr->addr1))
346 load += hdrtime;
347
348 /* TODO: optimise again */
349 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
350
351 /* Divide channel_use by 8 to avoid wrapping around the counter */
352 load >>= CHAN_UTIL_SHIFT;
353
354 return load;
355}
356
357/* rx handlers */ 387/* rx handlers */
358 388
359static ieee80211_rx_result 389static ieee80211_rx_result debug_noinline
360ieee80211_rx_h_if_stats(struct ieee80211_rx_data *rx)
361{
362 if (rx->sta)
363 rx->sta->channel_use_raw += rx->load;
364 rx->sdata->channel_use_raw += rx->load;
365 return RX_CONTINUE;
366}
367
368static ieee80211_rx_result
369ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) 390ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
370{ 391{
371 struct ieee80211_local *local = rx->local; 392 struct ieee80211_local *local = rx->local;
@@ -442,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
442} 463}
443 464
444 465
445static ieee80211_rx_result 466static ieee80211_rx_result debug_noinline
446ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 467ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
447{ 468{
448 struct ieee80211_hdr *hdr; 469 struct ieee80211_hdr *hdr;
@@ -484,7 +505,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
484 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && 505 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL &&
485 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && 506 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
486 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 507 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
487 (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { 508 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
488 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && 509 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) &&
489 !(rx->fc & IEEE80211_FCTL_TODS) && 510 !(rx->fc & IEEE80211_FCTL_TODS) &&
490 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 511 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
@@ -501,7 +522,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
501} 522}
502 523
503 524
504static ieee80211_rx_result 525static ieee80211_rx_result debug_noinline
505ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 526ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
506{ 527{
507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 528 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
@@ -592,11 +613,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
592 rx->key->tx_rx_count++; 613 rx->key->tx_rx_count++;
593 /* TODO: add threshold stuff again */ 614 /* TODO: add threshold stuff again */
594 } else { 615 } else {
595#ifdef CONFIG_MAC80211_DEBUG
596 if (net_ratelimit())
597 printk(KERN_DEBUG "%s: RX protected frame,"
598 " but have no key\n", rx->dev->name);
599#endif /* CONFIG_MAC80211_DEBUG */
600 return RX_DROP_MONITOR; 616 return RX_DROP_MONITOR;
601 } 617 }
602 618
@@ -635,8 +651,7 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
635 651
636 if (sdata->bss) 652 if (sdata->bss)
637 atomic_inc(&sdata->bss->num_sta_ps); 653 atomic_inc(&sdata->bss->num_sta_ps);
638 sta->flags |= WLAN_STA_PS; 654 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
639 sta->flags &= ~WLAN_STA_PSPOLL;
640#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 655#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
641 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 656 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
642 dev->name, print_mac(mac, sta->addr), sta->aid); 657 dev->name, print_mac(mac, sta->addr), sta->aid);
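[Note] The rx.c hunks replace open-coded sta->flags manipulation with set_sta_flags()/clear_sta_flags()/test_sta_flags()/set_and_clear_sta_flags(). The snippet below is only a simplified userspace model of that helper style, not the sta_info.h implementation; any serialization the real helpers perform is omitted, and the flag values are placeholders:

/* Minimal model of the sta flag helpers the patch switches to. */
#include <stdio.h>

#define WLAN_STA_PS     0x1     /* placeholder bit values */
#define WLAN_STA_PSPOLL 0x2

struct fake_sta { unsigned long flags; };

static void set_sta_flags(struct fake_sta *sta, unsigned long f)   { sta->flags |= f; }
static void clear_sta_flags(struct fake_sta *sta, unsigned long f) { sta->flags &= ~f; }
static int  test_sta_flags(struct fake_sta *sta, unsigned long f)  { return !!(sta->flags & f); }
static void set_and_clear_sta_flags(struct fake_sta *sta,
                                    unsigned long set, unsigned long clear)
{
        sta->flags |= set;
        sta->flags &= ~clear;
}

int main(void)
{
        struct fake_sta sta = { .flags = WLAN_STA_PSPOLL };

        /* ap_sta_ps_start(): enter power save, drop pending PS-Poll state */
        set_and_clear_sta_flags(&sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
        printf("in PS: %d, pspoll: %d\n",
               test_sta_flags(&sta, WLAN_STA_PS),
               test_sta_flags(&sta, WLAN_STA_PSPOLL));

        /* ap_sta_ps_end(): leave power save */
        clear_sta_flags(&sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
        printf("after wake, in PS: %d\n", test_sta_flags(&sta, WLAN_STA_PS));
        return 0;
}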
@@ -649,7 +664,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
649 struct sk_buff *skb; 664 struct sk_buff *skb;
650 int sent = 0; 665 int sent = 0;
651 struct ieee80211_sub_if_data *sdata; 666 struct ieee80211_sub_if_data *sdata;
652 struct ieee80211_tx_packet_data *pkt_data; 667 struct ieee80211_tx_info *info;
653 DECLARE_MAC_BUF(mac); 668 DECLARE_MAC_BUF(mac);
654 669
655 sdata = sta->sdata; 670 sdata = sta->sdata;
@@ -657,7 +672,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
657 if (sdata->bss) 672 if (sdata->bss)
658 atomic_dec(&sdata->bss->num_sta_ps); 673 atomic_dec(&sdata->bss->num_sta_ps);
659 674
660 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL); 675 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
661 676
662 if (!skb_queue_empty(&sta->ps_tx_buf)) 677 if (!skb_queue_empty(&sta->ps_tx_buf))
663 sta_info_clear_tim_bit(sta); 678 sta_info_clear_tim_bit(sta);
@@ -669,13 +684,13 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
669 684
670 /* Send all buffered frames to the station */ 685 /* Send all buffered frames to the station */
671 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 686 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
672 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 687 info = IEEE80211_SKB_CB(skb);
673 sent++; 688 sent++;
674 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 689 info->flags |= IEEE80211_TX_CTL_REQUEUE;
675 dev_queue_xmit(skb); 690 dev_queue_xmit(skb);
676 } 691 }
677 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 692 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
678 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 693 info = IEEE80211_SKB_CB(skb);
679 local->total_ps_buffered--; 694 local->total_ps_buffered--;
680 sent++; 695 sent++;
681#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 696#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -683,14 +698,14 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
683 "since STA not sleeping anymore\n", dev->name, 698 "since STA not sleeping anymore\n", dev->name,
684 print_mac(mac, sta->addr), sta->aid); 699 print_mac(mac, sta->addr), sta->aid);
685#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 700#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
686 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 701 info->flags |= IEEE80211_TX_CTL_REQUEUE;
687 dev_queue_xmit(skb); 702 dev_queue_xmit(skb);
688 } 703 }
689 704
690 return sent; 705 return sent;
691} 706}
692 707
693static ieee80211_rx_result 708static ieee80211_rx_result debug_noinline
694ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 709ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
695{ 710{
696 struct sta_info *sta = rx->sta; 711 struct sta_info *sta = rx->sta;
@@ -725,16 +740,17 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
725 740
726 sta->rx_fragments++; 741 sta->rx_fragments++;
727 sta->rx_bytes += rx->skb->len; 742 sta->rx_bytes += rx->skb->len;
728 sta->last_rssi = rx->status->ssi;
729 sta->last_signal = rx->status->signal; 743 sta->last_signal = rx->status->signal;
744 sta->last_qual = rx->status->qual;
730 sta->last_noise = rx->status->noise; 745 sta->last_noise = rx->status->noise;
731 746
732 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { 747 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) {
733 /* Change STA power saving mode only in the end of a frame 748 /* Change STA power saving mode only in the end of a frame
734 * exchange sequence */ 749 * exchange sequence */
735 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) 750 if (test_sta_flags(sta, WLAN_STA_PS) &&
751 !(rx->fc & IEEE80211_FCTL_PM))
736 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); 752 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta);
737 else if (!(sta->flags & WLAN_STA_PS) && 753 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
738 (rx->fc & IEEE80211_FCTL_PM)) 754 (rx->fc & IEEE80211_FCTL_PM))
739 ap_sta_ps_start(dev, sta); 755 ap_sta_ps_start(dev, sta);
740 } 756 }
@@ -768,7 +784,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
768 sdata->fragment_next = 0; 784 sdata->fragment_next = 0;
769 785
770 if (!skb_queue_empty(&entry->skb_list)) { 786 if (!skb_queue_empty(&entry->skb_list)) {
771#ifdef CONFIG_MAC80211_DEBUG 787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
772 struct ieee80211_hdr *hdr = 788 struct ieee80211_hdr *hdr =
773 (struct ieee80211_hdr *) entry->skb_list.next->data; 789 (struct ieee80211_hdr *) entry->skb_list.next->data;
774 DECLARE_MAC_BUF(mac); 790 DECLARE_MAC_BUF(mac);
@@ -780,7 +796,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
780 jiffies - entry->first_frag_time, entry->seq, 796 jiffies - entry->first_frag_time, entry->seq,
781 entry->last_frag, print_mac(mac, hdr->addr1), 797 entry->last_frag, print_mac(mac, hdr->addr1),
782 print_mac(mac2, hdr->addr2)); 798 print_mac(mac2, hdr->addr2));
783#endif /* CONFIG_MAC80211_DEBUG */ 799#endif
784 __skb_queue_purge(&entry->skb_list); 800 __skb_queue_purge(&entry->skb_list);
785 } 801 }
786 802
@@ -837,7 +853,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
837 return NULL; 853 return NULL;
838} 854}
839 855
840static ieee80211_rx_result 856static ieee80211_rx_result debug_noinline
841ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 857ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
842{ 858{
843 struct ieee80211_hdr *hdr; 859 struct ieee80211_hdr *hdr;
@@ -901,18 +917,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
901 break; 917 break;
902 } 918 }
903 rpn = rx->key->u.ccmp.rx_pn[rx->queue]; 919 rpn = rx->key->u.ccmp.rx_pn[rx->queue];
904 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { 920 if (memcmp(pn, rpn, CCMP_PN_LEN))
905 if (net_ratelimit())
906 printk(KERN_DEBUG "%s: defrag: CCMP PN not "
907 "sequential A2=%s"
908 " PN=%02x%02x%02x%02x%02x%02x "
909 "(expected %02x%02x%02x%02x%02x%02x)\n",
910 rx->dev->name, print_mac(mac, hdr->addr2),
911 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
912 rpn[5], pn[0], pn[1], pn[2], pn[3],
913 pn[4], pn[5]);
914 return RX_DROP_UNUSABLE; 921 return RX_DROP_UNUSABLE;
915 }
916 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 922 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
917 } 923 }
918 924
@@ -953,7 +959,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
953 return RX_CONTINUE; 959 return RX_CONTINUE;
954} 960}
955 961
956static ieee80211_rx_result 962static ieee80211_rx_result debug_noinline
957ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) 963ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
958{ 964{
959 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 965 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
@@ -988,7 +994,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
988 * Tell TX path to send one frame even though the STA may 994 * Tell TX path to send one frame even though the STA may
989 * still remain is PS mode after this frame exchange. 995 * still remain is PS mode after this frame exchange.
990 */ 996 */
991 rx->sta->flags |= WLAN_STA_PSPOLL; 997 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
992 998
993#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 999#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
994 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 1000 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
@@ -1016,7 +1022,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1016 * have nothing buffered for it? 1022 * have nothing buffered for it?
1017 */ 1023 */
1018 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 1024 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1019 "though there is no buffered frames for it\n", 1025 "though there are no buffered frames for it\n",
1020 rx->dev->name, print_mac(mac, rx->sta->addr)); 1026 rx->dev->name, print_mac(mac, rx->sta->addr));
1021#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1027#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1022 } 1028 }
@@ -1028,7 +1034,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1028 return RX_QUEUED; 1034 return RX_QUEUED;
1029} 1035}
1030 1036
1031static ieee80211_rx_result 1037static ieee80211_rx_result debug_noinline
1032ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) 1038ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1033{ 1039{
1034 u16 fc = rx->fc; 1040 u16 fc = rx->fc;
@@ -1051,14 +1057,9 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1051static int 1057static int
1052ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1058ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1053{ 1059{
1054 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) { 1060 if (unlikely(!rx->sta ||
1055#ifdef CONFIG_MAC80211_DEBUG 1061 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1056 if (net_ratelimit())
1057 printk(KERN_DEBUG "%s: dropped frame "
1058 "(unauthorized port)\n", rx->dev->name);
1059#endif /* CONFIG_MAC80211_DEBUG */
1060 return -EACCES; 1062 return -EACCES;
1061 }
1062 1063
1063 return 0; 1064 return 0;
1064} 1065}
@@ -1138,16 +1139,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1138 memcpy(src, hdr->addr2, ETH_ALEN); 1139 memcpy(src, hdr->addr2, ETH_ALEN);
1139 1140
1140 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && 1141 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1141 sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) { 1142 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1142 if (net_ratelimit())
1143 printk(KERN_DEBUG "%s: dropped ToDS frame "
1144 "(BSSID=%s SA=%s DA=%s)\n",
1145 dev->name,
1146 print_mac(mac, hdr->addr1),
1147 print_mac(mac2, hdr->addr2),
1148 print_mac(mac3, hdr->addr3));
1149 return -1; 1143 return -1;
1150 }
1151 break; 1144 break;
1152 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1145 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1153 /* RA TA DA SA */ 1146 /* RA TA DA SA */
@@ -1155,17 +1148,8 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1155 memcpy(src, hdr->addr4, ETH_ALEN); 1148 memcpy(src, hdr->addr4, ETH_ALEN);
1156 1149
1157 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && 1150 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1158 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) { 1151 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1159 if (net_ratelimit())
1160 printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
1161 "frame (RA=%s TA=%s DA=%s SA=%s)\n",
1162 rx->dev->name,
1163 print_mac(mac, hdr->addr1),
1164 print_mac(mac2, hdr->addr2),
1165 print_mac(mac3, hdr->addr3),
1166 print_mac(mac4, hdr->addr4));
1167 return -1; 1152 return -1;
1168 }
1169 break; 1153 break;
1170 case IEEE80211_FCTL_FROMDS: 1154 case IEEE80211_FCTL_FROMDS:
1171 /* DA BSSID SA */ 1155 /* DA BSSID SA */
@@ -1182,27 +1166,13 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1182 memcpy(dst, hdr->addr1, ETH_ALEN); 1166 memcpy(dst, hdr->addr1, ETH_ALEN);
1183 memcpy(src, hdr->addr2, ETH_ALEN); 1167 memcpy(src, hdr->addr2, ETH_ALEN);
1184 1168
1185 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) { 1169 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1186 if (net_ratelimit()) {
1187 printk(KERN_DEBUG "%s: dropped IBSS frame "
1188 "(DA=%s SA=%s BSSID=%s)\n",
1189 dev->name,
1190 print_mac(mac, hdr->addr1),
1191 print_mac(mac2, hdr->addr2),
1192 print_mac(mac3, hdr->addr3));
1193 }
1194 return -1; 1170 return -1;
1195 }
1196 break; 1171 break;
1197 } 1172 }
1198 1173
1199 if (unlikely(skb->len - hdrlen < 8)) { 1174 if (unlikely(skb->len - hdrlen < 8))
1200 if (net_ratelimit()) {
1201 printk(KERN_DEBUG "%s: RX too short data frame "
1202 "payload\n", dev->name);
1203 }
1204 return -1; 1175 return -1;
1205 }
1206 1176
1207 payload = skb->data + hdrlen; 1177 payload = skb->data + hdrlen;
1208 ethertype = (payload[6] << 8) | payload[7]; 1178 ethertype = (payload[6] << 8) | payload[7];
@@ -1345,7 +1315,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1345 } 1315 }
1346} 1316}
1347 1317
1348static ieee80211_rx_result 1318static ieee80211_rx_result debug_noinline
1349ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1319ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1350{ 1320{
1351 struct net_device *dev = rx->dev; 1321 struct net_device *dev = rx->dev;
@@ -1394,10 +1364,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1394 1364
1395 padding = ((4 - subframe_len) & 0x3); 1365 padding = ((4 - subframe_len) & 0x3);
1396 /* the last MSDU has no padding */ 1366 /* the last MSDU has no padding */
1397 if (subframe_len > remaining) { 1367 if (subframe_len > remaining)
1398 printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name);
1399 return RX_DROP_UNUSABLE; 1368 return RX_DROP_UNUSABLE;
1400 }
1401 1369
1402 skb_pull(skb, sizeof(struct ethhdr)); 1370 skb_pull(skb, sizeof(struct ethhdr));
1403 /* if last subframe reuse skb */ 1371 /* if last subframe reuse skb */
@@ -1418,8 +1386,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1418 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1386 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1419 padding); 1387 padding);
1420 if (!eth) { 1388 if (!eth) {
1421 printk(KERN_DEBUG "%s: wrong buffer size\n",
1422 dev->name);
1423 dev_kfree_skb(frame); 1389 dev_kfree_skb(frame);
1424 return RX_DROP_UNUSABLE; 1390 return RX_DROP_UNUSABLE;
1425 } 1391 }
@@ -1462,7 +1428,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1462 return RX_QUEUED; 1428 return RX_QUEUED;
1463} 1429}
1464 1430
1465static ieee80211_rx_result 1431static ieee80211_rx_result debug_noinline
1466ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1432ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1467{ 1433{
1468 struct net_device *dev = rx->dev; 1434 struct net_device *dev = rx->dev;
@@ -1493,7 +1459,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1493 return RX_QUEUED; 1459 return RX_QUEUED;
1494} 1460}
1495 1461
1496static ieee80211_rx_result 1462static ieee80211_rx_result debug_noinline
1497ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 1463ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1498{ 1464{
1499 struct ieee80211_local *local = rx->local; 1465 struct ieee80211_local *local = rx->local;
@@ -1537,7 +1503,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1537 return RX_CONTINUE; 1503 return RX_CONTINUE;
1538} 1504}
1539 1505
1540static ieee80211_rx_result 1506static ieee80211_rx_result debug_noinline
1541ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1507ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1542{ 1508{
1543 struct ieee80211_sub_if_data *sdata; 1509 struct ieee80211_sub_if_data *sdata;
@@ -1571,31 +1537,16 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1571 else 1537 else
1572 keyidx = -1; 1538 keyidx = -1;
1573 1539
1574 if (net_ratelimit())
1575 printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC "
1576 "failure from %s to %s keyidx=%d\n",
1577 dev->name, print_mac(mac, hdr->addr2),
1578 print_mac(mac2, hdr->addr1), keyidx);
1579
1580 if (!rx->sta) { 1540 if (!rx->sta) {
1581 /* 1541 /*
1582 * Some hardware seem to generate incorrect Michael MIC 1542 * Some hardware seem to generate incorrect Michael MIC
1583 * reports; ignore them to avoid triggering countermeasures. 1543 * reports; ignore them to avoid triggering countermeasures.
1584 */ 1544 */
1585 if (net_ratelimit())
1586 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1587 "error for unknown address %s\n",
1588 dev->name, print_mac(mac, hdr->addr2));
1589 goto ignore; 1545 goto ignore;
1590 } 1546 }
1591 1547
1592 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) { 1548 if (!(rx->fc & IEEE80211_FCTL_PROTECTED))
1593 if (net_ratelimit())
1594 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1595 "error for a frame with no PROTECTED flag (src "
1596 "%s)\n", dev->name, print_mac(mac, hdr->addr2));
1597 goto ignore; 1549 goto ignore;
1598 }
1599 1550
1600 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { 1551 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
1601 /* 1552 /*
@@ -1604,24 +1555,13 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1604 * group keys and only the AP is sending real multicast 1555 * group keys and only the AP is sending real multicast
1605 * frames in the BSS. 1556 * frames in the BSS.
1606 */ 1557 */
1607 if (net_ratelimit())
1608 printk(KERN_DEBUG "%s: ignored Michael MIC error for "
1609 "a frame with non-zero keyidx (%d)"
1610 " (src %s)\n", dev->name, keyidx,
1611 print_mac(mac, hdr->addr2));
1612 goto ignore; 1558 goto ignore;
1613 } 1559 }
1614 1560
1615 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 1561 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
1616 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 1562 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
1617 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) { 1563 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))
1618 if (net_ratelimit())
1619 printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
1620 "error for a frame that cannot be encrypted "
1621 "(fc=0x%04x) (src %s)\n",
1622 dev->name, rx->fc, print_mac(mac, hdr->addr2));
1623 goto ignore; 1564 goto ignore;
1624 }
1625 1565
1626 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1566 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr);
1627 ignore: 1567 ignore:
@@ -1710,67 +1650,57 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1710 dev_kfree_skb(skb); 1650 dev_kfree_skb(skb);
1711} 1651}
1712 1652
1713typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_rx_data *);
1714static ieee80211_rx_handler ieee80211_rx_handlers[] =
1715{
1716 ieee80211_rx_h_if_stats,
1717 ieee80211_rx_h_passive_scan,
1718 ieee80211_rx_h_check,
1719 ieee80211_rx_h_decrypt,
1720 ieee80211_rx_h_sta_process,
1721 ieee80211_rx_h_defragment,
1722 ieee80211_rx_h_ps_poll,
1723 ieee80211_rx_h_michael_mic_verify,
1724 /* this must be after decryption - so header is counted in MPDU mic
1725 * must be before pae and data, so QOS_DATA format frames
1726 * are not passed to user space by these functions
1727 */
1728 ieee80211_rx_h_remove_qos_control,
1729 ieee80211_rx_h_amsdu,
1730 ieee80211_rx_h_data,
1731 ieee80211_rx_h_ctrl,
1732 ieee80211_rx_h_mgmt,
1733 NULL
1734};
1735 1653
1736static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 1654static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1737 struct ieee80211_rx_data *rx, 1655 struct ieee80211_rx_data *rx,
1738 struct sk_buff *skb) 1656 struct sk_buff *skb)
1739{ 1657{
1740 ieee80211_rx_handler *handler;
1741 ieee80211_rx_result res = RX_DROP_MONITOR; 1658 ieee80211_rx_result res = RX_DROP_MONITOR;
1742 1659
1743 rx->skb = skb; 1660 rx->skb = skb;
1744 rx->sdata = sdata; 1661 rx->sdata = sdata;
1745 rx->dev = sdata->dev; 1662 rx->dev = sdata->dev;
1746 1663
1747 for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) { 1664#define CALL_RXH(rxh) \
1748 res = (*handler)(rx); 1665 res = rxh(rx); \
1749 1666 if (res != RX_CONTINUE) \
1750 switch (res) { 1667 goto rxh_done;
1751 case RX_CONTINUE: 1668
1752 continue; 1669 CALL_RXH(ieee80211_rx_h_passive_scan)
1753 case RX_DROP_UNUSABLE: 1670 CALL_RXH(ieee80211_rx_h_check)
1754 case RX_DROP_MONITOR: 1671 CALL_RXH(ieee80211_rx_h_decrypt)
1755 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 1672 CALL_RXH(ieee80211_rx_h_sta_process)
1756 if (rx->sta) 1673 CALL_RXH(ieee80211_rx_h_defragment)
1757 rx->sta->rx_dropped++; 1674 CALL_RXH(ieee80211_rx_h_ps_poll)
1758 break; 1675 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
1759 case RX_QUEUED: 1676 /* must be after MMIC verify so header is counted in MPDU mic */
1760 I802_DEBUG_INC(sdata->local->rx_handlers_queued); 1677 CALL_RXH(ieee80211_rx_h_remove_qos_control)
1761 break; 1678 CALL_RXH(ieee80211_rx_h_amsdu)
1762 } 1679 CALL_RXH(ieee80211_rx_h_data)
1763 break; 1680 CALL_RXH(ieee80211_rx_h_ctrl)
1764 } 1681 CALL_RXH(ieee80211_rx_h_mgmt)
1765 1682
1683#undef CALL_RXH
1684
1685 rxh_done:
1766 switch (res) { 1686 switch (res) {
1767 case RX_CONTINUE:
1768 case RX_DROP_MONITOR: 1687 case RX_DROP_MONITOR:
1688 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1689 if (rx->sta)
1690 rx->sta->rx_dropped++;
1691 /* fall through */
1692 case RX_CONTINUE:
1769 ieee80211_rx_cooked_monitor(rx); 1693 ieee80211_rx_cooked_monitor(rx);
1770 break; 1694 break;
1771 case RX_DROP_UNUSABLE: 1695 case RX_DROP_UNUSABLE:
1696 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1697 if (rx->sta)
1698 rx->sta->rx_dropped++;
1772 dev_kfree_skb(rx->skb); 1699 dev_kfree_skb(rx->skb);
1773 break; 1700 break;
1701 case RX_QUEUED:
1702 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1703 break;
1774 } 1704 }
1775} 1705}
1776 1706
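
The hunk above drops the NULL-terminated ieee80211_rx_handlers[] array and instead invokes each handler through a CALL_RXH() macro that jumps to rxh_done as soon as a handler returns anything other than RX_CONTINUE; the final switch then charges RX_DROP_MONITOR and RX_DROP_UNUSABLE to rx_handlers_drop, with the monitor case falling through into ieee80211_rx_cooked_monitor(). A minimal, self-contained sketch of the same control-flow pattern, with made-up handler names rather than the real mac80211 handlers:

/* Sketch of the CALL_RXH pattern: run handlers in order, stop at the
 * first result that is not "continue". Handler names are illustrative. */
#include <stdio.h>

enum rx_result { RX_CONTINUE, RX_QUEUED, RX_DROP };

static enum rx_result check_header(void *frame) { (void)frame; return RX_CONTINUE; }
static enum rx_result decrypt(void *frame)      { (void)frame; return RX_CONTINUE; }
static enum rx_result deliver(void *frame)      { (void)frame; return RX_QUEUED; }

static enum rx_result run_handlers(void *frame)
{
	enum rx_result res = RX_DROP;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(frame);	\
		if (res != RX_CONTINUE)	\
			goto done;	\
	} while (0)

	CALL_RXH(check_header);
	CALL_RXH(decrypt);
	CALL_RXH(deliver);
#undef CALL_RXH
done:
	return res;
}

int main(void)
{
	printf("result: %d\n", run_handlers(NULL));	/* prints "result: 1" (RX_QUEUED) */
	return 0;
}

The do/while wrapper here is only cosmetic; the patch's macro expands bare statements inside the function body. The net effect of the rework is the same handler ordering as before but without the indirect calls through the function-pointer table.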
@@ -1802,8 +1732,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1802 if (!bssid) 1732 if (!bssid)
1803 return 0; 1733 return 0;
1804 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && 1734 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
1805 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 1735 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) {
1736 if (!rx->sta)
1737 rx->sta = ieee80211_ibss_add_sta(sdata->dev,
1738 rx->skb, bssid, hdr->addr2,
1739 BIT(rx->status->rate_idx));
1806 return 1; 1740 return 1;
1741 }
1807 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1742 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1808 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 1743 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
1809 return 0; 1744 return 0;
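
With this change a station first seen via an IBSS beacon is created with BIT(rx->status->rate_idx) as its initial supported-rates bitmap, i.e. only the rate the frame actually arrived at, instead of no rate information at all. A small illustration of that bitmap convention (values invented; supp_rates in sta_info is a u64 per band):

/* Illustrative only: seed a per-station rate bitmap from the rate index
 * of the frame that introduced the station, then OR in further rates as
 * they are learned from beacons or association data. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

int main(void)
{
	uint64_t supp_rates = 0;
	int rx_rate_idx = 3;            /* hypothetical bitrate index */

	supp_rates |= BIT(rx_rate_idx); /* what BIT(rx->status->rate_idx) contributes */
	supp_rates |= BIT(0) | BIT(1);  /* later: rates parsed from the peer's IEs */

	printf("supp_rates bitmap: 0x%llx\n", (unsigned long long)supp_rates);
	return 0;
}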
@@ -1816,7 +1751,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1816 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1751 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1817 } else if (!rx->sta) 1752 } else if (!rx->sta)
1818 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1753 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
1819 bssid, hdr->addr2); 1754 bssid, hdr->addr2,
1755 BIT(rx->status->rate_idx));
1820 break; 1756 break;
1821 case IEEE80211_IF_TYPE_MESH_POINT: 1757 case IEEE80211_IF_TYPE_MESH_POINT:
1822 if (!multicast && 1758 if (!multicast &&
@@ -1872,7 +1808,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1872static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1808static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1873 struct sk_buff *skb, 1809 struct sk_buff *skb,
1874 struct ieee80211_rx_status *status, 1810 struct ieee80211_rx_status *status,
1875 u32 load,
1876 struct ieee80211_rate *rate) 1811 struct ieee80211_rate *rate)
1877{ 1812{
1878 struct ieee80211_local *local = hw_to_local(hw); 1813 struct ieee80211_local *local = hw_to_local(hw);
@@ -1891,7 +1826,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1891 rx.local = local; 1826 rx.local = local;
1892 1827
1893 rx.status = status; 1828 rx.status = status;
1894 rx.load = load;
1895 rx.rate = rate; 1829 rx.rate = rate;
1896 rx.fc = le16_to_cpu(hdr->frame_control); 1830 rx.fc = le16_to_cpu(hdr->frame_control);
1897 type = rx.fc & IEEE80211_FCTL_FTYPE; 1831 type = rx.fc & IEEE80211_FCTL_FTYPE;
@@ -2000,7 +1934,6 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2000 struct ieee80211_rx_status status; 1934 struct ieee80211_rx_status status;
2001 u16 head_seq_num, buf_size; 1935 u16 head_seq_num, buf_size;
2002 int index; 1936 int index;
2003 u32 pkt_load;
2004 struct ieee80211_supported_band *sband; 1937 struct ieee80211_supported_band *sband;
2005 struct ieee80211_rate *rate; 1938 struct ieee80211_rate *rate;
2006 1939
@@ -2035,12 +1968,9 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2035 sizeof(status)); 1968 sizeof(status));
2036 sband = local->hw.wiphy->bands[status.band]; 1969 sband = local->hw.wiphy->bands[status.band];
2037 rate = &sband->bitrates[status.rate_idx]; 1970 rate = &sband->bitrates[status.rate_idx];
2038 pkt_load = ieee80211_rx_load_stats(local,
2039 tid_agg_rx->reorder_buf[index],
2040 &status, rate);
2041 __ieee80211_rx_handle_packet(hw, 1971 __ieee80211_rx_handle_packet(hw,
2042 tid_agg_rx->reorder_buf[index], 1972 tid_agg_rx->reorder_buf[index],
2043 &status, pkt_load, rate); 1973 &status, rate);
2044 tid_agg_rx->stored_mpdu_num--; 1974 tid_agg_rx->stored_mpdu_num--;
2045 tid_agg_rx->reorder_buf[index] = NULL; 1975 tid_agg_rx->reorder_buf[index] = NULL;
2046 } 1976 }
@@ -2082,11 +2012,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2082 sizeof(status)); 2012 sizeof(status));
2083 sband = local->hw.wiphy->bands[status.band]; 2013 sband = local->hw.wiphy->bands[status.band];
2084 rate = &sband->bitrates[status.rate_idx]; 2014 rate = &sband->bitrates[status.rate_idx];
2085 pkt_load = ieee80211_rx_load_stats(local,
2086 tid_agg_rx->reorder_buf[index],
2087 &status, rate);
2088 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 2015 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
2089 &status, pkt_load, rate); 2016 &status, rate);
2090 tid_agg_rx->stored_mpdu_num--; 2017 tid_agg_rx->stored_mpdu_num--;
2091 tid_agg_rx->reorder_buf[index] = NULL; 2018 tid_agg_rx->reorder_buf[index] = NULL;
2092 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 2019 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -2103,24 +2030,21 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2103 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2030 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2104 struct sta_info *sta; 2031 struct sta_info *sta;
2105 struct tid_ampdu_rx *tid_agg_rx; 2032 struct tid_ampdu_rx *tid_agg_rx;
2106 u16 fc, sc; 2033 u16 sc;
2107 u16 mpdu_seq_num; 2034 u16 mpdu_seq_num;
2108 u8 ret = 0, *qc; 2035 u8 ret = 0;
2109 int tid; 2036 int tid;
2110 2037
2111 sta = sta_info_get(local, hdr->addr2); 2038 sta = sta_info_get(local, hdr->addr2);
2112 if (!sta) 2039 if (!sta)
2113 return ret; 2040 return ret;
2114 2041
2115 fc = le16_to_cpu(hdr->frame_control);
2116
2117 /* filter the QoS data rx stream according to 2042 /* filter the QoS data rx stream according to
2118 * STA/TID and check if this STA/TID is on aggregation */ 2043 * STA/TID and check if this STA/TID is on aggregation */
2119 if (!WLAN_FC_IS_QOS_DATA(fc)) 2044 if (!ieee80211_is_data_qos(hdr->frame_control))
2120 goto end_reorder; 2045 goto end_reorder;
2121 2046
2122 qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN; 2047 tid = *ieee80211_get_qos_ctl(hdr) & QOS_CONTROL_TID_MASK;
2123 tid = qc[0] & QOS_CONTROL_TID_MASK;
2124 2048
2125 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) 2049 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
2126 goto end_reorder; 2050 goto end_reorder;
@@ -2128,7 +2052,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2128 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; 2052 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
2129 2053
2130 /* null data frames are excluded */ 2054 /* null data frames are excluded */
2131 if (unlikely(fc & IEEE80211_STYPE_NULLFUNC)) 2055 if (unlikely(ieee80211_is_nullfunc(hdr->frame_control)))
2132 goto end_reorder; 2056 goto end_reorder;
2133 2057
2134 /* new un-ordered ampdu frame - process it */ 2058 /* new un-ordered ampdu frame - process it */
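
The reorder entry point now relies on ieee80211_is_data_qos() and ieee80211_get_qos_ctl() instead of decoding the frame-control word and header length by hand. For reference, a standalone sketch of the equivalent parsing on a raw 3-address QoS data header (no 4-address or HT control cases; constants written out here rather than taken from the kernel headers):

/* Sketch: extract the TID from a QoS data frame. Assumes a plain
 * 3-address QoS data header: 24-byte base header + 2-byte QoS Control. */
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FTYPE_DATA      0x0008
#define STYPE_QOS_DATA  0x0080
#define QOS_TID_MASK    0x0f

static int qos_data_tid(const uint8_t *hdr, uint16_t fc)
{
	if ((fc & FCTL_FTYPE) != FTYPE_DATA || !(fc & STYPE_QOS_DATA))
		return -1;                 /* not a QoS data frame */
	return hdr[24] & QOS_TID_MASK;     /* low bits of the QoS Control field */
}

int main(void)
{
	uint8_t hdr[26] = { 0x88, 0x00 };  /* frame control: QoS data, little-endian */
	hdr[24] = 0x05;                    /* QoS Control: TID 5 */
	printf("tid = %d\n", qos_data_tid(hdr, hdr[0] | (hdr[1] << 8)));
	return 0;
}

The kernel helpers additionally handle the 4-address case, where the QoS Control field sits 6 bytes further in; the sketch deliberately ignores that.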
@@ -2165,7 +2089,6 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2165 struct ieee80211_rx_status *status) 2089 struct ieee80211_rx_status *status)
2166{ 2090{
2167 struct ieee80211_local *local = hw_to_local(hw); 2091 struct ieee80211_local *local = hw_to_local(hw);
2168 u32 pkt_load;
2169 struct ieee80211_rate *rate = NULL; 2092 struct ieee80211_rate *rate = NULL;
2170 struct ieee80211_supported_band *sband; 2093 struct ieee80211_supported_band *sband;
2171 2094
@@ -2205,11 +2128,8 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2205 return; 2128 return;
2206 } 2129 }
2207 2130
2208 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2209 local->channel_use_raw += pkt_load;
2210
2211 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2131 if (!ieee80211_rx_reorder_ampdu(local, skb))
2212 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate); 2132 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2213 2133
2214 rcu_read_unlock(); 2134 rcu_read_unlock();
2215} 2135}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7d4fe4a52929..d8a16b7f6a6b 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -202,14 +202,12 @@ void sta_info_destroy(struct sta_info *sta)
202 dev_kfree_skb_any(skb); 202 dev_kfree_skb_any(skb);
203 203
204 for (i = 0; i < STA_TID_NUM; i++) { 204 for (i = 0; i < STA_TID_NUM; i++) {
205 spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); 205 spin_lock_bh(&sta->lock);
206 if (sta->ampdu_mlme.tid_rx[i]) 206 if (sta->ampdu_mlme.tid_rx[i])
207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer); 207 del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
208 spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
209 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
210 if (sta->ampdu_mlme.tid_tx[i]) 208 if (sta->ampdu_mlme.tid_tx[i])
211 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer); 209 del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
212 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 210 spin_unlock_bh(&sta->lock);
213 } 211 }
214 212
215 __sta_info_free(local, sta); 213 __sta_info_free(local, sta);
@@ -236,6 +234,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
236 if (!sta) 234 if (!sta)
237 return NULL; 235 return NULL;
238 236
237 spin_lock_init(&sta->lock);
238 spin_lock_init(&sta->flaglock);
239
239 memcpy(sta->addr, addr, ETH_ALEN); 240 memcpy(sta->addr, addr, ETH_ALEN);
240 sta->local = local; 241 sta->local = local;
241 sta->sdata = sdata; 242 sta->sdata = sdata;
@@ -249,15 +250,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
249 return NULL; 250 return NULL;
250 } 251 }
251 252
252 spin_lock_init(&sta->ampdu_mlme.ampdu_rx);
253 spin_lock_init(&sta->ampdu_mlme.ampdu_tx);
254 for (i = 0; i < STA_TID_NUM; i++) { 253 for (i = 0; i < STA_TID_NUM; i++) {
255 /* timer_to_tid must be initialized with identity mapping to 254 /* timer_to_tid must be initialized with identity mapping to
256 * enable session_timer's data differentiation. refer to 255 * enable session_timer's data differentiation. refer to
 257 * sta_rx_agg_session_timer_expired for usage */ 256 * sta_rx_agg_session_timer_expired for usage */
258 sta->timer_to_tid[i] = i; 257 sta->timer_to_tid[i] = i;
259 /* tid to tx queue: initialize according to HW (0 is valid) */ 258 /* tid to tx queue: initialize according to HW (0 is valid) */
260 sta->tid_to_tx_q[i] = local->hw.queues; 259 sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw);
261 /* rx */ 260 /* rx */
262 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; 261 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
263 sta->ampdu_mlme.tid_rx[i] = NULL; 262 sta->ampdu_mlme.tid_rx[i] = NULL;
@@ -276,7 +275,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
276 275
277#ifdef CONFIG_MAC80211_MESH 276#ifdef CONFIG_MAC80211_MESH
278 sta->plink_state = PLINK_LISTEN; 277 sta->plink_state = PLINK_LISTEN;
279 spin_lock_init(&sta->plink_lock);
280 init_timer(&sta->plink_timer); 278 init_timer(&sta->plink_timer);
281#endif 279#endif
282 280
@@ -437,8 +435,7 @@ void __sta_info_unlink(struct sta_info **sta)
437 435
438 list_del(&(*sta)->list); 436 list_del(&(*sta)->list);
439 437
440 if ((*sta)->flags & WLAN_STA_PS) { 438 if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) {
441 (*sta)->flags &= ~WLAN_STA_PS;
442 if (sdata->bss) 439 if (sdata->bss)
443 atomic_dec(&sdata->bss->num_sta_ps); 440 atomic_dec(&sdata->bss->num_sta_ps);
444 __sta_info_clear_tim_bit(sdata->bss, *sta); 441 __sta_info_clear_tim_bit(sdata->bss, *sta);
@@ -515,20 +512,20 @@ static inline int sta_info_buffer_expired(struct ieee80211_local *local,
515 struct sta_info *sta, 512 struct sta_info *sta,
516 struct sk_buff *skb) 513 struct sk_buff *skb)
517{ 514{
518 struct ieee80211_tx_packet_data *pkt_data; 515 struct ieee80211_tx_info *info;
519 int timeout; 516 int timeout;
520 517
521 if (!skb) 518 if (!skb)
522 return 0; 519 return 0;
523 520
524 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 521 info = IEEE80211_SKB_CB(skb);
525 522
526 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 523 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
527 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 / 524 timeout = (sta->listen_interval * local->hw.conf.beacon_int * 32 /
528 15625) * HZ; 525 15625) * HZ;
529 if (timeout < STA_TX_BUFFER_EXPIRE) 526 if (timeout < STA_TX_BUFFER_EXPIRE)
530 timeout = STA_TX_BUFFER_EXPIRE; 527 timeout = STA_TX_BUFFER_EXPIRE;
531 return time_after(jiffies, pkt_data->jiffies + timeout); 528 return time_after(jiffies, info->control.jiffies + timeout);
532} 529}
533 530
534 531
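
The switch from ieee80211_tx_packet_data in skb->cb to IEEE80211_SKB_CB()/ieee80211_tx_info only changes where the buffering timestamp lives; the expiry formula itself is untouched. The comment and the code agree because beacon_int is in TUs of 1024 microseconds and 2 * 1024 / 1,000,000 reduces exactly to 32 / 15,625; the result is then scaled by HZ into jiffies. A quick numeric check with invented values:

/* Verify that 2 * 1024 / 1e6 == 32 / 15625 on example inputs. */
#include <stdio.h>

int main(void)
{
	int listen_interval = 3;     /* hypothetical, in beacon intervals */
	int beacon_int = 100;        /* hypothetical, in TUs (1 TU = 1024 us) */

	double secs_comment = 2.0 * listen_interval * beacon_int * 1024 / 1000000;
	double secs_code    = (double)listen_interval * beacon_int * 32 / 15625;

	printf("%.6f == %.6f\n", secs_comment, secs_code);   /* 0.614400 twice */
	return 0;
}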
@@ -557,8 +554,10 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
557 554
558 sdata = sta->sdata; 555 sdata = sta->sdata;
559 local->total_ps_buffered--; 556 local->total_ps_buffered--;
557#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
560 printk(KERN_DEBUG "Buffered frame expired (STA " 558 printk(KERN_DEBUG "Buffered frame expired (STA "
561 "%s)\n", print_mac(mac, sta->addr)); 559 "%s)\n", print_mac(mac, sta->addr));
560#endif
562 dev_kfree_skb(skb); 561 dev_kfree_skb(skb);
563 562
564 if (skb_queue_empty(&sta->ps_tx_buf)) 563 if (skb_queue_empty(&sta->ps_tx_buf))
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index f8c95bc9659c..fd228c198e31 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -32,7 +32,7 @@
32 * @WLAN_STA_WDS: Station is one of our WDS peers. 32 * @WLAN_STA_WDS: Station is one of our WDS peers.
33 * @WLAN_STA_PSPOLL: Station has just PS-polled us. 33 * @WLAN_STA_PSPOLL: Station has just PS-polled us.
34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the 34 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
35 * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 */ 37 */
38enum ieee80211_sta_info_flags { 38enum ieee80211_sta_info_flags {
@@ -129,23 +129,19 @@ enum plink_state {
129 * 129 *
130 * @tid_state_rx: TID's state in Rx session state machine. 130 * @tid_state_rx: TID's state in Rx session state machine.
131 * @tid_rx: aggregation info for Rx per TID 131 * @tid_rx: aggregation info for Rx per TID
132 * @ampdu_rx: for locking sections in aggregation Rx flow
133 * @tid_state_tx: TID's state in Tx session state machine. 132 * @tid_state_tx: TID's state in Tx session state machine.
134 * @tid_tx: aggregation info for Tx per TID 133 * @tid_tx: aggregation info for Tx per TID
135 * @addba_req_num: number of times addBA request has been sent. 134 * @addba_req_num: number of times addBA request has been sent.
136 * @ampdu_tx: for locking sectionsi in aggregation Tx flow
137 * @dialog_token_allocator: dialog token enumerator for each new session; 135 * @dialog_token_allocator: dialog token enumerator for each new session;
138 */ 136 */
139struct sta_ampdu_mlme { 137struct sta_ampdu_mlme {
140 /* rx */ 138 /* rx */
141 u8 tid_state_rx[STA_TID_NUM]; 139 u8 tid_state_rx[STA_TID_NUM];
142 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 140 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
143 spinlock_t ampdu_rx;
144 /* tx */ 141 /* tx */
145 u8 tid_state_tx[STA_TID_NUM]; 142 u8 tid_state_tx[STA_TID_NUM];
146 struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; 143 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
147 u8 addba_req_num[STA_TID_NUM]; 144 u8 addba_req_num[STA_TID_NUM];
148 spinlock_t ampdu_tx;
149 u8 dialog_token_allocator; 145 u8 dialog_token_allocator;
150}; 146};
151 147
@@ -168,6 +164,7 @@ struct sta_ampdu_mlme {
168 * @aid: STA's unique AID (1..2007, 0 = not assigned yet), 164 * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
169 * only used in AP (and IBSS?) mode 165 * only used in AP (and IBSS?) mode
170 * @flags: STA flags, see &enum ieee80211_sta_info_flags 166 * @flags: STA flags, see &enum ieee80211_sta_info_flags
167 * @flaglock: spinlock for flags accesses
171 * @ps_tx_buf: buffer of frames to transmit to this station 168 * @ps_tx_buf: buffer of frames to transmit to this station
172 * when it leaves power saving state 169 * when it leaves power saving state
173 * @tx_filtered: buffer of frames we already tried to transmit 170 * @tx_filtered: buffer of frames we already tried to transmit
@@ -177,6 +174,8 @@ struct sta_ampdu_mlme {
177 * @rx_bytes: Number of bytes received from this STA 174 * @rx_bytes: Number of bytes received from this STA
178 * @supp_rates: Bitmap of supported rates (per band) 175 * @supp_rates: Bitmap of supported rates (per band)
179 * @ht_info: HT capabilities of this STA 176 * @ht_info: HT capabilities of this STA
177 * @lock: used for locking all fields that require locking, see comments
178 * in the header file.
180 */ 179 */
181struct sta_info { 180struct sta_info {
182 /* General information, mostly static */ 181 /* General information, mostly static */
@@ -187,6 +186,8 @@ struct sta_info {
187 struct ieee80211_key *key; 186 struct ieee80211_key *key;
188 struct rate_control_ref *rate_ctrl; 187 struct rate_control_ref *rate_ctrl;
189 void *rate_ctrl_priv; 188 void *rate_ctrl_priv;
189 spinlock_t lock;
190 spinlock_t flaglock;
190 struct ieee80211_ht_info ht_info; 191 struct ieee80211_ht_info ht_info;
191 u64 supp_rates[IEEE80211_NUM_BANDS]; 192 u64 supp_rates[IEEE80211_NUM_BANDS];
192 u8 addr[ETH_ALEN]; 193 u8 addr[ETH_ALEN];
@@ -199,7 +200,10 @@ struct sta_info {
199 */ 200 */
200 u8 pin_status; 201 u8 pin_status;
201 202
202 /* frequently updated information, needs locking? */ 203 /*
204 * frequently updated, locked with own spinlock (flaglock),
205 * use the accessors defined below
206 */
203 u32 flags; 207 u32 flags;
204 208
205 /* 209 /*
@@ -217,8 +221,8 @@ struct sta_info {
217 * from this STA */ 221 * from this STA */
218 unsigned long rx_fragments; /* number of received MPDUs */ 222 unsigned long rx_fragments; /* number of received MPDUs */
219 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */ 223 unsigned long rx_dropped; /* number of dropped MPDUs from this STA */
220 int last_rssi; /* RSSI of last received frame from this STA */
221 int last_signal; /* signal of last received frame from this STA */ 224 int last_signal; /* signal of last received frame from this STA */
225 int last_qual; /* qual of last received frame from this STA */
222 int last_noise; /* noise of last received frame from this STA */ 226 int last_noise; /* noise of last received frame from this STA */
223 /* last received seq/frag number from this STA (per RX queue) */ 227 /* last received seq/frag number from this STA (per RX queue) */
224 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 228 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
@@ -246,12 +250,8 @@ struct sta_info {
246 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; 250 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
247#endif 251#endif
248 252
249 /* Debug counters, no locking doesn't matter */
250 int channel_use;
251 int channel_use_raw;
252
253 /* 253 /*
254 * Aggregation information, comes with own locking. 254 * Aggregation information, locked with lock.
255 */ 255 */
256 struct sta_ampdu_mlme ampdu_mlme; 256 struct sta_ampdu_mlme ampdu_mlme;
257 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */ 257 u8 timer_to_tid[STA_TID_NUM]; /* identity mapping to ID timers */
@@ -270,9 +270,6 @@ struct sta_info {
270 enum plink_state plink_state; 270 enum plink_state plink_state;
271 u32 plink_timeout; 271 u32 plink_timeout;
272 struct timer_list plink_timer; 272 struct timer_list plink_timer;
273 spinlock_t plink_lock; /* For peer_state reads / updates and other
274 updates in the structure. Ensures robust
275 transitions for the peerlink FSM */
276#endif 273#endif
277 274
278#ifdef CONFIG_MAC80211_DEBUGFS 275#ifdef CONFIG_MAC80211_DEBUGFS
@@ -299,6 +296,73 @@ static inline enum plink_state sta_plink_state(struct sta_info *sta)
299 return PLINK_LISTEN; 296 return PLINK_LISTEN;
300} 297}
301 298
299static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
300{
301 unsigned long irqfl;
302
303 spin_lock_irqsave(&sta->flaglock, irqfl);
304 sta->flags |= flags;
305 spin_unlock_irqrestore(&sta->flaglock, irqfl);
306}
307
308static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
309{
310 unsigned long irqfl;
311
312 spin_lock_irqsave(&sta->flaglock, irqfl);
313 sta->flags &= ~flags;
314 spin_unlock_irqrestore(&sta->flaglock, irqfl);
315}
316
317static inline void set_and_clear_sta_flags(struct sta_info *sta,
318 const u32 set, const u32 clear)
319{
320 unsigned long irqfl;
321
322 spin_lock_irqsave(&sta->flaglock, irqfl);
323 sta->flags |= set;
324 sta->flags &= ~clear;
325 spin_unlock_irqrestore(&sta->flaglock, irqfl);
326}
327
328static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
329{
330 u32 ret;
331 unsigned long irqfl;
332
333 spin_lock_irqsave(&sta->flaglock, irqfl);
334 ret = sta->flags & flags;
335 spin_unlock_irqrestore(&sta->flaglock, irqfl);
336
337 return ret;
338}
339
340static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
341 const u32 flags)
342{
343 u32 ret;
344 unsigned long irqfl;
345
346 spin_lock_irqsave(&sta->flaglock, irqfl);
347 ret = sta->flags & flags;
348 sta->flags &= ~flags;
349 spin_unlock_irqrestore(&sta->flaglock, irqfl);
350
351 return ret;
352}
353
354static inline u32 get_sta_flags(struct sta_info *sta)
355{
356 u32 ret;
357 unsigned long irqfl;
358
359 spin_lock_irqsave(&sta->flaglock, irqfl);
360 ret = sta->flags;
361 spin_unlock_irqrestore(&sta->flaglock, irqfl);
362
363 return ret;
364}
365
302 366
303/* Maximum number of concurrently registered stations */ 367/* Maximum number of concurrently registered stations */
304#define MAX_STA_COUNT 2007 368#define MAX_STA_COUNT 2007
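
All reads and read-modify-write updates of sta->flags now go through these helpers, which take sta->flaglock with interrupts disabled, so a flag flip from interrupt context can no longer race a concurrent |=/&= elsewhere, and compound operations such as test_and_clear_sta_flags() are atomic as a unit (the unlink hunk above uses exactly that to replace the open-coded WLAN_STA_PS check-and-clear). A generic user-space sketch of the same pattern, with a pthread mutex standing in for spin_lock_irqsave():

/* Generic sketch of the locked flag-accessor pattern; a pthread mutex
 * stands in for spin_lock_irqsave(&sta->flaglock, ...). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct sta {
	pthread_mutex_t flaglock;
	uint32_t flags;
};

static void set_flags(struct sta *s, uint32_t flags)
{
	pthread_mutex_lock(&s->flaglock);
	s->flags |= flags;
	pthread_mutex_unlock(&s->flaglock);
}

static uint32_t test_and_clear_flags(struct sta *s, uint32_t flags)
{
	uint32_t ret;

	pthread_mutex_lock(&s->flaglock);
	ret = s->flags & flags;    /* report which of the requested flags were set */
	s->flags &= ~flags;        /* ...and clear them in the same critical section */
	pthread_mutex_unlock(&s->flaglock);
	return ret;
}

int main(void)
{
	struct sta s = { PTHREAD_MUTEX_INITIALIZER, 0 };
	uint32_t cleared;

	set_flags(&s, 0x1 | 0x4);
	cleared = test_and_clear_flags(&s, 0x4);
	printf("cleared: 0x%x, left: 0x%x\n", cleared, s.flags);
	return 0;
}

Keeping flaglock separate from the new sta->lock means a hot flag test never contends with the slower aggregation state changes that lock now covers.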
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 09093da24af6..995f7af3d25e 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -6,25 +6,23 @@
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9
10#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/bitops.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <asm/unaligned.h>
13 14
14#include <net/mac80211.h> 15#include <net/mac80211.h>
15#include "key.h" 16#include "key.h"
16#include "tkip.h" 17#include "tkip.h"
17#include "wep.h" 18#include "wep.h"
18 19
19
20/* TKIP key mixing functions */
21
22
23#define PHASE1_LOOP_COUNT 8 20#define PHASE1_LOOP_COUNT 8
24 21
25 22/*
26/* 2-byte by 2-byte subset of the full AES S-box table; second part of this 23 * 2-byte by 2-byte subset of the full AES S-box table; second part of this
27 * table is identical to first part but byte-swapped */ 24 * table is identical to first part but byte-swapped
25 */
28static const u16 tkip_sbox[256] = 26static const u16 tkip_sbox[256] =
29{ 27{
30 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 28 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
@@ -61,84 +59,54 @@ static const u16 tkip_sbox[256] =
61 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, 59 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
62}; 60};
63 61
64 62static u16 tkipS(u16 val)
65static inline u16 Mk16(u8 x, u8 y)
66{ 63{
67 return ((u16) x << 8) | (u16) y; 64 return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]);
68} 65}
69 66
70 67static u8 *write_tkip_iv(u8 *pos, u16 iv16)
71static inline u8 Hi8(u16 v)
72{
73 return v >> 8;
74}
75
76
77static inline u8 Lo8(u16 v)
78{
79 return v & 0xff;
80}
81
82
83static inline u16 Hi16(u32 v)
84{
85 return v >> 16;
86}
87
88
89static inline u16 Lo16(u32 v)
90{
91 return v & 0xffff;
92}
93
94
95static inline u16 RotR1(u16 v)
96{
97 return (v >> 1) | ((v & 0x0001) << 15);
98}
99
100
101static inline u16 tkip_S(u16 val)
102{ 68{
103 u16 a = tkip_sbox[Hi8(val)]; 69 *pos++ = iv16 >> 8;
104 70 *pos++ = ((iv16 >> 8) | 0x20) & 0x7f;
105 return tkip_sbox[Lo8(val)] ^ Hi8(a) ^ (Lo8(a) << 8); 71 *pos++ = iv16 & 0xFF;
72 return pos;
106} 73}
107 74
108 75/*
109 76 * P1K := Phase1(TA, TK, TSC)
110/* P1K := Phase1(TA, TK, TSC)
111 * TA = transmitter address (48 bits) 77 * TA = transmitter address (48 bits)
112 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) 78 * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits)
113 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) 79 * TSC = TKIP sequence counter (48 bits, only 32 msb bits used)
114 * P1K: 80 bits 80 * P1K: 80 bits
115 */ 81 */
116static void tkip_mixing_phase1(const u8 *ta, const u8 *tk, u32 tsc_IV32, 82static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
117 u16 *p1k) 83 const u8 *ta, u32 tsc_IV32)
118{ 84{
119 int i, j; 85 int i, j;
86 u16 *p1k = ctx->p1k;
120 87
121 p1k[0] = Lo16(tsc_IV32); 88 p1k[0] = tsc_IV32 & 0xFFFF;
122 p1k[1] = Hi16(tsc_IV32); 89 p1k[1] = tsc_IV32 >> 16;
123 p1k[2] = Mk16(ta[1], ta[0]); 90 p1k[2] = get_unaligned_le16(ta + 0);
124 p1k[3] = Mk16(ta[3], ta[2]); 91 p1k[3] = get_unaligned_le16(ta + 2);
125 p1k[4] = Mk16(ta[5], ta[4]); 92 p1k[4] = get_unaligned_le16(ta + 4);
126 93
127 for (i = 0; i < PHASE1_LOOP_COUNT; i++) { 94 for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
128 j = 2 * (i & 1); 95 j = 2 * (i & 1);
129 p1k[0] += tkip_S(p1k[4] ^ Mk16(tk[ 1 + j], tk[ 0 + j])); 96 p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
130 p1k[1] += tkip_S(p1k[0] ^ Mk16(tk[ 5 + j], tk[ 4 + j])); 97 p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
131 p1k[2] += tkip_S(p1k[1] ^ Mk16(tk[ 9 + j], tk[ 8 + j])); 98 p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
132 p1k[3] += tkip_S(p1k[2] ^ Mk16(tk[13 + j], tk[12 + j])); 99 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
133 p1k[4] += tkip_S(p1k[3] ^ Mk16(tk[ 1 + j], tk[ 0 + j])) + i; 100 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
134 } 101 }
102 ctx->initialized = 1;
135} 103}
136 104
137 105static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
138static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16, 106 u16 tsc_IV16, u8 *rc4key)
139 u8 *rc4key)
140{ 107{
141 u16 ppk[6]; 108 u16 ppk[6];
109 const u16 *p1k = ctx->p1k;
142 int i; 110 int i;
143 111
144 ppk[0] = p1k[0]; 112 ppk[0] = p1k[0];
@@ -148,70 +116,35 @@ static void tkip_mixing_phase2(const u16 *p1k, const u8 *tk, u16 tsc_IV16,
148 ppk[4] = p1k[4]; 116 ppk[4] = p1k[4];
149 ppk[5] = p1k[4] + tsc_IV16; 117 ppk[5] = p1k[4] + tsc_IV16;
150 118
151 ppk[0] += tkip_S(ppk[5] ^ Mk16(tk[ 1], tk[ 0])); 119 ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
152 ppk[1] += tkip_S(ppk[0] ^ Mk16(tk[ 3], tk[ 2])); 120 ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
153 ppk[2] += tkip_S(ppk[1] ^ Mk16(tk[ 5], tk[ 4])); 121 ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
154 ppk[3] += tkip_S(ppk[2] ^ Mk16(tk[ 7], tk[ 6])); 122 ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
155 ppk[4] += tkip_S(ppk[3] ^ Mk16(tk[ 9], tk[ 8])); 123 ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
156 ppk[5] += tkip_S(ppk[4] ^ Mk16(tk[11], tk[10])); 124 ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
157 ppk[0] += RotR1(ppk[5] ^ Mk16(tk[13], tk[12])); 125 ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
158 ppk[1] += RotR1(ppk[0] ^ Mk16(tk[15], tk[14])); 126 ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
159 ppk[2] += RotR1(ppk[1]); 127 ppk[2] += ror16(ppk[1], 1);
160 ppk[3] += RotR1(ppk[2]); 128 ppk[3] += ror16(ppk[2], 1);
161 ppk[4] += RotR1(ppk[3]); 129 ppk[4] += ror16(ppk[3], 1);
162 ppk[5] += RotR1(ppk[4]); 130 ppk[5] += ror16(ppk[4], 1);
163 131
164 rc4key[0] = Hi8(tsc_IV16); 132 rc4key = write_tkip_iv(rc4key, tsc_IV16);
165 rc4key[1] = (Hi8(tsc_IV16) | 0x20) & 0x7f; 133 *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
166 rc4key[2] = Lo8(tsc_IV16); 134
167 rc4key[3] = Lo8((ppk[5] ^ Mk16(tk[1], tk[0])) >> 1); 135 for (i = 0; i < 6; i++)
168 136 put_unaligned_le16(ppk[i], rc4key + 2 * i);
169 for (i = 0; i < 6; i++) {
170 rc4key[4 + 2 * i] = Lo8(ppk[i]);
171 rc4key[5 + 2 * i] = Hi8(ppk[i]);
172 }
173} 137}
174 138
175
176/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets 139/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
177 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of 140 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of
178 * the packet payload). */ 141 * the packet payload). */
179u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 142u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16)
180 u8 iv0, u8 iv1, u8 iv2)
181{ 143{
182 *pos++ = iv0; 144 pos = write_tkip_iv(pos, iv16);
183 *pos++ = iv1;
184 *pos++ = iv2;
185 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; 145 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
186 *pos++ = key->u.tkip.iv32 & 0xff; 146 put_unaligned_le32(key->u.tkip.tx.iv32, pos);
187 *pos++ = (key->u.tkip.iv32 >> 8) & 0xff; 147 return pos + 4;
188 *pos++ = (key->u.tkip.iv32 >> 16) & 0xff;
189 *pos++ = (key->u.tkip.iv32 >> 24) & 0xff;
190 return pos;
191}
192
193
194void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
195 u16 *phase1key)
196{
197 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
198 key->u.tkip.iv32, phase1key);
199}
200
201void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
202 u8 *rc4key)
203{
204 /* Calculate per-packet key */
205 if (key->u.tkip.iv16 == 0 || !key->u.tkip.tx_initialized) {
206 /* IV16 wrapped around - perform TKIP phase 1 */
207 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
208 key->u.tkip.iv32, key->u.tkip.p1k);
209 key->u.tkip.tx_initialized = 1;
210 }
211
212 tkip_mixing_phase2(key->u.tkip.p1k,
213 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
214 key->u.tkip.iv16, rc4key);
215} 148}
216 149
217void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, 150void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
@@ -220,48 +153,44 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
220{ 153{
221 struct ieee80211_key *key = (struct ieee80211_key *) 154 struct ieee80211_key *key = (struct ieee80211_key *)
222 container_of(keyconf, struct ieee80211_key, conf); 155 container_of(keyconf, struct ieee80211_key, conf);
223 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
224 u8 *data = (u8 *) hdr; 157 u8 *data;
225 u16 fc = le16_to_cpu(hdr->frame_control); 158 const u8 *tk;
226 int hdr_len = ieee80211_get_hdrlen(fc); 159 struct tkip_ctx *ctx;
227 u8 *ta = hdr->addr2;
228 u16 iv16; 160 u16 iv16;
229 u32 iv32; 161 u32 iv32;
230 162
231 iv16 = data[hdr_len] << 8; 163 data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
232 iv16 += data[hdr_len + 2]; 164 iv16 = data[2] | (data[0] << 8);
233 iv32 = data[hdr_len + 4] | (data[hdr_len + 5] << 8) | 165 iv32 = get_unaligned_le32(&data[4]);
234 (data[hdr_len + 6] << 16) | (data[hdr_len + 7] << 24); 166
167 tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
168 ctx = &key->u.tkip.tx;
235 169
236#ifdef CONFIG_TKIP_DEBUG 170#ifdef CONFIG_MAC80211_TKIP_DEBUG
237 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", 171 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n",
238 iv16, iv32); 172 iv16, iv32);
239 173
240 if (iv32 != key->u.tkip.iv32) { 174 if (iv32 != ctx->iv32) {
241 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", 175 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n",
242 iv32, key->u.tkip.iv32); 176 iv32, ctx->iv32);
243 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " 177 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a "
244 "fragmented packet\n"); 178 "fragmented packet\n");
245 } 179 }
246#endif /* CONFIG_TKIP_DEBUG */ 180#endif
247 181
248 /* Update the p1k only when the iv16 in the packet wraps around, this 182 /* Update the p1k only when the iv16 in the packet wraps around, this
249 * might occur after the wrap around of iv16 in the key in case of 183 * might occur after the wrap around of iv16 in the key in case of
250 * fragmented packets. */ 184 * fragmented packets. */
251 if (iv16 == 0 || !key->u.tkip.tx_initialized) { 185 if (iv16 == 0 || !ctx->initialized)
252 /* IV16 wrapped around - perform TKIP phase 1 */ 186 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
253 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY],
254 iv32, key->u.tkip.p1k);
255 key->u.tkip.tx_initialized = 1;
256 }
257 187
258 if (type == IEEE80211_TKIP_P1_KEY) { 188 if (type == IEEE80211_TKIP_P1_KEY) {
259 memcpy(outkey, key->u.tkip.p1k, sizeof(u16) * 5); 189 memcpy(outkey, ctx->p1k, sizeof(u16) * 5);
260 return; 190 return;
261 } 191 }
262 192
263 tkip_mixing_phase2(key->u.tkip.p1k, 193 tkip_mixing_phase2(tk, ctx, iv16, outkey);
264 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, outkey);
265} 194}
266EXPORT_SYMBOL(ieee80211_get_tkip_key); 195EXPORT_SYMBOL(ieee80211_get_tkip_key);
267 196
@@ -275,13 +204,19 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
275 u8 *pos, size_t payload_len, u8 *ta) 204 u8 *pos, size_t payload_len, u8 *ta)
276{ 205{
277 u8 rc4key[16]; 206 u8 rc4key[16];
207 struct tkip_ctx *ctx = &key->u.tkip.tx;
208 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
209
210 /* Calculate per-packet key */
211 if (ctx->iv16 == 0 || !ctx->initialized)
212 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
213
214 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
278 215
279 ieee80211_tkip_gen_rc4key(key, ta, rc4key); 216 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
280 pos = ieee80211_tkip_add_iv(pos, key, rc4key[0], rc4key[1], rc4key[2]);
281 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 217 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
282} 218}
283 219
284
285/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the 220/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
286 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 221 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
287 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 222 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
@@ -296,15 +231,16 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
296 u32 iv16; 231 u32 iv16;
297 u8 rc4key[16], keyid, *pos = payload; 232 u8 rc4key[16], keyid, *pos = payload;
298 int res; 233 int res;
234 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
299 235
300 if (payload_len < 12) 236 if (payload_len < 12)
301 return -1; 237 return -1;
302 238
303 iv16 = (pos[0] << 8) | pos[2]; 239 iv16 = (pos[0] << 8) | pos[2];
304 keyid = pos[3]; 240 keyid = pos[3];
305 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 241 iv32 = get_unaligned_le32(pos + 4);
306 pos += 8; 242 pos += 8;
307#ifdef CONFIG_TKIP_DEBUG 243#ifdef CONFIG_MAC80211_TKIP_DEBUG
308 { 244 {
309 int i; 245 int i;
310 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len); 246 printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
@@ -314,7 +250,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
314 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n", 250 printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
315 iv16, iv32); 251 iv16, iv32);
316 } 252 }
317#endif /* CONFIG_TKIP_DEBUG */ 253#endif
318 254
319 if (!(keyid & (1 << 5))) 255 if (!(keyid & (1 << 5)))
320 return TKIP_DECRYPT_NO_EXT_IV; 256 return TKIP_DECRYPT_NO_EXT_IV;
@@ -322,50 +258,48 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
322 if ((keyid >> 6) != key->conf.keyidx) 258 if ((keyid >> 6) != key->conf.keyidx)
323 return TKIP_DECRYPT_INVALID_KEYIDX; 259 return TKIP_DECRYPT_INVALID_KEYIDX;
324 260
325 if (key->u.tkip.rx_initialized[queue] && 261 if (key->u.tkip.rx[queue].initialized &&
326 (iv32 < key->u.tkip.iv32_rx[queue] || 262 (iv32 < key->u.tkip.rx[queue].iv32 ||
327 (iv32 == key->u.tkip.iv32_rx[queue] && 263 (iv32 == key->u.tkip.rx[queue].iv32 &&
328 iv16 <= key->u.tkip.iv16_rx[queue]))) { 264 iv16 <= key->u.tkip.rx[queue].iv16))) {
329#ifdef CONFIG_TKIP_DEBUG 265#ifdef CONFIG_MAC80211_TKIP_DEBUG
330 DECLARE_MAC_BUF(mac); 266 DECLARE_MAC_BUF(mac);
331 printk(KERN_DEBUG "TKIP replay detected for RX frame from " 267 printk(KERN_DEBUG "TKIP replay detected for RX frame from "
332 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", 268 "%s (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
333 print_mac(mac, ta), 269 print_mac(mac, ta),
334 iv32, iv16, key->u.tkip.iv32_rx[queue], 270 iv32, iv16, key->u.tkip.rx[queue].iv32,
335 key->u.tkip.iv16_rx[queue]); 271 key->u.tkip.rx[queue].iv16);
336#endif /* CONFIG_TKIP_DEBUG */ 272#endif
337 return TKIP_DECRYPT_REPLAY; 273 return TKIP_DECRYPT_REPLAY;
338 } 274 }
339 275
340 if (only_iv) { 276 if (only_iv) {
341 res = TKIP_DECRYPT_OK; 277 res = TKIP_DECRYPT_OK;
342 key->u.tkip.rx_initialized[queue] = 1; 278 key->u.tkip.rx[queue].initialized = 1;
343 goto done; 279 goto done;
344 } 280 }
345 281
346 if (!key->u.tkip.rx_initialized[queue] || 282 if (!key->u.tkip.rx[queue].initialized ||
347 key->u.tkip.iv32_rx[queue] != iv32) { 283 key->u.tkip.rx[queue].iv32 != iv32) {
348 key->u.tkip.rx_initialized[queue] = 1;
349 /* IV16 wrapped around - perform TKIP phase 1 */ 284 /* IV16 wrapped around - perform TKIP phase 1 */
350 tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], 285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
351 iv32, key->u.tkip.p1k_rx[queue]); 286#ifdef CONFIG_MAC80211_TKIP_DEBUG
352#ifdef CONFIG_TKIP_DEBUG
353 { 287 {
354 int i; 288 int i;
289 u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
355 DECLARE_MAC_BUF(mac); 290 DECLARE_MAC_BUF(mac);
356 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s" 291 printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s"
357 " TK=", print_mac(mac, ta)); 292 " TK=", print_mac(mac, ta));
358 for (i = 0; i < 16; i++) 293 for (i = 0; i < 16; i++)
359 printk("%02x ", 294 printk("%02x ",
360 key->conf.key[ 295 key->conf.key[key_offset + i]);
361 ALG_TKIP_TEMP_ENCR_KEY + i]);
362 printk("\n"); 296 printk("\n");
363 printk(KERN_DEBUG "TKIP decrypt: P1K="); 297 printk(KERN_DEBUG "TKIP decrypt: P1K=");
364 for (i = 0; i < 5; i++) 298 for (i = 0; i < 5; i++)
365 printk("%04x ", key->u.tkip.p1k_rx[queue][i]); 299 printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
366 printk("\n"); 300 printk("\n");
367 } 301 }
368#endif /* CONFIG_TKIP_DEBUG */ 302#endif
369 if (key->local->ops->update_tkip_key && 303 if (key->local->ops->update_tkip_key &&
370 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
371 u8 bcast[ETH_ALEN] = 305 u8 bcast[ETH_ALEN] =
@@ -377,14 +311,12 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
377 311
378 key->local->ops->update_tkip_key( 312 key->local->ops->update_tkip_key(
379 local_to_hw(key->local), &key->conf, 313 local_to_hw(key->local), &key->conf,
380 sta_addr, iv32, key->u.tkip.p1k_rx[queue]); 314 sta_addr, iv32, key->u.tkip.rx[queue].p1k);
381 } 315 }
382 } 316 }
383 317
384 tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], 318 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
385 &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], 319#ifdef CONFIG_MAC80211_TKIP_DEBUG
386 iv16, rc4key);
387#ifdef CONFIG_TKIP_DEBUG
388 { 320 {
389 int i; 321 int i;
390 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key="); 322 printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
@@ -392,7 +324,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
392 printk("%02x ", rc4key[i]); 324 printk("%02x ", rc4key[i]);
393 printk("\n"); 325 printk("\n");
394 } 326 }
395#endif /* CONFIG_TKIP_DEBUG */ 327#endif
396 328
397 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); 329 res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
398 done: 330 done:
@@ -409,5 +341,3 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
409 341
410 return res; 342 return res;
411} 343}
412
413
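
The rewritten tkip.c expresses the old helpers with standard kernel primitives: Mk16(x[1], x[0]) over adjacent key or address bytes is a little-endian 16-bit load (get_unaligned_le16()), RotR1() is ror16(v, 1), and because the second half of the full TKIP S-box is the byte-swapped first half, tkipS() can index one 256-entry table twice and swab16() the high-byte lookup. write_tkip_iv() also fixes the three-byte IV layout, including the (hi | 0x20) & 0x7f dummy byte inserted to avoid weak WEP IV classes, which is why the decrypt path rebuilds iv16 from bytes 0 and 2 and ignores byte 1. A small standalone sketch of just these byte-level pieces (not the phase 1/2 key mixing itself):

/* Sketch of the TKIP byte-order helpers used by the rewritten tkip.c:
 * the little-endian load and the IV byte layout only. */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)      /* same value as Mk16(p[1], p[0]) */
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint8_t *write_tkip_iv(uint8_t *pos, uint16_t iv16)
{
	*pos++ = iv16 >> 8;
	*pos++ = ((iv16 >> 8) | 0x20) & 0x7f;   /* dummy byte, avoids weak WEP IVs */
	*pos++ = iv16 & 0xff;
	return pos;
}

int main(void)
{
	uint8_t key2[2] = { 0x34, 0x12 };
	uint8_t iv[3];
	uint16_t iv16 = 0xabcd;

	write_tkip_iv(iv, iv16);
	/* decrypt side: iv16 = data[2] | (data[0] << 8); byte 1 is skipped */
	printf("le16 load: 0x%04x, IV roundtrip: 0x%04x -> %02x %02x %02x -> 0x%04x\n",
	       get_le16(key2), iv16, iv[0], iv[1], iv[2],
	       (uint16_t)(iv[2] | (iv[0] << 8)));
	return 0;
}

Moving the tx/rx state into struct tkip_ctx (iv32, iv16, p1k, initialized) is what lets phase 1 and phase 2 share one signature for both directions instead of the separate p1k/p1k_rx[queue] fields.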
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index b7c2ee763d9d..d4714383f5fc 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,12 +13,8 @@
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include "key.h" 14#include "key.h"
15 15
16u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 u8 iv0, u8 iv1, u8 iv2); 17
18void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta,
19 u16 *phase1key);
20void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta,
21 u8 *rc4key);
22void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
23 struct ieee80211_key *key, 19 struct ieee80211_key *key,
24 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c80d5899f279..9bd9faac3c3c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -52,9 +52,8 @@ static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdat
52static void ieee80211_dump_frame(const char *ifname, const char *title, 52static void ieee80211_dump_frame(const char *ifname, const char *title,
53 const struct sk_buff *skb) 53 const struct sk_buff *skb)
54{ 54{
55 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 55 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
56 u16 fc; 56 unsigned int hdrlen;
57 int hdrlen;
58 DECLARE_MAC_BUF(mac); 57 DECLARE_MAC_BUF(mac);
59 58
60 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); 59 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
@@ -63,13 +62,12 @@ static void ieee80211_dump_frame(const char *ifname, const char *title,
63 return; 62 return;
64 } 63 }
65 64
66 fc = le16_to_cpu(hdr->frame_control); 65 hdrlen = ieee80211_hdrlen(hdr->frame_control);
67 hdrlen = ieee80211_get_hdrlen(fc);
68 if (hdrlen > skb->len) 66 if (hdrlen > skb->len)
69 hdrlen = skb->len; 67 hdrlen = skb->len;
70 if (hdrlen >= 4) 68 if (hdrlen >= 4)
71 printk(" FC=0x%04x DUR=0x%04x", 69 printk(" FC=0x%04x DUR=0x%04x",
72 fc, le16_to_cpu(hdr->duration_id)); 70 le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id));
73 if (hdrlen >= 10) 71 if (hdrlen >= 10)
74 printk(" A1=%s", print_mac(mac, hdr->addr1)); 72 printk(" A1=%s", print_mac(mac, hdr->addr1));
75 if (hdrlen >= 16) 73 if (hdrlen >= 16)
@@ -87,15 +85,16 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title,
87} 85}
88#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ 86#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
89 87
90static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 88static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
91 int next_frag_len) 89 int next_frag_len)
92{ 90{
93 int rate, mrate, erp, dur, i; 91 int rate, mrate, erp, dur, i;
94 struct ieee80211_rate *txrate = tx->rate; 92 struct ieee80211_rate *txrate;
95 struct ieee80211_local *local = tx->local; 93 struct ieee80211_local *local = tx->local;
96 struct ieee80211_supported_band *sband; 94 struct ieee80211_supported_band *sband;
97 95
98 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 96 sband = local->hw.wiphy->bands[tx->channel->band];
97 txrate = &sband->bitrates[tx->rate_idx];
99 98
100 erp = 0; 99 erp = 0;
101 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 100 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -139,7 +138,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
139 138
140 /* data/mgmt */ 139 /* data/mgmt */
141 if (0 /* FIX: data/mgmt during CFP */) 140 if (0 /* FIX: data/mgmt during CFP */)
142 return 32768; 141 return cpu_to_le16(32768);
143 142
144 if (group_addr) /* Group address as the destination - no ACK */ 143 if (group_addr) /* Group address as the destination - no ACK */
145 return 0; 144 return 0;
@@ -209,19 +208,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
209 tx->sdata->bss_conf.use_short_preamble); 208 tx->sdata->bss_conf.use_short_preamble);
210 } 209 }
211 210
212 return dur; 211 return cpu_to_le16(dur);
213}
214
215static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local,
216 int queue)
217{
218 return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]);
219}
220
221static inline int __ieee80211_queue_pending(const struct ieee80211_local *local,
222 int queue)
223{
224 return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]);
225} 212}
226 213
227static int inline is_ieee80211_device(struct net_device *dev, 214static int inline is_ieee80211_device(struct net_device *dev,
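
Returning __le16 from ieee80211_duration() and wrapping the result in cpu_to_le16() matches the byte order of the Duration/ID field, which is little-endian on the air and declared __le16 in struct ieee80211_hdr, so sparse can now flag callers that treat the value as CPU order. A trivial standalone illustration of the wire layout (header offset per the 802.11 MAC header, value invented):

/* Sketch: the Duration/ID field is little-endian on the air; storing it
 * byte-by-byte is what cpu_to_le16() plus a __le16 struct field guarantee. */
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t hdr[24] = { 0 };
	uint16_t dur = 314;          /* hypothetical duration, in microseconds */

	put_le16(&hdr[2], dur);      /* Duration/ID lives at header offset 2 */
	printf("duration bytes on the wire: %02x %02x\n", hdr[2], hdr[3]);
	return 0;
}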
@@ -233,16 +220,16 @@ static int inline is_ieee80211_device(struct net_device *dev,
233 220
234/* tx handlers */ 221/* tx handlers */
235 222
236static ieee80211_tx_result 223static ieee80211_tx_result debug_noinline
237ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 224ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
238{ 225{
239#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 226#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
240 struct sk_buff *skb = tx->skb; 227 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
241 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
242#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 228#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
229 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
243 u32 sta_flags; 230 u32 sta_flags;
244 231
245 if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) 232 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
246 return TX_CONTINUE; 233 return TX_CONTINUE;
247 234
248 if (unlikely(tx->local->sta_sw_scanning) && 235 if (unlikely(tx->local->sta_sw_scanning) &&
@@ -256,7 +243,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
256 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 243 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
257 return TX_CONTINUE; 244 return TX_CONTINUE;
258 245
259 sta_flags = tx->sta ? tx->sta->flags : 0; 246 sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
260 247
261 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 248 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
262 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 249 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
@@ -287,12 +274,12 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
287 return TX_CONTINUE; 274 return TX_CONTINUE;
288} 275}
289 276
290static ieee80211_tx_result 277static ieee80211_tx_result debug_noinline
291ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) 278ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
292{ 279{
293 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 280 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
294 281
295 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) 282 if (ieee80211_hdrlen(hdr->frame_control) >= 24)
296 ieee80211_include_sequence(tx->sdata, hdr); 283 ieee80211_include_sequence(tx->sdata, hdr);
297 284
298 return TX_CONTINUE; 285 return TX_CONTINUE;
@@ -340,13 +327,17 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
340 rcu_read_unlock(); 327 rcu_read_unlock();
341 328
342 local->total_ps_buffered = total; 329 local->total_ps_buffered = total;
330#ifdef MAC80211_VERBOSE_PS_DEBUG
343 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", 331 printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
344 wiphy_name(local->hw.wiphy), purged); 332 wiphy_name(local->hw.wiphy), purged);
333#endif
345} 334}
346 335
347static ieee80211_tx_result 336static ieee80211_tx_result
348ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 337ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
349{ 338{
339 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
340
350 /* 341 /*
351 * broadcast/multicast frame 342 * broadcast/multicast frame
352 * 343 *
@@ -369,11 +360,13 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
369 purge_old_ps_buffers(tx->local); 360 purge_old_ps_buffers(tx->local);
370 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= 361 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >=
371 AP_MAX_BC_BUFFER) { 362 AP_MAX_BC_BUFFER) {
363#ifdef MAC80211_VERBOSE_PS_DEBUG
372 if (net_ratelimit()) { 364 if (net_ratelimit()) {
373 printk(KERN_DEBUG "%s: BC TX buffer full - " 365 printk(KERN_DEBUG "%s: BC TX buffer full - "
374 "dropping the oldest frame\n", 366 "dropping the oldest frame\n",
375 tx->dev->name); 367 tx->dev->name);
376 } 368 }
369#endif
377 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 370 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
378 } else 371 } else
379 tx->local->total_ps_buffered++; 372 tx->local->total_ps_buffered++;
@@ -382,7 +375,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
382 } 375 }
383 376
384 /* buffered in hardware */ 377 /* buffered in hardware */
385 tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; 378 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
386 379
387 return TX_CONTINUE; 380 return TX_CONTINUE;
388} 381}
@@ -391,6 +384,8 @@ static ieee80211_tx_result
391ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) 384ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
392{ 385{
393 struct sta_info *sta = tx->sta; 386 struct sta_info *sta = tx->sta;
387 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
388 u32 staflags;
394 DECLARE_MAC_BUF(mac); 389 DECLARE_MAC_BUF(mac);
395 390
396 if (unlikely(!sta || 391 if (unlikely(!sta ||
@@ -398,9 +393,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
398 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) 393 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
399 return TX_CONTINUE; 394 return TX_CONTINUE;
400 395
401 if (unlikely((sta->flags & WLAN_STA_PS) && 396 staflags = get_sta_flags(sta);
402 !(sta->flags & WLAN_STA_PSPOLL))) { 397
403 struct ieee80211_tx_packet_data *pkt_data; 398 if (unlikely((staflags & WLAN_STA_PS) &&
399 !(staflags & WLAN_STA_PSPOLL))) {
404#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 400#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
405 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 401 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
406 "before %d)\n", 402 "before %d)\n",
@@ -411,11 +407,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
411 purge_old_ps_buffers(tx->local); 407 purge_old_ps_buffers(tx->local);
412 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 408 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
413 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); 409 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
410#ifdef MAC80211_VERBOSE_PS_DEBUG
414 if (net_ratelimit()) { 411 if (net_ratelimit()) {
415 printk(KERN_DEBUG "%s: STA %s TX " 412 printk(KERN_DEBUG "%s: STA %s TX "
416 "buffer full - dropping oldest frame\n", 413 "buffer full - dropping oldest frame\n",
417 tx->dev->name, print_mac(mac, sta->addr)); 414 tx->dev->name, print_mac(mac, sta->addr));
418 } 415 }
416#endif
419 dev_kfree_skb(old); 417 dev_kfree_skb(old);
420 } else 418 } else
421 tx->local->total_ps_buffered++; 419 tx->local->total_ps_buffered++;
@@ -424,24 +422,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
424 if (skb_queue_empty(&sta->ps_tx_buf)) 422 if (skb_queue_empty(&sta->ps_tx_buf))
425 sta_info_set_tim_bit(sta); 423 sta_info_set_tim_bit(sta);
426 424
427 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; 425 info->control.jiffies = jiffies;
428 pkt_data->jiffies = jiffies;
429 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 426 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
430 return TX_QUEUED; 427 return TX_QUEUED;
431 } 428 }
432#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 429#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
433 else if (unlikely(sta->flags & WLAN_STA_PS)) { 430 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
434 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " 431 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
435 "set -> send frame\n", tx->dev->name, 432 "set -> send frame\n", tx->dev->name,
436 print_mac(mac, sta->addr)); 433 print_mac(mac, sta->addr));
437 } 434 }
438#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 435#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
439 sta->flags &= ~WLAN_STA_PSPOLL; 436 clear_sta_flags(sta, WLAN_STA_PSPOLL);
440 437
441 return TX_CONTINUE; 438 return TX_CONTINUE;
442} 439}
443 440
444static ieee80211_tx_result 441static ieee80211_tx_result debug_noinline
445ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) 442ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
446{ 443{
447 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) 444 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
@@ -453,21 +450,22 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
453 return ieee80211_tx_h_multicast_ps_buf(tx); 450 return ieee80211_tx_h_multicast_ps_buf(tx);
454} 451}
455 452
456static ieee80211_tx_result 453static ieee80211_tx_result debug_noinline
457ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) 454ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
458{ 455{
459 struct ieee80211_key *key; 456 struct ieee80211_key *key;
457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
460 u16 fc = tx->fc; 458 u16 fc = tx->fc;
461 459
462 if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 460 if (unlikely(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
463 tx->key = NULL; 461 tx->key = NULL;
464 else if (tx->sta && (key = rcu_dereference(tx->sta->key))) 462 else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
465 tx->key = key; 463 tx->key = key;
466 else if ((key = rcu_dereference(tx->sdata->default_key))) 464 else if ((key = rcu_dereference(tx->sdata->default_key)))
467 tx->key = key; 465 tx->key = key;
468 else if (tx->sdata->drop_unencrypted && 466 else if (tx->sdata->drop_unencrypted &&
469 !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && 467 !(info->flags & IEEE80211_TX_CTL_EAPOL_FRAME) &&
470 !(tx->flags & IEEE80211_TX_INJECTED)) { 468 !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
471 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 469 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
472 return TX_DROP; 470 return TX_DROP;
473 } else 471 } else
@@ -496,15 +494,154 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
496 } 494 }
497 495
498 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 496 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
499 tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 497 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
500 498
501 return TX_CONTINUE; 499 return TX_CONTINUE;
502} 500}
503 501
504static ieee80211_tx_result 502static ieee80211_tx_result debug_noinline
503ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
504{
505 struct rate_selection rsel;
506 struct ieee80211_supported_band *sband;
507 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
508
509 sband = tx->local->hw.wiphy->bands[tx->channel->band];
510
511 if (likely(tx->rate_idx < 0)) {
512 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
513 tx->rate_idx = rsel.rate_idx;
514 if (unlikely(rsel.probe_idx >= 0)) {
515 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
516 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
517 info->control.alt_retry_rate_idx = tx->rate_idx;
518 tx->rate_idx = rsel.probe_idx;
519 } else
520 info->control.alt_retry_rate_idx = -1;
521
522 if (unlikely(tx->rate_idx < 0))
523 return TX_DROP;
524 } else
525 info->control.alt_retry_rate_idx = -1;
526
527 if (tx->sdata->bss_conf.use_cts_prot &&
528 (tx->flags & IEEE80211_TX_FRAGMENTED) && (rsel.nonerp_idx >= 0)) {
529 tx->last_frag_rate_idx = tx->rate_idx;
530 if (rsel.probe_idx >= 0)
531 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
532 else
533 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
534 tx->rate_idx = rsel.nonerp_idx;
535 info->tx_rate_idx = rsel.nonerp_idx;
536 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
537 } else {
538 tx->last_frag_rate_idx = tx->rate_idx;
539 info->tx_rate_idx = tx->rate_idx;
540 }
541 info->tx_rate_idx = tx->rate_idx;
542
543 return TX_CONTINUE;
544}
545
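
ieee80211_tx_h_rate_ctrl above only asks rate control for a rate when none was forced earlier (rate_idx < 0) and, when rate control wants to probe a faster rate, keeps the normal choice as the retry fallback. A small sketch of that bookkeeping follows, with invented names (rate_pick, tx_state, pick_tx_rate) rather than mac80211's.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of what rate control hands back. */
struct rate_pick {
    int rate_idx;   /* rate to use normally */
    int probe_idx;  /* >= 0 if rate control wants to probe a higher rate */
};

struct tx_state {
    int rate_idx;           /* -1 until a rate has been chosen */
    int alt_retry_rate_idx; /* fallback rate for retransmissions, -1 = none */
    bool probing;
};

/* Returns false when no usable rate exists (the handler would TX_DROP). */
static bool pick_tx_rate(struct tx_state *tx, const struct rate_pick *sel)
{
    if (tx->rate_idx < 0) {
        tx->rate_idx = sel->rate_idx;
        if (sel->probe_idx >= 0) {
            /* Transmit at the probe rate, but fall back to the
             * rate-control choice if the probe needs retries. */
            tx->alt_retry_rate_idx = tx->rate_idx;
            tx->rate_idx = sel->probe_idx;
            tx->probing = true;
        } else {
            tx->alt_retry_rate_idx = -1;
        }
        if (tx->rate_idx < 0)
            return false;
    } else {
        /* Rate was forced earlier (e.g. via radiotap injection). */
        tx->alt_retry_rate_idx = -1;
    }
    return true;
}

int main(void)
{
    struct tx_state tx = { .rate_idx = -1, .alt_retry_rate_idx = -1 };
    struct rate_pick sel = { .rate_idx = 2, .probe_idx = 5 };

    if (pick_tx_rate(&tx, &sel))
        printf("tx at idx %d, retry at idx %d, probing=%d\n",
               tx.rate_idx, tx.alt_retry_rate_idx, tx.probing);
    return 0;
}
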
546static ieee80211_tx_result debug_noinline
547ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
548{
549 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
550 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
551 struct ieee80211_supported_band *sband;
552
553 sband = tx->local->hw.wiphy->bands[tx->channel->band];
554
555 if (tx->sta)
556 info->control.aid = tx->sta->aid;
557
558 if (!info->control.retry_limit) {
559 if (!is_multicast_ether_addr(hdr->addr1)) {
560 int len = min_t(int, tx->skb->len + FCS_LEN,
561 tx->local->fragmentation_threshold);
562 if (len > tx->local->rts_threshold
563 && tx->local->rts_threshold <
564 IEEE80211_MAX_RTS_THRESHOLD) {
565 info->flags |= IEEE80211_TX_CTL_USE_RTS_CTS;
566 info->flags |=
567 IEEE80211_TX_CTL_LONG_RETRY_LIMIT;
568 info->control.retry_limit =
569 tx->local->long_retry_limit;
570 } else {
571 info->control.retry_limit =
572 tx->local->short_retry_limit;
573 }
574 } else {
575 info->control.retry_limit = 1;
576 }
577 }
578
579 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
580 /* Do not use multiple retry rates when sending fragmented
581 * frames.
582 * TODO: The last fragment could still use multiple retry
583 * rates. */
584 info->control.alt_retry_rate_idx = -1;
585 }
586
587 /* Use CTS protection for unicast frames sent using extended rates if
588 * there are associated non-ERP stations and RTS/CTS is not configured
589 * for the frame. */
590 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
591 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_ERP_G) &&
592 (tx->flags & IEEE80211_TX_UNICAST) &&
593 tx->sdata->bss_conf.use_cts_prot &&
594 !(info->flags & IEEE80211_TX_CTL_USE_RTS_CTS))
595 info->flags |= IEEE80211_TX_CTL_USE_CTS_PROTECT;
596
597 /* Transmit data frames using short preambles if the driver supports
598 * short preambles at the selected rate and short preambles are
599 * available on the network at the current point in time. */
600 if (ieee80211_is_data(hdr->frame_control) &&
601 (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
602 tx->sdata->bss_conf.use_short_preamble &&
603 (!tx->sta || test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))) {
604 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
605 }
606
607 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
608 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
609 struct ieee80211_rate *rate;
610 s8 baserate = -1;
611 int idx;
612
613 /* Do not use multiple retry rates when using RTS/CTS */
614 info->control.alt_retry_rate_idx = -1;
615
616 /* Use min(data rate, max base rate) as CTS/RTS rate */
617 rate = &sband->bitrates[tx->rate_idx];
618
619 for (idx = 0; idx < sband->n_bitrates; idx++) {
620 if (sband->bitrates[idx].bitrate > rate->bitrate)
621 continue;
622 if (tx->sdata->basic_rates & BIT(idx) &&
623 (baserate < 0 ||
624 (sband->bitrates[baserate].bitrate
625 < sband->bitrates[idx].bitrate)))
626 baserate = idx;
627 }
628
629 if (baserate >= 0)
630 info->control.rts_cts_rate_idx = baserate;
631 else
632 info->control.rts_cts_rate_idx = 0;
633 }
634
635 if (tx->sta)
636 info->control.aid = tx->sta->aid;
637
638 return TX_CONTINUE;
639}
640
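
The RTS/CTS rate loop near the end of ieee80211_tx_h_misc picks, per the comment, min(data rate, max base rate): the fastest rate that is both marked basic and not faster than the selected data rate, falling back to index 0. The self-contained sketch below mirrors that loop with simplified types (struct bitrate and a plain bitmask stand in for ieee80211_rate and sdata->basic_rates).

#include <stdint.h>
#include <stdio.h>

/* Simplified bitrate table entry. */
struct bitrate {
    int bitrate; /* in 100 kbit/s units, e.g. 10 == 1 Mbit/s */
};

/*
 * Pick the RTS/CTS rate: the highest rate that is both a basic rate
 * (bit set in basic_rates) and not faster than the selected data rate.
 * Falls back to index 0 when no basic rate qualifies.
 */
static int pick_rts_cts_rate(const struct bitrate *rates, int n_rates,
                             uint32_t basic_rates, int data_rate_idx)
{
    int base = -1, idx;

    for (idx = 0; idx < n_rates; idx++) {
        if (rates[idx].bitrate > rates[data_rate_idx].bitrate)
            continue;
        if ((basic_rates & (1u << idx)) &&
            (base < 0 || rates[base].bitrate < rates[idx].bitrate))
            base = idx;
    }
    return base >= 0 ? base : 0;
}

int main(void)
{
    /* 1, 2, 5.5, 11, 6, 12, 24 Mbit/s in 100 kbit/s units */
    struct bitrate rates[] = { {10}, {20}, {55}, {110}, {60}, {120}, {240} };
    uint32_t basic = (1u << 0) | (1u << 1) | (1u << 3); /* 1, 2, 11 Mbit/s */

    /* Data frame at 24 Mbit/s (idx 6) -> protect with 11 Mbit/s (idx 3). */
    printf("rts/cts rate idx = %d\n",
           pick_rts_cts_rate(rates, 7, basic, 6));
    return 0;
}
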
641static ieee80211_tx_result debug_noinline
505ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) 642ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
506{ 643{
507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 644 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
508 size_t hdrlen, per_fragm, num_fragm, payload_len, left; 645 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
509 struct sk_buff **frags, *first, *frag; 646 struct sk_buff **frags, *first, *frag;
510 int i; 647 int i;
@@ -515,9 +652,19 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
515 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 652 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
516 return TX_CONTINUE; 653 return TX_CONTINUE;
517 654
655 /*
656 * Warn when submitting a fragmented A-MPDU frame and drop it.
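657 * This scenario is handled in __ieee80211_tx_prepare, but extra
658 * caution is taken here as a fragmented A-MPDU may cause a Tx stop.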
659 */
660 if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU ||
661 skb_get_queue_mapping(tx->skb) >=
662 ieee80211_num_regular_queues(&tx->local->hw)))
663 return TX_DROP;
664
518 first = tx->skb; 665 first = tx->skb;
519 666
520 hdrlen = ieee80211_get_hdrlen(tx->fc); 667 hdrlen = ieee80211_hdrlen(hdr->frame_control);
521 payload_len = first->len - hdrlen; 668 payload_len = first->len - hdrlen;
522 per_fragm = frag_threshold - hdrlen - FCS_LEN; 669 per_fragm = frag_threshold - hdrlen - FCS_LEN;
523 num_fragm = DIV_ROUND_UP(payload_len, per_fragm); 670 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
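
The arithmetic at the end of this hunk determines the fragment count: each fragment repeats the 802.11 header and FCS, so only frag_threshold - hdrlen - FCS_LEN payload bytes fit per fragment. A standalone version of the same calculation, with a worked example:

#include <stdio.h>

#define FCS_LEN 4
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * How many MPDUs a frame is split into: every fragment carries the full
 * 802.11 header and FCS, so only (threshold - hdrlen - FCS) bytes of
 * payload fit per fragment.
 */
static unsigned int num_fragments(unsigned int frame_len, unsigned int hdrlen,
                                  unsigned int frag_threshold)
{
    unsigned int payload_len = frame_len - hdrlen;
    unsigned int per_fragm = frag_threshold - hdrlen - FCS_LEN;

    return DIV_ROUND_UP(payload_len, per_fragm);
}

int main(void)
{
    /* 1500-byte payload behind a 24-byte header, threshold 256 bytes:
     * 228 payload bytes per fragment -> 7 fragments. */
    printf("%u fragments\n", num_fragments(1500 + 24, 24, 256));
    return 0;
}
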
@@ -558,6 +705,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
558 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); 705 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
559 copylen = left > per_fragm ? per_fragm : left; 706 copylen = left > per_fragm ? per_fragm : left;
560 memcpy(skb_put(frag, copylen), pos, copylen); 707 memcpy(skb_put(frag, copylen), pos, copylen);
708 memcpy(frag->cb, first->cb, sizeof(frag->cb));
709 skb_copy_queue_mapping(frag, first);
561 710
562 pos += copylen; 711 pos += copylen;
563 left -= copylen; 712 left -= copylen;
@@ -570,7 +719,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
570 return TX_CONTINUE; 719 return TX_CONTINUE;
571 720
572 fail: 721 fail:
573 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
574 if (frags) { 722 if (frags) {
575 for (i = 0; i < num_fragm - 1; i++) 723 for (i = 0; i < num_fragm - 1; i++)
576 if (frags[i]) 724 if (frags[i])
@@ -581,7 +729,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
581 return TX_DROP; 729 return TX_DROP;
582} 730}
583 731
584static ieee80211_tx_result 732static ieee80211_tx_result debug_noinline
585ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) 733ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
586{ 734{
587 if (!tx->key) 735 if (!tx->key)
@@ -601,236 +749,57 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
601 return TX_DROP; 749 return TX_DROP;
602} 750}
603 751
604static ieee80211_tx_result 752static ieee80211_tx_result debug_noinline
605ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) 753ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
606{ 754{
607 struct rate_selection rsel; 755 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
608 struct ieee80211_supported_band *sband; 756 int next_len, i;
609 757 int group_addr = is_multicast_ether_addr(hdr->addr1);
610 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
611
612 if (likely(!tx->rate)) {
613 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
614 tx->rate = rsel.rate;
615 if (unlikely(rsel.probe)) {
616 tx->control->flags |=
617 IEEE80211_TXCTL_RATE_CTRL_PROBE;
618 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
619 tx->control->alt_retry_rate = tx->rate;
620 tx->rate = rsel.probe;
621 } else
622 tx->control->alt_retry_rate = NULL;
623
624 if (!tx->rate)
625 return TX_DROP;
626 } else
627 tx->control->alt_retry_rate = NULL;
628 758
629 if (tx->sdata->bss_conf.use_cts_prot && 759 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
630 (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) { 760 hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
631 tx->last_frag_rate = tx->rate; 761 return TX_CONTINUE;
632 if (rsel.probe)
633 tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG;
634 else
635 tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG;
636 tx->rate = rsel.nonerp;
637 tx->control->tx_rate = rsel.nonerp;
638 tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
639 } else {
640 tx->last_frag_rate = tx->rate;
641 tx->control->tx_rate = tx->rate;
642 } 762 }
643 tx->control->tx_rate = tx->rate;
644 763
645 return TX_CONTINUE; 764 hdr->duration_id = ieee80211_duration(tx, group_addr,
646} 765 tx->extra_frag[0]->len);
647
648static ieee80211_tx_result
649ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
650{
651 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
652 u16 fc = le16_to_cpu(hdr->frame_control);
653 u16 dur;
654 struct ieee80211_tx_control *control = tx->control;
655 766
656 if (!control->retry_limit) { 767 for (i = 0; i < tx->num_extra_frag; i++) {
657 if (!is_multicast_ether_addr(hdr->addr1)) { 768 if (i + 1 < tx->num_extra_frag) {
658 if (tx->skb->len + FCS_LEN > tx->local->rts_threshold 769 next_len = tx->extra_frag[i + 1]->len;
659 && tx->local->rts_threshold <
660 IEEE80211_MAX_RTS_THRESHOLD) {
661 control->flags |=
662 IEEE80211_TXCTL_USE_RTS_CTS;
663 control->flags |=
664 IEEE80211_TXCTL_LONG_RETRY_LIMIT;
665 control->retry_limit =
666 tx->local->long_retry_limit;
667 } else {
668 control->retry_limit =
669 tx->local->short_retry_limit;
670 }
671 } else { 770 } else {
672 control->retry_limit = 1; 771 next_len = 0;
772 tx->rate_idx = tx->last_frag_rate_idx;
673 } 773 }
674 }
675 774
676 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 775 hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
677 /* Do not use multiple retry rates when sending fragmented 776 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
678 * frames.
679 * TODO: The last fragment could still use multiple retry
680 * rates. */
681 control->alt_retry_rate = NULL;
682 }
683
684 /* Use CTS protection for unicast frames sent using extended rates if
685 * there are associated non-ERP stations and RTS/CTS is not configured
686 * for the frame. */
687 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
688 (tx->rate->flags & IEEE80211_RATE_ERP_G) &&
689 (tx->flags & IEEE80211_TX_UNICAST) &&
690 tx->sdata->bss_conf.use_cts_prot &&
691 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
692 control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT;
693
694 /* Transmit data frames using short preambles if the driver supports
695 * short preambles at the selected rate and short preambles are
696 * available on the network at the current point in time. */
697 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
698 (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
699 tx->sdata->bss_conf.use_short_preamble &&
700 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
701 tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
702 }
703
704 /* Setup duration field for the first fragment of the frame. Duration
705 * for remaining fragments will be updated when they are being sent
706 * to low-level driver in ieee80211_tx(). */
707 dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1),
708 (tx->flags & IEEE80211_TX_FRAGMENTED) ?
709 tx->extra_frag[0]->len : 0);
710 hdr->duration_id = cpu_to_le16(dur);
711
712 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
713 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
714 struct ieee80211_supported_band *sband;
715 struct ieee80211_rate *rate, *baserate;
716 int idx;
717
718 sband = tx->local->hw.wiphy->bands[
719 tx->local->hw.conf.channel->band];
720
721 /* Do not use multiple retry rates when using RTS/CTS */
722 control->alt_retry_rate = NULL;
723
724 /* Use min(data rate, max base rate) as CTS/RTS rate */
725 rate = tx->rate;
726 baserate = NULL;
727
728 for (idx = 0; idx < sband->n_bitrates; idx++) {
729 if (sband->bitrates[idx].bitrate > rate->bitrate)
730 continue;
731 if (tx->sdata->basic_rates & BIT(idx) &&
732 (!baserate ||
733 (baserate->bitrate < sband->bitrates[idx].bitrate)))
734 baserate = &sband->bitrates[idx];
735 }
736
737 if (baserate)
738 control->rts_cts_rate = baserate;
739 else
740 control->rts_cts_rate = &sband->bitrates[0];
741 }
742
743 if (tx->sta) {
744 control->aid = tx->sta->aid;
745 tx->sta->tx_packets++;
746 tx->sta->tx_fragments++;
747 tx->sta->tx_bytes += tx->skb->len;
748 if (tx->extra_frag) {
749 int i;
750 tx->sta->tx_fragments += tx->num_extra_frag;
751 for (i = 0; i < tx->num_extra_frag; i++) {
752 tx->sta->tx_bytes +=
753 tx->extra_frag[i]->len;
754 }
755 }
756 } 777 }
757 778
758 return TX_CONTINUE; 779 return TX_CONTINUE;
759} 780}
760 781
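
ieee80211_tx_h_calculate_duration, new in this patch, chains the duration fields: fragment i reserves the medium based on the length of fragment i+1, the last fragment passes 0 and switches to last_frag_rate_idx. The sketch below reproduces only that chaining; duration_stub is a placeholder and does not attempt the real 802.11 airtime formula used by ieee80211_duration().

#include <stdio.h>

/*
 * Stand-in for ieee80211_duration(); the real calculation (SIFS, ACK and
 * next-fragment airtime) lives in mac80211 and is not reproduced here.
 */
static unsigned int duration_stub(int group_addr, unsigned int next_frag_len)
{
    return group_addr ? 0 : 100 + next_frag_len / 10; /* placeholder */
}

/*
 * Fill per-fragment duration fields: fragment i must reserve the medium
 * long enough for fragment i+1, and the last fragment reserves nothing
 * beyond its own ACK (next_len == 0).
 */
static void set_durations(unsigned int *dur, const unsigned int *frag_len,
                          int nfrags, int group_addr)
{
    int i;

    for (i = 0; i < nfrags; i++) {
        unsigned int next_len = (i + 1 < nfrags) ? frag_len[i + 1] : 0;

        dur[i] = duration_stub(group_addr, next_len);
    }
}

int main(void)
{
    unsigned int len[] = { 228, 228, 120 };
    unsigned int dur[3];

    set_durations(dur, len, 3, 0);
    printf("durations: %u %u %u\n", dur[0], dur[1], dur[2]);
    return 0;
}
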
761static ieee80211_tx_result 782static ieee80211_tx_result debug_noinline
762ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) 783ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
763{ 784{
764 struct ieee80211_local *local = tx->local; 785 int i;
765 struct sk_buff *skb = tx->skb;
766 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
767 u32 load = 0, hdrtime;
768 struct ieee80211_rate *rate = tx->rate;
769
770 /* TODO: this could be part of tx_status handling, so that the number
771 * of retries would be known; TX rate should in that case be stored
772 * somewhere with the packet */
773
774 /* Estimate total channel use caused by this frame */
775
776 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
777 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
778
779 if (tx->channel->band == IEEE80211_BAND_5GHZ ||
780 (tx->channel->band == IEEE80211_BAND_2GHZ &&
781 rate->flags & IEEE80211_RATE_ERP_G))
782 hdrtime = CHAN_UTIL_HDR_SHORT;
783 else
784 hdrtime = CHAN_UTIL_HDR_LONG;
785
786 load = hdrtime;
787 if (!is_multicast_ether_addr(hdr->addr1))
788 load += hdrtime;
789
790 if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
791 load += 2 * hdrtime;
792 else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
793 load += hdrtime;
794 786
795 /* TODO: optimise again */ 787 if (!tx->sta)
796 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; 788 return TX_CONTINUE;
797 789
790 tx->sta->tx_packets++;
791 tx->sta->tx_fragments++;
792 tx->sta->tx_bytes += tx->skb->len;
798 if (tx->extra_frag) { 793 if (tx->extra_frag) {
799 int i; 794 tx->sta->tx_fragments += tx->num_extra_frag;
800 for (i = 0; i < tx->num_extra_frag; i++) { 795 for (i = 0; i < tx->num_extra_frag; i++)
801 load += 2 * hdrtime; 796 tx->sta->tx_bytes += tx->extra_frag[i]->len;
802 load += tx->extra_frag[i]->len *
803 tx->rate->bitrate;
804 }
805 } 797 }
806 798
807 /* Divide channel_use by 8 to avoid wrapping around the counter */
808 load >>= CHAN_UTIL_SHIFT;
809 local->channel_use_raw += load;
810 if (tx->sta)
811 tx->sta->channel_use_raw += load;
812 tx->sdata->channel_use_raw += load;
813
814 return TX_CONTINUE; 799 return TX_CONTINUE;
815} 800}
816 801
817 802
818typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *);
819static ieee80211_tx_handler ieee80211_tx_handlers[] =
820{
821 ieee80211_tx_h_check_assoc,
822 ieee80211_tx_h_sequence,
823 ieee80211_tx_h_ps_buf,
824 ieee80211_tx_h_select_key,
825 ieee80211_tx_h_michael_mic_add,
826 ieee80211_tx_h_fragment,
827 ieee80211_tx_h_encrypt,
828 ieee80211_tx_h_rate_ctrl,
829 ieee80211_tx_h_misc,
830 ieee80211_tx_h_load_stats,
831 NULL
832};
833
834/* actual transmit path */ 803/* actual transmit path */
835 804
836/* 805/*
@@ -854,12 +823,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 (struct ieee80211_radiotap_header *) skb->data; 823 (struct ieee80211_radiotap_header *) skb->data;
855 struct ieee80211_supported_band *sband; 824 struct ieee80211_supported_band *sband;
856 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 825 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
857 struct ieee80211_tx_control *control = tx->control; 826 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
858 827
859 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; 828 sband = tx->local->hw.wiphy->bands[tx->channel->band];
860 829
861 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 830 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
862 tx->flags |= IEEE80211_TX_INJECTED; 831 info->flags |= IEEE80211_TX_CTL_INJECTED;
863 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 832 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
864 833
865 /* 834 /*
@@ -896,7 +865,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
896 r = &sband->bitrates[i]; 865 r = &sband->bitrates[i];
897 866
898 if (r->bitrate == target_rate) { 867 if (r->bitrate == target_rate) {
899 tx->rate = r; 868 tx->rate_idx = i;
900 break; 869 break;
901 } 870 }
902 } 871 }
@@ -907,7 +876,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
907 * radiotap uses 0 for 1st ant, mac80211 is 1 for 876 * radiotap uses 0 for 1st ant, mac80211 is 1 for
908 * 1st ant 877 * 1st ant
909 */ 878 */
910 control->antenna_sel_tx = (*iterator.this_arg) + 1; 879 info->antenna_sel_tx = (*iterator.this_arg) + 1;
911 break; 880 break;
912 881
913#if 0 882#if 0
@@ -931,8 +900,8 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
931 skb_trim(skb, skb->len - FCS_LEN); 900 skb_trim(skb, skb->len - FCS_LEN);
932 } 901 }
933 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) 902 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
934 control->flags &= 903 info->flags &=
935 ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; 904 ~IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
936 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) 905 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
937 tx->flags |= IEEE80211_TX_FRAGMENTED; 906 tx->flags |= IEEE80211_TX_FRAGMENTED;
938 break; 907 break;
@@ -967,12 +936,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
967static ieee80211_tx_result 936static ieee80211_tx_result
968__ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 937__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
969 struct sk_buff *skb, 938 struct sk_buff *skb,
970 struct net_device *dev, 939 struct net_device *dev)
971 struct ieee80211_tx_control *control)
972{ 940{
973 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 941 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
974 struct ieee80211_hdr *hdr; 942 struct ieee80211_hdr *hdr;
975 struct ieee80211_sub_if_data *sdata; 943 struct ieee80211_sub_if_data *sdata;
944 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
976 945
977 int hdrlen; 946 int hdrlen;
978 947
@@ -981,7 +950,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
981 tx->dev = dev; /* use original interface */ 950 tx->dev = dev; /* use original interface */
982 tx->local = local; 951 tx->local = local;
983 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); 952 tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
984 tx->control = control; 953 tx->channel = local->hw.conf.channel;
954 tx->rate_idx = -1;
955 tx->last_frag_rate_idx = -1;
985 /* 956 /*
986 * Set this flag (used below to indicate "automatic fragmentation"), 957 * Set this flag (used below to indicate "automatic fragmentation"),
987 * it will be cleared/left by radiotap as desired. 958 * it will be cleared/left by radiotap as desired.
@@ -1008,34 +979,33 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1008 979
1009 if (is_multicast_ether_addr(hdr->addr1)) { 980 if (is_multicast_ether_addr(hdr->addr1)) {
1010 tx->flags &= ~IEEE80211_TX_UNICAST; 981 tx->flags &= ~IEEE80211_TX_UNICAST;
1011 control->flags |= IEEE80211_TXCTL_NO_ACK; 982 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1012 } else { 983 } else {
1013 tx->flags |= IEEE80211_TX_UNICAST; 984 tx->flags |= IEEE80211_TX_UNICAST;
1014 control->flags &= ~IEEE80211_TXCTL_NO_ACK; 985 info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
1015 } 986 }
1016 987
1017 if (tx->flags & IEEE80211_TX_FRAGMENTED) { 988 if (tx->flags & IEEE80211_TX_FRAGMENTED) {
1018 if ((tx->flags & IEEE80211_TX_UNICAST) && 989 if ((tx->flags & IEEE80211_TX_UNICAST) &&
1019 skb->len + FCS_LEN > local->fragmentation_threshold && 990 skb->len + FCS_LEN > local->fragmentation_threshold &&
1020 !local->ops->set_frag_threshold) 991 !local->ops->set_frag_threshold &&
992 !(info->flags & IEEE80211_TX_CTL_AMPDU))
1021 tx->flags |= IEEE80211_TX_FRAGMENTED; 993 tx->flags |= IEEE80211_TX_FRAGMENTED;
1022 else 994 else
1023 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 995 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1024 } 996 }
1025 997
1026 if (!tx->sta) 998 if (!tx->sta)
1027 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 999 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1028 else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { 1000 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1029 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1001 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1030 tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT;
1031 }
1032 1002
1033 hdrlen = ieee80211_get_hdrlen(tx->fc); 1003 hdrlen = ieee80211_get_hdrlen(tx->fc);
1034 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 1004 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1035 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 1005 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1036 tx->ethertype = (pos[0] << 8) | pos[1]; 1006 tx->ethertype = (pos[0] << 8) | pos[1];
1037 } 1007 }
1038 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1008 info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
1039 1009
1040 return TX_CONTINUE; 1010 return TX_CONTINUE;
1041} 1011}
@@ -1045,14 +1015,12 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1045 */ 1015 */
1046static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 1016static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1047 struct sk_buff *skb, 1017 struct sk_buff *skb,
1048 struct net_device *mdev, 1018 struct net_device *mdev)
1049 struct ieee80211_tx_control *control)
1050{ 1019{
1051 struct ieee80211_tx_packet_data *pkt_data; 1020 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1052 struct net_device *dev; 1021 struct net_device *dev;
1053 1022
1054 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1023 dev = dev_get_by_index(&init_net, info->control.ifindex);
1055 dev = dev_get_by_index(&init_net, pkt_data->ifindex);
1056 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { 1024 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) {
1057 dev_put(dev); 1025 dev_put(dev);
1058 dev = NULL; 1026 dev = NULL;
@@ -1060,7 +1028,7 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1060 if (unlikely(!dev)) 1028 if (unlikely(!dev))
1061 return -ENODEV; 1029 return -ENODEV;
1062 /* initialises tx with control */ 1030 /* initialises tx with control */
1063 __ieee80211_tx_prepare(tx, skb, dev, control); 1031 __ieee80211_tx_prepare(tx, skb, dev);
1064 dev_put(dev); 1032 dev_put(dev);
1065 return 0; 1033 return 0;
1066} 1034}
@@ -1068,50 +1036,49 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1068static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1036static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1069 struct ieee80211_tx_data *tx) 1037 struct ieee80211_tx_data *tx)
1070{ 1038{
1071 struct ieee80211_tx_control *control = tx->control; 1039 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1072 int ret, i; 1040 int ret, i;
1073 1041
1074 if (!ieee80211_qdisc_installed(local->mdev) && 1042 if (netif_subqueue_stopped(local->mdev, skb))
1075 __ieee80211_queue_stopped(local, 0)) {
1076 netif_stop_queue(local->mdev);
1077 return IEEE80211_TX_AGAIN; 1043 return IEEE80211_TX_AGAIN;
1078 } 1044
1079 if (skb) { 1045 if (skb) {
1080 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1046 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1081 "TX to low-level driver", skb); 1047 "TX to low-level driver", skb);
1082 ret = local->ops->tx(local_to_hw(local), skb, control); 1048 ret = local->ops->tx(local_to_hw(local), skb);
1083 if (ret) 1049 if (ret)
1084 return IEEE80211_TX_AGAIN; 1050 return IEEE80211_TX_AGAIN;
1085 local->mdev->trans_start = jiffies; 1051 local->mdev->trans_start = jiffies;
1086 ieee80211_led_tx(local, 1); 1052 ieee80211_led_tx(local, 1);
1087 } 1053 }
1088 if (tx->extra_frag) { 1054 if (tx->extra_frag) {
1089 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1090 IEEE80211_TXCTL_USE_CTS_PROTECT |
1091 IEEE80211_TXCTL_CLEAR_PS_FILT |
1092 IEEE80211_TXCTL_FIRST_FRAGMENT);
1093 for (i = 0; i < tx->num_extra_frag; i++) { 1055 for (i = 0; i < tx->num_extra_frag; i++) {
1094 if (!tx->extra_frag[i]) 1056 if (!tx->extra_frag[i])
1095 continue; 1057 continue;
1096 if (__ieee80211_queue_stopped(local, control->queue)) 1058 info = IEEE80211_SKB_CB(tx->extra_frag[i]);
1059 info->flags &= ~(IEEE80211_TX_CTL_USE_RTS_CTS |
1060 IEEE80211_TX_CTL_USE_CTS_PROTECT |
1061 IEEE80211_TX_CTL_CLEAR_PS_FILT |
1062 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1063 if (netif_subqueue_stopped(local->mdev,
1064 tx->extra_frag[i]))
1097 return IEEE80211_TX_FRAG_AGAIN; 1065 return IEEE80211_TX_FRAG_AGAIN;
1098 if (i == tx->num_extra_frag) { 1066 if (i == tx->num_extra_frag) {
1099 control->tx_rate = tx->last_frag_rate; 1067 info->tx_rate_idx = tx->last_frag_rate_idx;
1100 1068
1101 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) 1069 if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG)
1102 control->flags |= 1070 info->flags |=
1103 IEEE80211_TXCTL_RATE_CTRL_PROBE; 1071 IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1104 else 1072 else
1105 control->flags &= 1073 info->flags &=
1106 ~IEEE80211_TXCTL_RATE_CTRL_PROBE; 1074 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1107 } 1075 }
1108 1076
1109 ieee80211_dump_frame(wiphy_name(local->hw.wiphy), 1077 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1110 "TX to low-level driver", 1078 "TX to low-level driver",
1111 tx->extra_frag[i]); 1079 tx->extra_frag[i]);
1112 ret = local->ops->tx(local_to_hw(local), 1080 ret = local->ops->tx(local_to_hw(local),
1113 tx->extra_frag[i], 1081 tx->extra_frag[i]);
1114 control);
1115 if (ret) 1082 if (ret)
1116 return IEEE80211_TX_FRAG_AGAIN; 1083 return IEEE80211_TX_FRAG_AGAIN;
1117 local->mdev->trans_start = jiffies; 1084 local->mdev->trans_start = jiffies;
@@ -1124,17 +1091,65 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1124 return IEEE80211_TX_OK; 1091 return IEEE80211_TX_OK;
1125} 1092}
1126 1093
1127static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, 1094/*
1128 struct ieee80211_tx_control *control) 1095 * Invoke TX handlers, return 0 on success and non-zero if the
1096 * frame was dropped or queued.
1097 */
1098static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1099{
1100 struct sk_buff *skb = tx->skb;
1101 ieee80211_tx_result res = TX_DROP;
1102 int i;
1103
1104#define CALL_TXH(txh) \
1105 res = txh(tx); \
1106 if (res != TX_CONTINUE) \
1107 goto txh_done;
1108
1109 CALL_TXH(ieee80211_tx_h_check_assoc)
1110 CALL_TXH(ieee80211_tx_h_sequence)
1111 CALL_TXH(ieee80211_tx_h_ps_buf)
1112 CALL_TXH(ieee80211_tx_h_select_key)
1113 CALL_TXH(ieee80211_tx_h_michael_mic_add)
1114 CALL_TXH(ieee80211_tx_h_rate_ctrl)
1115 CALL_TXH(ieee80211_tx_h_misc)
1116 CALL_TXH(ieee80211_tx_h_fragment)
1117 /* handlers after fragment must be aware of tx info fragmentation! */
1118 CALL_TXH(ieee80211_tx_h_encrypt)
1119 CALL_TXH(ieee80211_tx_h_calculate_duration)
1120 CALL_TXH(ieee80211_tx_h_stats)
1121#undef CALL_TXH
1122
1123 txh_done:
1124 if (unlikely(res == TX_DROP)) {
1125 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1126 dev_kfree_skb(skb);
1127 for (i = 0; i < tx->num_extra_frag; i++)
1128 if (tx->extra_frag[i])
1129 dev_kfree_skb(tx->extra_frag[i]);
1130 kfree(tx->extra_frag);
1131 return -1;
1132 } else if (unlikely(res == TX_QUEUED)) {
1133 I802_DEBUG_INC(tx->local->tx_handlers_queued);
1134 return -1;
1135 }
1136
1137 return 0;
1138}
1139
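
invoke_tx_handlers replaces the old NULL-terminated ieee80211_tx_handlers[] array with a CALL_TXH macro that expands every handler call inline, so each call site stays visible (and the handlers can be kept out-of-line via debug_noinline). A miniature, compilable version of the same pattern with two toy handlers; check_len and mark_encrypted are invented, not mac80211 functions.

#include <stdio.h>

typedef enum { TX_CONTINUE, TX_DROP, TX_QUEUED } tx_result;

struct pkt { int len; int encrypted; };

/* Two toy handlers standing in for the real ieee80211_tx_h_* functions. */
static tx_result check_len(struct pkt *p)
{
    return p->len >= 10 ? TX_CONTINUE : TX_DROP;
}

static tx_result mark_encrypted(struct pkt *p)
{
    p->encrypted = 1;
    return TX_CONTINUE;
}

/*
 * Same shape as invoke_tx_handlers(): run handlers in a fixed order and
 * stop at the first one that does not return TX_CONTINUE. Spelling the
 * calls out with a macro, instead of walking a NULL-terminated
 * function-pointer array, keeps every call site visible to the compiler
 * and the debugger.
 */
static int run_handlers(struct pkt *p)
{
    tx_result res = TX_DROP;

#define CALL_TXH(txh)               \
    do {                            \
        res = txh(p);               \
        if (res != TX_CONTINUE)     \
            goto done;              \
    } while (0);

    CALL_TXH(check_len)
    CALL_TXH(mark_encrypted)
#undef CALL_TXH

done:
    if (res == TX_DROP)
        return -1;
    return 0;
}

int main(void)
{
    struct pkt p = { .len = 64 };

    printf("result=%d encrypted=%d\n", run_handlers(&p), p.encrypted);
    return 0;
}
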
1140static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1129{ 1141{
1130 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1142 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1131 struct sta_info *sta; 1143 struct sta_info *sta;
1132 ieee80211_tx_handler *handler;
1133 struct ieee80211_tx_data tx; 1144 struct ieee80211_tx_data tx;
1134 ieee80211_tx_result res = TX_DROP, res_prepare; 1145 ieee80211_tx_result res_prepare;
1135 int ret, i, retries = 0; 1146 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1147 int ret, i;
1148 u16 queue;
1149
1150 queue = skb_get_queue_mapping(skb);
1136 1151
1137 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1152 WARN_ON(test_bit(queue, local->queues_pending));
1138 1153
1139 if (unlikely(skb->len < 10)) { 1154 if (unlikely(skb->len < 10)) {
1140 dev_kfree_skb(skb); 1155 dev_kfree_skb(skb);
@@ -1144,7 +1159,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1144 rcu_read_lock(); 1159 rcu_read_lock();
1145 1160
1146 /* initialises tx */ 1161 /* initialises tx */
1147 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); 1162 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
1148 1163
1149 if (res_prepare == TX_DROP) { 1164 if (res_prepare == TX_DROP) {
1150 dev_kfree_skb(skb); 1165 dev_kfree_skb(skb);
@@ -1154,86 +1169,53 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1154 1169
1155 sta = tx.sta; 1170 sta = tx.sta;
1156 tx.channel = local->hw.conf.channel; 1171 tx.channel = local->hw.conf.channel;
1172 info->band = tx.channel->band;
1157 1173
1158 for (handler = ieee80211_tx_handlers; *handler != NULL; 1174 if (invoke_tx_handlers(&tx))
1159 handler++) { 1175 goto out;
1160 res = (*handler)(&tx);
1161 if (res != TX_CONTINUE)
1162 break;
1163 }
1164
1165 skb = tx.skb; /* handlers are allowed to change skb */
1166
1167 if (unlikely(res == TX_DROP)) {
1168 I802_DEBUG_INC(local->tx_handlers_drop);
1169 goto drop;
1170 }
1171
1172 if (unlikely(res == TX_QUEUED)) {
1173 I802_DEBUG_INC(local->tx_handlers_queued);
1174 rcu_read_unlock();
1175 return 0;
1176 }
1177
1178 if (tx.extra_frag) {
1179 for (i = 0; i < tx.num_extra_frag; i++) {
1180 int next_len, dur;
1181 struct ieee80211_hdr *hdr =
1182 (struct ieee80211_hdr *)
1183 tx.extra_frag[i]->data;
1184
1185 if (i + 1 < tx.num_extra_frag) {
1186 next_len = tx.extra_frag[i + 1]->len;
1187 } else {
1188 next_len = 0;
1189 tx.rate = tx.last_frag_rate;
1190 }
1191 dur = ieee80211_duration(&tx, 0, next_len);
1192 hdr->duration_id = cpu_to_le16(dur);
1193 }
1194 }
1195 1176
1196retry: 1177retry:
1197 ret = __ieee80211_tx(local, skb, &tx); 1178 ret = __ieee80211_tx(local, skb, &tx);
1198 if (ret) { 1179 if (ret) {
1199 struct ieee80211_tx_stored_packet *store = 1180 struct ieee80211_tx_stored_packet *store;
1200 &local->pending_packet[control->queue]; 1181
1182 /*
1183 * Since there are no fragmented frames on A-MPDU
1184 * queues, there's no reason for a driver to reject
1185 * a frame there, warn and drop it.
1186 */
1187 if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw)))
1188 goto drop;
1189
1190 store = &local->pending_packet[queue];
1201 1191
1202 if (ret == IEEE80211_TX_FRAG_AGAIN) 1192 if (ret == IEEE80211_TX_FRAG_AGAIN)
1203 skb = NULL; 1193 skb = NULL;
1204 set_bit(IEEE80211_LINK_STATE_PENDING, 1194 set_bit(queue, local->queues_pending);
1205 &local->state[control->queue]);
1206 smp_mb(); 1195 smp_mb();
1207 /* When the driver gets out of buffers during sending of 1196 /*
1208 * fragments and calls ieee80211_stop_queue, there is 1197 * When the driver gets out of buffers during sending of
1209 * a small window between IEEE80211_LINK_STATE_XOFF and 1198 * fragments and calls ieee80211_stop_queue, the netif
1210 * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer 1199 * subqueue is stopped. There is, however, a small window
1200 * in which the PENDING bit is not yet set. If a buffer
1211 * gets available in that window (i.e. driver calls 1201 * gets available in that window (i.e. driver calls
1212 * ieee80211_wake_queue), we would end up with ieee80211_tx 1202 * ieee80211_wake_queue), we would end up with ieee80211_tx
1213 * called with IEEE80211_LINK_STATE_PENDING. Prevent this by 1203 * called with the PENDING bit still set. Prevent this by
1214 * continuing transmitting here when that situation is 1204 * continuing transmitting here when that situation is
1215 * possible to have happened. */ 1205 * possible to have happened.
1216 if (!__ieee80211_queue_stopped(local, control->queue)) { 1206 */
1217 clear_bit(IEEE80211_LINK_STATE_PENDING, 1207 if (!__netif_subqueue_stopped(local->mdev, queue)) {
1218 &local->state[control->queue]); 1208 clear_bit(queue, local->queues_pending);
1219 retries++;
1220 /*
1221 * Driver bug, it's rejecting packets but
1222 * not stopping queues.
1223 */
1224 if (WARN_ON_ONCE(retries > 5))
1225 goto drop;
1226 goto retry; 1209 goto retry;
1227 } 1210 }
1228 memcpy(&store->control, control,
1229 sizeof(struct ieee80211_tx_control));
1230 store->skb = skb; 1211 store->skb = skb;
1231 store->extra_frag = tx.extra_frag; 1212 store->extra_frag = tx.extra_frag;
1232 store->num_extra_frag = tx.num_extra_frag; 1213 store->num_extra_frag = tx.num_extra_frag;
1233 store->last_frag_rate = tx.last_frag_rate; 1214 store->last_frag_rate_idx = tx.last_frag_rate_idx;
1234 store->last_frag_rate_ctrl_probe = 1215 store->last_frag_rate_ctrl_probe =
1235 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); 1216 !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG);
1236 } 1217 }
1218 out:
1237 rcu_read_unlock(); 1219 rcu_read_unlock();
1238 return 0; 1220 return 0;
1239 1221
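
The reworded comment in this hunk describes the race the per-queue PENDING bit closes: the TX path parks a rejected frame, and the driver may wake the queue between the park and the bit becoming visible. The C11 sketch below models just that protocol (set pending, full barrier, re-check stopped, otherwise un-park and retry); it uses userspace atomics in place of smp_mb()/__netif_subqueue_stopped() and a single thread to exercise both branches, so it illustrates the ordering argument rather than proving it.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of one TX queue: who is responsible for the stored frame? */
struct queue_state {
    atomic_bool stopped;  /* set by the "driver" when it is out of buffers */
    atomic_bool pending;  /* set by the TX path when it parked a frame */
};

/*
 * TX path after the driver rejected a frame: mark the queue pending,
 * then re-check "stopped". The explicit fence mirrors the smp_mb() in
 * ieee80211_tx(): it orders the pending store before the stopped check,
 * so a wake-up racing with us either sees pending set or leaves the
 * queue running for us to retry on.
 */
static bool park_or_retry(struct queue_state *q)
{
    atomic_store(&q->pending, true);
    atomic_thread_fence(memory_order_seq_cst);

    if (!atomic_load(&q->stopped)) {
        /* Driver already woke the queue in the window: retry now. */
        atomic_store(&q->pending, false);
        return false;           /* caller retransmits itself */
    }
    return true;                /* frame stays parked for the tasklet */
}

int main(void)
{
    struct queue_state q;

    atomic_init(&q.stopped, true);
    atomic_init(&q.pending, false);

    printf("parked=%d\n", park_or_retry(&q));   /* queue stopped: parked */
    atomic_store(&q.stopped, false);
    printf("parked=%d\n", park_or_retry(&q));   /* queue running: retry */
    return 0;
}
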
@@ -1250,24 +1232,57 @@ retry:
1250 1232
1251/* device xmit handlers */ 1233/* device xmit handlers */
1252 1234
1235static int ieee80211_skb_resize(struct ieee80211_local *local,
1236 struct sk_buff *skb,
1237 int head_need, bool may_encrypt)
1238{
1239 int tail_need = 0;
1240
1241 /*
1242 * This could be optimised: devices that do full hardware
1243 * crypto (including TKIP MMIC) need no tailroom... But we
1244 * have no drivers for such devices currently.
1245 */
1246 if (may_encrypt) {
1247 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1248 tail_need -= skb_tailroom(skb);
1249 tail_need = max_t(int, tail_need, 0);
1250 }
1251
1252 if (head_need || tail_need) {
1253 /* Sorry. Can't account for this any more */
1254 skb_orphan(skb);
1255 }
1256
1257 if (skb_header_cloned(skb))
1258 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1259 else
1260 I802_DEBUG_INC(local->tx_expand_skb_head);
1261
1262 if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
1263 printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
1264 wiphy_name(local->hw.wiphy));
1265 return -ENOMEM;
1266 }
1267
1268 /* update truesize too */
1269 skb->truesize += head_need + tail_need;
1270
1271 return 0;
1272}
1273
1253int ieee80211_master_start_xmit(struct sk_buff *skb, 1274int ieee80211_master_start_xmit(struct sk_buff *skb,
1254 struct net_device *dev) 1275 struct net_device *dev)
1255{ 1276{
1256 struct ieee80211_tx_control control; 1277 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1257 struct ieee80211_tx_packet_data *pkt_data;
1258 struct net_device *odev = NULL; 1278 struct net_device *odev = NULL;
1259 struct ieee80211_sub_if_data *osdata; 1279 struct ieee80211_sub_if_data *osdata;
1260 int headroom; 1280 int headroom;
1281 bool may_encrypt;
1261 int ret; 1282 int ret;
1262 1283
1263 /* 1284 if (info->control.ifindex)
1264 * copy control out of the skb so other people can use skb->cb 1285 odev = dev_get_by_index(&init_net, info->control.ifindex);
1265 */
1266 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1267 memset(&control, 0, sizeof(struct ieee80211_tx_control));
1268
1269 if (pkt_data->ifindex)
1270 odev = dev_get_by_index(&init_net, pkt_data->ifindex);
1271 if (unlikely(odev && !is_ieee80211_device(odev, dev))) { 1286 if (unlikely(odev && !is_ieee80211_device(odev, dev))) {
1272 dev_put(odev); 1287 dev_put(odev);
1273 odev = NULL; 1288 odev = NULL;
@@ -1280,32 +1295,25 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1280 dev_kfree_skb(skb); 1295 dev_kfree_skb(skb);
1281 return 0; 1296 return 0;
1282 } 1297 }
1298
1283 osdata = IEEE80211_DEV_TO_SUB_IF(odev); 1299 osdata = IEEE80211_DEV_TO_SUB_IF(odev);
1284 1300
1285 headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; 1301 may_encrypt = !(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT);
1286 if (skb_headroom(skb) < headroom) { 1302
1287 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { 1303 headroom = osdata->local->tx_headroom;
1288 dev_kfree_skb(skb); 1304 if (may_encrypt)
1289 dev_put(odev); 1305 headroom += IEEE80211_ENCRYPT_HEADROOM;
1290 return 0; 1306 headroom -= skb_headroom(skb);
1291 } 1307 headroom = max_t(int, 0, headroom);
1308
1309 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
1310 dev_kfree_skb(skb);
1311 dev_put(odev);
1312 return 0;
1292 } 1313 }
1293 1314
1294 control.vif = &osdata->vif; 1315 info->control.vif = &osdata->vif;
1295 control.type = osdata->vif.type; 1316 ret = ieee80211_tx(odev, skb);
1296 if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS)
1297 control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS;
1298 if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT)
1299 control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
1300 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE)
1301 control.flags |= IEEE80211_TXCTL_REQUEUE;
1302 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME)
1303 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME;
1304 if (pkt_data->flags & IEEE80211_TXPD_AMPDU)
1305 control.flags |= IEEE80211_TXCTL_AMPDU;
1306 control.queue = pkt_data->queue;
1307
1308 ret = ieee80211_tx(odev, skb, &control);
1309 dev_put(odev); 1317 dev_put(odev);
1310 1318
1311 return ret; 1319 return ret;
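
ieee80211_skb_resize and its caller ieee80211_master_start_xmit split the headroom question in two: the driver's tx_headroom is always needed, crypto head/tail room only when the frame may be encrypted, and only the shortfall relative to what the skb already has is requested. A standalone sketch of that arithmetic; the 8/12 byte constants are illustrative and are not necessarily the kernel's IEEE80211_ENCRYPT_HEADROOM/TAILROOM values.

#include <stdio.h>

#define ENCRYPT_HEADROOM  8   /* illustrative values only */
#define ENCRYPT_TAILROOM  12

static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Decide how much extra head/tail space a buffer needs before it can be
 * handed to the driver: driver headroom plus crypto headroom (only if the
 * frame may be encrypted), minus whatever room the buffer already has,
 * never negative.
 */
static void tx_space_needed(int cur_headroom, int cur_tailroom,
                            int driver_headroom, int may_encrypt,
                            int *head_need, int *tail_need)
{
    int head = driver_headroom;
    int tail = 0;

    if (may_encrypt) {
        head += ENCRYPT_HEADROOM;
        tail = ENCRYPT_TAILROOM;
    }

    *head_need = max_int(0, head - cur_headroom);
    *tail_need = max_int(0, tail - cur_tailroom);
}

int main(void)
{
    int head, tail;

    /* Buffer with 16 bytes of headroom, no tailroom, driver wants 32. */
    tx_space_needed(16, 0, 32, 1, &head, &tail);
    printf("expand head by %d, tail by %d\n", head, tail); /* 24 and 12 */
    return 0;
}
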
@@ -1315,7 +1323,7 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1315 struct net_device *dev) 1323 struct net_device *dev)
1316{ 1324{
1317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1325 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1318 struct ieee80211_tx_packet_data *pkt_data; 1326 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1319 struct ieee80211_radiotap_header *prthdr = 1327 struct ieee80211_radiotap_header *prthdr =
1320 (struct ieee80211_radiotap_header *)skb->data; 1328 (struct ieee80211_radiotap_header *)skb->data;
1321 u16 len_rthdr; 1329 u16 len_rthdr;
@@ -1337,12 +1345,12 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
1337 1345
1338 skb->dev = local->mdev; 1346 skb->dev = local->mdev;
1339 1347
1340 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
1341 memset(pkt_data, 0, sizeof(*pkt_data));
1342 /* needed because we set skb device to master */ 1348 /* needed because we set skb device to master */
1343 pkt_data->ifindex = dev->ifindex; 1349 info->control.ifindex = dev->ifindex;
1344 1350
1345 pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; 1351 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1352 /* Interfaces should always request a status report */
1353 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1346 1354
1347 /* 1355 /*
1348 * fix up the pointers accounting for the radiotap 1356 * fix up the pointers accounting for the radiotap
@@ -1386,10 +1394,11 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1386 struct net_device *dev) 1394 struct net_device *dev)
1387{ 1395{
1388 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1396 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1389 struct ieee80211_tx_packet_data *pkt_data; 1397 struct ieee80211_tx_info *info;
1390 struct ieee80211_sub_if_data *sdata; 1398 struct ieee80211_sub_if_data *sdata;
1391 int ret = 1, head_need; 1399 int ret = 1, head_need;
1392 u16 ethertype, hdrlen, meshhdrlen = 0, fc; 1400 u16 ethertype, hdrlen, meshhdrlen = 0;
1401 __le16 fc;
1393 struct ieee80211_hdr hdr; 1402 struct ieee80211_hdr hdr;
1394 struct ieee80211s_hdr mesh_hdr; 1403 struct ieee80211s_hdr mesh_hdr;
1395 const u8 *encaps_data; 1404 const u8 *encaps_data;
@@ -1400,8 +1409,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1400 1409
1401 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1410 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1402 if (unlikely(skb->len < ETH_HLEN)) { 1411 if (unlikely(skb->len < ETH_HLEN)) {
1403 printk(KERN_DEBUG "%s: short skb (len=%d)\n",
1404 dev->name, skb->len);
1405 ret = 0; 1412 ret = 0;
1406 goto fail; 1413 goto fail;
1407 } 1414 }
@@ -1412,12 +1419,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1412 /* convert Ethernet header to proper 802.11 header (based on 1419 /* convert Ethernet header to proper 802.11 header (based on
1413 * operation mode) */ 1420 * operation mode) */
1414 ethertype = (skb->data[12] << 8) | skb->data[13]; 1421 ethertype = (skb->data[12] << 8) | skb->data[13];
1415 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; 1422 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1416 1423
1417 switch (sdata->vif.type) { 1424 switch (sdata->vif.type) {
1418 case IEEE80211_IF_TYPE_AP: 1425 case IEEE80211_IF_TYPE_AP:
1419 case IEEE80211_IF_TYPE_VLAN: 1426 case IEEE80211_IF_TYPE_VLAN:
1420 fc |= IEEE80211_FCTL_FROMDS; 1427 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1421 /* DA BSSID SA */ 1428 /* DA BSSID SA */
1422 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1429 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1423 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1430 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
@@ -1425,7 +1432,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1425 hdrlen = 24; 1432 hdrlen = 24;
1426 break; 1433 break;
1427 case IEEE80211_IF_TYPE_WDS: 1434 case IEEE80211_IF_TYPE_WDS:
1428 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; 1435 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1429 /* RA TA DA SA */ 1436 /* RA TA DA SA */
1430 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1437 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1431 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1438 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
@@ -1435,7 +1442,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1435 break; 1442 break;
1436#ifdef CONFIG_MAC80211_MESH 1443#ifdef CONFIG_MAC80211_MESH
1437 case IEEE80211_IF_TYPE_MESH_POINT: 1444 case IEEE80211_IF_TYPE_MESH_POINT:
1438 fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; 1445 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1439 /* RA TA DA SA */ 1446 /* RA TA DA SA */
1440 if (is_multicast_ether_addr(skb->data)) 1447 if (is_multicast_ether_addr(skb->data))
1441 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1448 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1465,7 +1472,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1465 break; 1472 break;
1466#endif 1473#endif
1467 case IEEE80211_IF_TYPE_STA: 1474 case IEEE80211_IF_TYPE_STA:
1468 fc |= IEEE80211_FCTL_TODS; 1475 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1469 /* BSSID SA DA */ 1476 /* BSSID SA DA */
1470 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); 1477 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
1471 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1478 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
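
The switch over sdata->vif.type in this function encodes the standard 802.11 To-DS/From-DS address table (AP sets From-DS, STA sets To-DS, WDS and mesh set both). The sketch below spells that table out; the 0x0100/0x0200 values are the standard frame-control bits, everything else is illustrative.

#include <stdio.h>

#define FCTL_TODS   0x0100
#define FCTL_FROMDS 0x0200

/*
 * Meaning of the address fields as a function of the To-DS/From-DS bits,
 * matching the per-interface-type comments in the patch:
 *   AP/VLAN  -> From-DS   (addr1=DA, addr2=BSSID, addr3=SA)
 *   STA      -> To-DS     (addr1=BSSID, addr2=SA, addr3=DA)
 *   WDS/mesh -> both bits (addr1=RA, addr2=TA, addr3=DA, addr4=SA)
 */
static const char *addr_layout(unsigned int fc)
{
    switch (fc & (FCTL_TODS | FCTL_FROMDS)) {
    case 0:              return "DA SA BSSID (IBSS)";
    case FCTL_FROMDS:    return "DA BSSID SA (AP to STA)";
    case FCTL_TODS:      return "BSSID SA DA (STA to AP)";
    default:             return "RA TA DA + SA (4-address)";
    }
}

int main(void)
{
    printf("%s\n", addr_layout(FCTL_FROMDS));
    printf("%s\n", addr_layout(FCTL_TODS | FCTL_FROMDS));
    return 0;
}
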
@@ -1493,13 +1500,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1493 rcu_read_lock(); 1500 rcu_read_lock();
1494 sta = sta_info_get(local, hdr.addr1); 1501 sta = sta_info_get(local, hdr.addr1);
1495 if (sta) 1502 if (sta)
1496 sta_flags = sta->flags; 1503 sta_flags = get_sta_flags(sta);
1497 rcu_read_unlock(); 1504 rcu_read_unlock();
1498 } 1505 }
1499 1506
1500 /* receiver is QoS enabled, use a QoS type frame */ 1507 /* receiver and we are QoS enabled, use a QoS type frame */
1501 if (sta_flags & WLAN_STA_WME) { 1508 if (sta_flags & WLAN_STA_WME &&
1502 fc |= IEEE80211_STYPE_QOS_DATA; 1509 ieee80211_num_regular_queues(&local->hw) >= 4) {
1510 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1503 hdrlen += 2; 1511 hdrlen += 2;
1504 } 1512 }
1505 1513
@@ -1527,7 +1535,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1527 goto fail; 1535 goto fail;
1528 } 1536 }
1529 1537
1530 hdr.frame_control = cpu_to_le16(fc); 1538 hdr.frame_control = fc;
1531 hdr.duration_id = 0; 1539 hdr.duration_id = 0;
1532 hdr.seq_ctrl = 0; 1540 hdr.seq_ctrl = 0;
1533 1541
@@ -1562,32 +1570,26 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1562 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and 1570 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1563 * alloc_skb() (net/core/skbuff.c) 1571 * alloc_skb() (net/core/skbuff.c)
1564 */ 1572 */
1565 head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; 1573 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1566 head_need -= skb_headroom(skb);
1567 1574
1568 /* We are going to modify skb data, so make a copy of it if happens to 1575 /*
1569 * be cloned. This could happen, e.g., with Linux bridge code passing 1576 * So we need to modify the skb header and hence need a copy of
1570 * us broadcast frames. */ 1577 * that. The head_need variable above doesn't, so far, include
1578 * the needed header space that we don't need right away. If we
1579 * can, then we don't reallocate right now but only after the
1580 * frame arrives at the master device (if it does...)
1581 *
1582 * If we cannot, however, then we will reallocate to include all
1583 * the ever needed space. Also, if we need to reallocate it anyway,
1584 * make it big enough for everything we may ever need.
1585 */
1571 1586
1572 if (head_need > 0 || skb_cloned(skb)) { 1587 if (head_need > 0 || skb_cloned(skb)) {
1573#if 0 1588 head_need += IEEE80211_ENCRYPT_HEADROOM;
1574 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1589 head_need += local->tx_headroom;
1575 "of headroom\n", dev->name, head_need); 1590 head_need = max_t(int, 0, head_need);
1576#endif 1591 if (ieee80211_skb_resize(local, skb, head_need, true))
1577
1578 if (skb_cloned(skb))
1579 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1580 else
1581 I802_DEBUG_INC(local->tx_expand_skb_head);
1582 /* Since we have to reallocate the buffer, make sure that there
1583 * is enough room for possible WEP IV/ICV and TKIP (8 bytes
1584 * before payload and 12 after). */
1585 if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8),
1586 12, GFP_ATOMIC)) {
1587 printk(KERN_DEBUG "%s: failed to reallocate TX buffer"
1588 "\n", dev->name);
1589 goto fail; 1592 goto fail;
1590 }
1591 } 1593 }
1592 1594
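
The new comment in this hunk explains a deferral: at this point only the 802.11 and encapsulation headers are needed immediately, so driver and crypto headroom are added only if the buffer has to be copied now anyway (not enough room, or a cloned skb), which keeps the frame from being reallocated twice. A small sketch of that decision, with invented names.

#include <stdbool.h>
#include <stdio.h>

/*
 * Deferred reallocation decision, in spirit: grow now only if we must,
 * and if we do copy, make room for everything we could ever need so the
 * buffer is never expanded a second time on the master device.
 */
static bool realloc_now(int immediate_need, bool cloned,
                        int driver_headroom, int crypto_headroom,
                        int *grow_by)
{
    if (immediate_need <= 0 && !cloned) {
        *grow_by = 0;
        return false;   /* defer; the master device may resize later */
    }

    /* Copying anyway: include driver and crypto headroom up front. */
    immediate_need += driver_headroom + crypto_headroom;
    *grow_by = immediate_need > 0 ? immediate_need : 0;
    return true;
}

int main(void)
{
    int grow;

    if (!realloc_now(-40, false, 16, 8, &grow))
        printf("defer (grow 0)\n");
    if (realloc_now(10, false, 16, 8, &grow))
        printf("copy now, grow head by %d\n", grow);  /* 34 */
    return 0;
}
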
1593 if (encaps_data) { 1595 if (encaps_data) {
@@ -1602,7 +1604,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1602 h_pos += meshhdrlen; 1604 h_pos += meshhdrlen;
1603 } 1605 }
1604 1606
1605 if (fc & IEEE80211_STYPE_QOS_DATA) { 1607 if (ieee80211_is_data_qos(fc)) {
1606 __le16 *qos_control; 1608 __le16 *qos_control;
1607 1609
1608 qos_control = (__le16*) skb_push(skb, 2); 1610 qos_control = (__le16*) skb_push(skb, 2);
@@ -1618,11 +1620,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1618 nh_pos += hdrlen; 1620 nh_pos += hdrlen;
1619 h_pos += hdrlen; 1621 h_pos += hdrlen;
1620 1622
1621 pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; 1623 info = IEEE80211_SKB_CB(skb);
1622 memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); 1624 memset(info, 0, sizeof(*info));
1623 pkt_data->ifindex = dev->ifindex; 1625 info->control.ifindex = dev->ifindex;
1624 if (ethertype == ETH_P_PAE) 1626 if (ethertype == ETH_P_PAE)
1625 pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; 1627 info->flags |= IEEE80211_TX_CTL_EAPOL_FRAME;
1628
1629 /* Interfaces should always request a status report */
1630 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
1626 1631
1627 skb->dev = local->mdev; 1632 skb->dev = local->mdev;
1628 dev->stats.tx_packets++; 1633 dev->stats.tx_packets++;
@@ -1647,46 +1652,55 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1647 return ret; 1652 return ret;
1648} 1653}
1649 1654
1650/* helper functions for pending packets for when queues are stopped */
1651 1655
1656/*
1657 * ieee80211_clear_tx_pending may not be called in a context where
1658 * it is possible that packets could come in again.
1659 */
1652void ieee80211_clear_tx_pending(struct ieee80211_local *local) 1660void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1653{ 1661{
1654 int i, j; 1662 int i, j;
1655 struct ieee80211_tx_stored_packet *store; 1663 struct ieee80211_tx_stored_packet *store;
1656 1664
1657 for (i = 0; i < local->hw.queues; i++) { 1665 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1658 if (!__ieee80211_queue_pending(local, i)) 1666 if (!test_bit(i, local->queues_pending))
1659 continue; 1667 continue;
1660 store = &local->pending_packet[i]; 1668 store = &local->pending_packet[i];
1661 kfree_skb(store->skb); 1669 kfree_skb(store->skb);
1662 for (j = 0; j < store->num_extra_frag; j++) 1670 for (j = 0; j < store->num_extra_frag; j++)
1663 kfree_skb(store->extra_frag[j]); 1671 kfree_skb(store->extra_frag[j]);
1664 kfree(store->extra_frag); 1672 kfree(store->extra_frag);
1665 clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); 1673 clear_bit(i, local->queues_pending);
1666 } 1674 }
1667} 1675}
1668 1676
1677/*
1678 * Transmit all pending packets. Called from tasklet, locks master device
1679 * TX lock so that no new packets can come in.
1680 */
1669void ieee80211_tx_pending(unsigned long data) 1681void ieee80211_tx_pending(unsigned long data)
1670{ 1682{
1671 struct ieee80211_local *local = (struct ieee80211_local *)data; 1683 struct ieee80211_local *local = (struct ieee80211_local *)data;
1672 struct net_device *dev = local->mdev; 1684 struct net_device *dev = local->mdev;
1673 struct ieee80211_tx_stored_packet *store; 1685 struct ieee80211_tx_stored_packet *store;
1674 struct ieee80211_tx_data tx; 1686 struct ieee80211_tx_data tx;
1675 int i, ret, reschedule = 0; 1687 int i, ret;
1676 1688
1677 netif_tx_lock_bh(dev); 1689 netif_tx_lock_bh(dev);
1678 for (i = 0; i < local->hw.queues; i++) { 1690 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1679 if (__ieee80211_queue_stopped(local, i)) 1691 /* Check that this queue is ok */
1692 if (__netif_subqueue_stopped(local->mdev, i))
1680 continue; 1693 continue;
1681 if (!__ieee80211_queue_pending(local, i)) { 1694
1682 reschedule = 1; 1695 if (!test_bit(i, local->queues_pending)) {
1696 ieee80211_wake_queue(&local->hw, i);
1683 continue; 1697 continue;
1684 } 1698 }
1699
1685 store = &local->pending_packet[i]; 1700 store = &local->pending_packet[i];
1686 tx.control = &store->control;
1687 tx.extra_frag = store->extra_frag; 1701 tx.extra_frag = store->extra_frag;
1688 tx.num_extra_frag = store->num_extra_frag; 1702 tx.num_extra_frag = store->num_extra_frag;
1689 tx.last_frag_rate = store->last_frag_rate; 1703 tx.last_frag_rate_idx = store->last_frag_rate_idx;
1690 tx.flags = 0; 1704 tx.flags = 0;
1691 if (store->last_frag_rate_ctrl_probe) 1705 if (store->last_frag_rate_ctrl_probe)
1692 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; 1706 tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG;
@@ -1695,19 +1709,11 @@ void ieee80211_tx_pending(unsigned long data)
1695 if (ret == IEEE80211_TX_FRAG_AGAIN) 1709 if (ret == IEEE80211_TX_FRAG_AGAIN)
1696 store->skb = NULL; 1710 store->skb = NULL;
1697 } else { 1711 } else {
1698 clear_bit(IEEE80211_LINK_STATE_PENDING, 1712 clear_bit(i, local->queues_pending);
1699 &local->state[i]); 1713 ieee80211_wake_queue(&local->hw, i);
1700 reschedule = 1;
1701 } 1714 }
1702 } 1715 }
1703 netif_tx_unlock_bh(dev); 1716 netif_tx_unlock_bh(dev);
1704 if (reschedule) {
1705 if (!ieee80211_qdisc_installed(dev)) {
1706 if (!__ieee80211_queue_stopped(local, 0))
1707 netif_wake_queue(dev);
1708 } else
1709 netif_schedule(dev);
1710 }
1711} 1717}
1712 1718
1713/* functions for drivers to get certain frames */ 1719/* functions for drivers to get certain frames */
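
The hunks above drop the per-queue IEEE80211_LINK_STATE_PENDING flags in favour of a single queues_pending bitmap tested with test_bit()/clear_bit(). A minimal userspace sketch of that bitmap pattern follows; the *_ul helpers are non-atomic stand-ins for the kernel bitops (this sketch does not reproduce their atomicity), and MAX_QUEUES is an illustrative constant, not the patch's queue count.

    #include <stdio.h>

    #define MAX_QUEUES 16

    /* Non-atomic stand-ins for the kernel's set_bit/clear_bit/test_bit. */
    static void set_bit_ul(int nr, unsigned long *map)   { *map |=  (1UL << nr); }
    static void clear_bit_ul(int nr, unsigned long *map) { *map &= ~(1UL << nr); }
    static int  test_bit_ul(int nr, const unsigned long *map) { return (*map >> nr) & 1UL; }

    int main(void)
    {
        unsigned long queues_pending = 0;
        int i;

        set_bit_ul(2, &queues_pending);    /* queue 2 holds a stored frame */
        set_bit_ul(5, &queues_pending);

        for (i = 0; i < MAX_QUEUES; i++) {
            if (!test_bit_ul(i, &queues_pending))
                continue;
            printf("draining pending frame on queue %d\n", i);
            clear_bit_ul(i, &queues_pending);  /* mirrors clear_bit() in the hunk */
        }
        return 0;
    }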
@@ -1776,11 +1782,11 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local,
1776} 1782}
1777 1783
1778struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, 1784struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1779 struct ieee80211_vif *vif, 1785 struct ieee80211_vif *vif)
1780 struct ieee80211_tx_control *control)
1781{ 1786{
1782 struct ieee80211_local *local = hw_to_local(hw); 1787 struct ieee80211_local *local = hw_to_local(hw);
1783 struct sk_buff *skb; 1788 struct sk_buff *skb;
1789 struct ieee80211_tx_info *info;
1784 struct net_device *bdev; 1790 struct net_device *bdev;
1785 struct ieee80211_sub_if_data *sdata = NULL; 1791 struct ieee80211_sub_if_data *sdata = NULL;
1786 struct ieee80211_if_ap *ap = NULL; 1792 struct ieee80211_if_ap *ap = NULL;
@@ -1790,9 +1796,10 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1790 struct ieee80211_mgmt *mgmt; 1796 struct ieee80211_mgmt *mgmt;
1791 int *num_beacons; 1797 int *num_beacons;
1792 bool err = true; 1798 bool err = true;
1799 enum ieee80211_band band = local->hw.conf.channel->band;
1793 u8 *pos; 1800 u8 *pos;
1794 1801
1795 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1802 sband = local->hw.wiphy->bands[band];
1796 1803
1797 rcu_read_lock(); 1804 rcu_read_lock();
1798 1805
@@ -1855,8 +1862,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1855 mgmt = (struct ieee80211_mgmt *) 1862 mgmt = (struct ieee80211_mgmt *)
1856 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 1863 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
1857 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 1864 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
1858 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1865 mgmt->frame_control =
1859 IEEE80211_STYPE_BEACON); 1866 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
1860 memset(mgmt->da, 0xff, ETH_ALEN); 1867 memset(mgmt->da, 0xff, ETH_ALEN);
1861 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1868 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1862 /* BSSID is left zeroed, wildcard value */ 1869 /* BSSID is left zeroed, wildcard value */
@@ -1885,30 +1892,32 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1885 goto out; 1892 goto out;
1886 } 1893 }
1887 1894
1888 if (control) { 1895 info = IEEE80211_SKB_CB(skb);
1889 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1890 if (!rsel.rate) {
1891 if (net_ratelimit()) {
1892 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
1893 "no rate found\n",
1894 wiphy_name(local->hw.wiphy));
1895 }
1896 dev_kfree_skb(skb);
1897 skb = NULL;
1898 goto out;
1899 }
1900 1896
1901 control->vif = vif; 1897 info->band = band;
1902 control->tx_rate = rsel.rate; 1898 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1903 if (sdata->bss_conf.use_short_preamble && 1899
1904 rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 1900 if (unlikely(rsel.rate_idx < 0)) {
1905 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 1901 if (net_ratelimit()) {
1906 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1902 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
1907 control->flags |= IEEE80211_TXCTL_NO_ACK; 1903 "no rate found\n",
1908 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 1904 wiphy_name(local->hw.wiphy));
1909 control->retry_limit = 1; 1905 }
1910 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1906 dev_kfree_skb(skb);
1907 skb = NULL;
1908 goto out;
1911 } 1909 }
1910
1911 info->control.vif = vif;
1912 info->tx_rate_idx = rsel.rate_idx;
1913 if (sdata->bss_conf.use_short_preamble &&
1914 sband->bitrates[rsel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE)
1915 info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE;
1916 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1917 info->flags |= IEEE80211_TX_CTL_NO_ACK;
1918 info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
1919 info->control.retry_limit = 1;
1920 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1912 (*num_beacons)++; 1921 (*num_beacons)++;
1913out: 1922out:
1914 rcu_read_unlock(); 1923 rcu_read_unlock();
@@ -1918,14 +1927,13 @@ EXPORT_SYMBOL(ieee80211_beacon_get);
1918 1927
1919void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1928void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1920 const void *frame, size_t frame_len, 1929 const void *frame, size_t frame_len,
1921 const struct ieee80211_tx_control *frame_txctl, 1930 const struct ieee80211_tx_info *frame_txctl,
1922 struct ieee80211_rts *rts) 1931 struct ieee80211_rts *rts)
1923{ 1932{
1924 const struct ieee80211_hdr *hdr = frame; 1933 const struct ieee80211_hdr *hdr = frame;
1925 u16 fctl;
1926 1934
1927 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; 1935 rts->frame_control =
1928 rts->frame_control = cpu_to_le16(fctl); 1936 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
1929 rts->duration = ieee80211_rts_duration(hw, vif, frame_len, 1937 rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
1930 frame_txctl); 1938 frame_txctl);
1931 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); 1939 memcpy(rts->ra, hdr->addr1, sizeof(rts->ra));
@@ -1935,14 +1943,13 @@ EXPORT_SYMBOL(ieee80211_rts_get);
1935 1943
1936void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1944void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1937 const void *frame, size_t frame_len, 1945 const void *frame, size_t frame_len,
1938 const struct ieee80211_tx_control *frame_txctl, 1946 const struct ieee80211_tx_info *frame_txctl,
1939 struct ieee80211_cts *cts) 1947 struct ieee80211_cts *cts)
1940{ 1948{
1941 const struct ieee80211_hdr *hdr = frame; 1949 const struct ieee80211_hdr *hdr = frame;
1942 u16 fctl;
1943 1950
1944 fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; 1951 cts->frame_control =
1945 cts->frame_control = cpu_to_le16(fctl); 1952 cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
1946 cts->duration = ieee80211_ctstoself_duration(hw, vif, 1953 cts->duration = ieee80211_ctstoself_duration(hw, vif,
1947 frame_len, frame_txctl); 1954 frame_len, frame_txctl);
1948 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); 1955 memcpy(cts->ra, hdr->addr1, sizeof(cts->ra));
@@ -1951,23 +1958,21 @@ EXPORT_SYMBOL(ieee80211_ctstoself_get);
1951 1958
1952struct sk_buff * 1959struct sk_buff *
1953ieee80211_get_buffered_bc(struct ieee80211_hw *hw, 1960ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1954 struct ieee80211_vif *vif, 1961 struct ieee80211_vif *vif)
1955 struct ieee80211_tx_control *control)
1956{ 1962{
1957 struct ieee80211_local *local = hw_to_local(hw); 1963 struct ieee80211_local *local = hw_to_local(hw);
1958 struct sk_buff *skb; 1964 struct sk_buff *skb = NULL;
1959 struct sta_info *sta; 1965 struct sta_info *sta;
1960 ieee80211_tx_handler *handler;
1961 struct ieee80211_tx_data tx; 1966 struct ieee80211_tx_data tx;
1962 ieee80211_tx_result res = TX_DROP;
1963 struct net_device *bdev; 1967 struct net_device *bdev;
1964 struct ieee80211_sub_if_data *sdata; 1968 struct ieee80211_sub_if_data *sdata;
1965 struct ieee80211_if_ap *bss = NULL; 1969 struct ieee80211_if_ap *bss = NULL;
1966 struct beacon_data *beacon; 1970 struct beacon_data *beacon;
1971 struct ieee80211_tx_info *info;
1967 1972
1968 sdata = vif_to_sdata(vif); 1973 sdata = vif_to_sdata(vif);
1969 bdev = sdata->dev; 1974 bdev = sdata->dev;
1970 1975 bss = &sdata->u.ap;
1971 1976
1972 if (!bss) 1977 if (!bss)
1973 return NULL; 1978 return NULL;
@@ -1975,19 +1980,16 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1975 rcu_read_lock(); 1980 rcu_read_lock();
1976 beacon = rcu_dereference(bss->beacon); 1981 beacon = rcu_dereference(bss->beacon);
1977 1982
1978 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || 1983 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head)
1979 !beacon->head) { 1984 goto out;
1980 rcu_read_unlock();
1981 return NULL;
1982 }
1983 1985
1984 if (bss->dtim_count != 0) 1986 if (bss->dtim_count != 0)
1985 return NULL; /* send buffered bc/mc only after DTIM beacon */ 1987 goto out; /* send buffered bc/mc only after DTIM beacon */
1986 memset(control, 0, sizeof(*control)); 1988
1987 while (1) { 1989 while (1) {
1988 skb = skb_dequeue(&bss->ps_bc_buf); 1990 skb = skb_dequeue(&bss->ps_bc_buf);
1989 if (!skb) 1991 if (!skb)
1990 return NULL; 1992 goto out;
1991 local->total_ps_buffered--; 1993 local->total_ps_buffered--;
1992 1994
1993 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { 1995 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) {
@@ -2000,30 +2002,21 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2000 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2002 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2001 } 2003 }
2002 2004
2003 if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) 2005 if (!ieee80211_tx_prepare(&tx, skb, local->mdev))
2004 break; 2006 break;
2005 dev_kfree_skb_any(skb); 2007 dev_kfree_skb_any(skb);
2006 } 2008 }
2009
2010 info = IEEE80211_SKB_CB(skb);
2011
2007 sta = tx.sta; 2012 sta = tx.sta;
2008 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2013 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2009 tx.channel = local->hw.conf.channel; 2014 tx.channel = local->hw.conf.channel;
2015 info->band = tx.channel->band;
2010 2016
2011 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { 2017 if (invoke_tx_handlers(&tx))
2012 res = (*handler)(&tx);
2013 if (res == TX_DROP || res == TX_QUEUED)
2014 break;
2015 }
2016 skb = tx.skb; /* handlers are allowed to change skb */
2017
2018 if (res == TX_DROP) {
2019 I802_DEBUG_INC(local->tx_handlers_drop);
2020 dev_kfree_skb(skb);
2021 skb = NULL;
2022 } else if (res == TX_QUEUED) {
2023 I802_DEBUG_INC(local->tx_handlers_queued);
2024 skb = NULL; 2018 skb = NULL;
2025 } 2019 out:
2026
2027 rcu_read_unlock(); 2020 rcu_read_unlock();
2028 2021
2029 return skb; 2022 return skb;
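
A recurring theme in these hunks is that per-frame TX metadata no longer travels in a separate struct ieee80211_tx_control argument; it lives in struct ieee80211_tx_info inside skb->cb and is fetched with IEEE80211_SKB_CB(). A hedged userspace model of that accessor pattern follows; everything with a _sketch suffix is invented for illustration, and the struct is trimmed to two fields rather than the real layout.

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace model of the skb->cb pattern behind IEEE80211_SKB_CB(). */
    struct sk_buff_sketch {
        unsigned char cb[48];            /* control buffer, as in struct sk_buff */
    };

    struct tx_info_sketch {
        uint32_t flags;                  /* stands in for IEEE80211_TX_CTL_* bits */
        int8_t   tx_rate_idx;            /* stands in for info->tx_rate_idx       */
    };

    #define TX_CTL_NO_ACK_SKETCH 0x1

    static struct tx_info_sketch *skb_cb(struct sk_buff_sketch *skb)
    {
        return (struct tx_info_sketch *)skb->cb;   /* the IEEE80211_SKB_CB() idea */
    }

    int main(void)
    {
        struct sk_buff_sketch skb = { { 0 } };
        struct tx_info_sketch *info = skb_cb(&skb);

        info->flags |= TX_CTL_NO_ACK_SKETCH;       /* mac80211 fills this in ...  */
        info->tx_rate_idx = 3;

        /* ... and the driver reads the very same bytes back out of skb->cb */
        printf("no-ack=%d rate_idx=%d\n",
               !!(skb_cb(&skb)->flags & TX_CTL_NO_ACK_SKETCH),
               skb_cb(&skb)->tx_rate_idx);
        return 0;
    }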
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 4e97b266f907..ce62b163b82c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -45,38 +45,37 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
46 enum ieee80211_if_types type) 46 enum ieee80211_if_types type)
47{ 47{
48 u16 fc; 48 __le16 fc = hdr->frame_control;
49 49
50 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ 50 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
51 if (len < 16) 51 if (len < 16)
52 return NULL; 52 return NULL;
53 53
54 fc = le16_to_cpu(hdr->frame_control); 54 if (ieee80211_is_data(fc)) {
55
56 switch (fc & IEEE80211_FCTL_FTYPE) {
57 case IEEE80211_FTYPE_DATA:
58 if (len < 24) /* drop incorrect hdr len (data) */ 55 if (len < 24) /* drop incorrect hdr len (data) */
59 return NULL; 56 return NULL;
60 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 57
61 case IEEE80211_FCTL_TODS: 58 if (ieee80211_has_a4(fc))
62 return hdr->addr1;
63 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
64 return NULL; 59 return NULL;
65 case IEEE80211_FCTL_FROMDS: 60 if (ieee80211_has_tods(fc))
61 return hdr->addr1;
62 if (ieee80211_has_fromds(fc))
66 return hdr->addr2; 63 return hdr->addr2;
67 case 0: 64
68 return hdr->addr3; 65 return hdr->addr3;
69 } 66 }
70 break; 67
71 case IEEE80211_FTYPE_MGMT: 68 if (ieee80211_is_mgmt(fc)) {
72 if (len < 24) /* drop incorrect hdr len (mgmt) */ 69 if (len < 24) /* drop incorrect hdr len (mgmt) */
73 return NULL; 70 return NULL;
74 return hdr->addr3; 71 return hdr->addr3;
75 case IEEE80211_FTYPE_CTL: 72 }
76 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL) 73
74 if (ieee80211_is_ctl(fc)) {
 75 if (ieee80211_is_pspoll(fc))
77 return hdr->addr1; 76 return hdr->addr1;
78 else if ((fc & IEEE80211_FCTL_STYPE) == 77
79 IEEE80211_STYPE_BACK_REQ) { 78 if (ieee80211_is_back_req(fc)) {
80 switch (type) { 79 switch (type) {
81 case IEEE80211_IF_TYPE_STA: 80 case IEEE80211_IF_TYPE_STA:
82 return hdr->addr2; 81 return hdr->addr2;
@@ -84,11 +83,9 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
84 case IEEE80211_IF_TYPE_VLAN: 83 case IEEE80211_IF_TYPE_VLAN:
85 return hdr->addr1; 84 return hdr->addr1;
86 default: 85 default:
87 return NULL; 86 break; /* fall through to the return */
88 } 87 }
89 } 88 }
90 else
91 return NULL;
92 } 89 }
93 90
94 return NULL; 91 return NULL;
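
The rewritten ieee80211_get_bssid() above keys the BSSID location off the ToDS/FromDS bits of a data frame: neither set means addr3, ToDS means addr1, FromDS means addr2, and both set (4-address/WDS) means there is no BSSID among the first three addresses. A standalone sketch of that decision; the FCTL values match the kernel's IEEE80211_FCTL_TODS/FROMDS, but the function name is made up for the example.

    #include <stdio.h>
    #include <stdint.h>

    #define FCTL_TODS   0x0100   /* same values as IEEE80211_FCTL_TODS/FROMDS */
    #define FCTL_FROMDS 0x0200

    /* Which 802.11 address field (1..3) carries the BSSID in a data frame,
     * or 0 when there is none (4-address frames). Illustrative only. */
    static int bssid_addr_index(uint16_t fc)
    {
        int tods = !!(fc & FCTL_TODS), fromds = !!(fc & FCTL_FROMDS);

        if (tods && fromds)
            return 0;        /* WDS: no BSSID among addr1..addr3    */
        if (tods)
            return 1;        /* to the AP: RA (addr1) is the BSSID  */
        if (fromds)
            return 2;        /* from the AP: TA (addr2) is the BSSID */
        return 3;            /* IBSS / direct: addr3 is the BSSID    */
    }

    int main(void)
    {
        printf("ToDS   -> addr%d\n", bssid_addr_index(FCTL_TODS));
        printf("FromDS -> addr%d\n", bssid_addr_index(FCTL_FROMDS));
        printf("IBSS   -> addr%d\n", bssid_addr_index(0));
        printf("WDS    -> addr%d (0 = none)\n",
               bssid_addr_index(FCTL_TODS | FCTL_FROMDS));
        return 0;
    }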
@@ -133,14 +130,46 @@ int ieee80211_get_hdrlen(u16 fc)
133} 130}
134EXPORT_SYMBOL(ieee80211_get_hdrlen); 131EXPORT_SYMBOL(ieee80211_get_hdrlen);
135 132
136int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) 133unsigned int ieee80211_hdrlen(__le16 fc)
134{
135 unsigned int hdrlen = 24;
136
137 if (ieee80211_is_data(fc)) {
138 if (ieee80211_has_a4(fc))
139 hdrlen = 30;
140 if (ieee80211_is_data_qos(fc))
141 hdrlen += IEEE80211_QOS_CTL_LEN;
142 goto out;
143 }
144
145 if (ieee80211_is_ctl(fc)) {
146 /*
147 * ACK and CTS are 10 bytes, all others 16. To see how
 148 * to get this condition, consider
149 * subtype mask: 0b0000000011110000 (0x00F0)
150 * ACK subtype: 0b0000000011010000 (0x00D0)
151 * CTS subtype: 0b0000000011000000 (0x00C0)
152 * bits that matter: ^^^ (0x00E0)
153 * value of those: 0b0000000011000000 (0x00C0)
154 */
155 if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0))
156 hdrlen = 10;
157 else
158 hdrlen = 16;
159 }
160out:
161 return hdrlen;
162}
163EXPORT_SYMBOL(ieee80211_hdrlen);
164
165unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
137{ 166{
138 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *) skb->data; 167 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)skb->data;
139 int hdrlen; 168 unsigned int hdrlen;
140 169
141 if (unlikely(skb->len < 10)) 170 if (unlikely(skb->len < 10))
142 return 0; 171 return 0;
143 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); 172 hdrlen = ieee80211_hdrlen(hdr->frame_control);
144 if (unlikely(hdrlen > skb->len)) 173 if (unlikely(hdrlen > skb->len))
145 return 0; 174 return 0;
146 return hdrlen; 175 return hdrlen;
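
The new ieee80211_hdrlen() above derives the header length purely from frame_control. Below is a standalone re-implementation of the same bit logic with a few checks (ACK/CTS give 10, RTS gives 16, 4-address QoS data gives 32); host byte order is assumed, so there is no cpu_to_le16 handling here, and the macro names are local to the sketch.

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define FTYPE_MASK   0x000c
    #define FTYPE_DATA   0x0008
    #define FTYPE_CTL    0x0004
    #define STYPE_QOS    0x0080   /* QoS bit within the data subtypes */
    #define FCTL_TODS    0x0100
    #define FCTL_FROMDS  0x0200

    static unsigned int hdrlen_from_fc(uint16_t fc)  /* mirrors ieee80211_hdrlen() */
    {
        unsigned int hdrlen = 24;

        if ((fc & FTYPE_MASK) == FTYPE_DATA) {
            if ((fc & (FCTL_TODS | FCTL_FROMDS)) == (FCTL_TODS | FCTL_FROMDS))
                hdrlen = 30;                    /* 4-address format  */
            if (fc & STYPE_QOS)
                hdrlen += 2;                    /* QoS control field */
        } else if ((fc & FTYPE_MASK) == FTYPE_CTL) {
            /* ACK (0xD0) and CTS (0xC0) subtypes share bits 7..5 == 110 */
            hdrlen = ((fc & 0x00E0) == 0x00C0) ? 10 : 16;
        }
        return hdrlen;
    }

    int main(void)
    {
        assert(hdrlen_from_fc(0x00D4) == 10);   /* ACK               */
        assert(hdrlen_from_fc(0x00C4) == 10);   /* CTS               */
        assert(hdrlen_from_fc(0x00B4) == 16);   /* RTS               */
        assert(hdrlen_from_fc(0x0388) == 32);   /* QoS data, 4-addr  */
        puts("hdrlen checks passed");
        return 0;
    }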
@@ -258,7 +287,7 @@ EXPORT_SYMBOL(ieee80211_generic_frame_duration);
258 287
259__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, 288__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
260 struct ieee80211_vif *vif, size_t frame_len, 289 struct ieee80211_vif *vif, size_t frame_len,
261 const struct ieee80211_tx_control *frame_txctl) 290 const struct ieee80211_tx_info *frame_txctl)
262{ 291{
263 struct ieee80211_local *local = hw_to_local(hw); 292 struct ieee80211_local *local = hw_to_local(hw);
264 struct ieee80211_rate *rate; 293 struct ieee80211_rate *rate;
@@ -266,10 +295,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
266 bool short_preamble; 295 bool short_preamble;
267 int erp; 296 int erp;
268 u16 dur; 297 u16 dur;
298 struct ieee80211_supported_band *sband;
299
300 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
269 301
270 short_preamble = sdata->bss_conf.use_short_preamble; 302 short_preamble = sdata->bss_conf.use_short_preamble;
271 303
272 rate = frame_txctl->rts_cts_rate; 304 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
273 305
274 erp = 0; 306 erp = 0;
275 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 307 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
@@ -292,7 +324,7 @@ EXPORT_SYMBOL(ieee80211_rts_duration);
292__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, 324__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
293 struct ieee80211_vif *vif, 325 struct ieee80211_vif *vif,
294 size_t frame_len, 326 size_t frame_len,
295 const struct ieee80211_tx_control *frame_txctl) 327 const struct ieee80211_tx_info *frame_txctl)
296{ 328{
297 struct ieee80211_local *local = hw_to_local(hw); 329 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_rate *rate; 330 struct ieee80211_rate *rate;
@@ -300,10 +332,13 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
300 bool short_preamble; 332 bool short_preamble;
301 int erp; 333 int erp;
302 u16 dur; 334 u16 dur;
335 struct ieee80211_supported_band *sband;
336
337 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
303 338
304 short_preamble = sdata->bss_conf.use_short_preamble; 339 short_preamble = sdata->bss_conf.use_short_preamble;
305 340
306 rate = frame_txctl->rts_cts_rate; 341 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
307 erp = 0; 342 erp = 0;
308 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 343 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
309 erp = rate->flags & IEEE80211_RATE_ERP_G; 344 erp = rate->flags & IEEE80211_RATE_ERP_G;
@@ -311,7 +346,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
311 /* Data frame duration */ 346 /* Data frame duration */
312 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 347 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
313 erp, short_preamble); 348 erp, short_preamble);
314 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { 349 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
315 /* ACK duration */ 350 /* ACK duration */
316 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 351 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
317 erp, short_preamble); 352 erp, short_preamble);
@@ -325,17 +360,15 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
325{ 360{
326 struct ieee80211_local *local = hw_to_local(hw); 361 struct ieee80211_local *local = hw_to_local(hw);
327 362
328 if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, 363 if (test_bit(queue, local->queues_pending)) {
329 &local->state[queue])) { 364 tasklet_schedule(&local->tx_pending_tasklet);
330 if (test_bit(IEEE80211_LINK_STATE_PENDING, 365 } else {
331 &local->state[queue])) 366 if (ieee80211_is_multiqueue(local)) {
332 tasklet_schedule(&local->tx_pending_tasklet); 367 netif_wake_subqueue(local->mdev, queue);
333 else 368 } else {
334 if (!ieee80211_qdisc_installed(local->mdev)) { 369 WARN_ON(queue != 0);
335 if (queue == 0) 370 netif_wake_queue(local->mdev);
336 netif_wake_queue(local->mdev); 371 }
337 } else
338 __netif_schedule(local->mdev);
339 } 372 }
340} 373}
341EXPORT_SYMBOL(ieee80211_wake_queue); 374EXPORT_SYMBOL(ieee80211_wake_queue);
@@ -344,29 +377,20 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
344{ 377{
345 struct ieee80211_local *local = hw_to_local(hw); 378 struct ieee80211_local *local = hw_to_local(hw);
346 379
347 if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) 380 if (ieee80211_is_multiqueue(local)) {
381 netif_stop_subqueue(local->mdev, queue);
382 } else {
383 WARN_ON(queue != 0);
348 netif_stop_queue(local->mdev); 384 netif_stop_queue(local->mdev);
349 set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); 385 }
350} 386}
351EXPORT_SYMBOL(ieee80211_stop_queue); 387EXPORT_SYMBOL(ieee80211_stop_queue);
352 388
353void ieee80211_start_queues(struct ieee80211_hw *hw)
354{
355 struct ieee80211_local *local = hw_to_local(hw);
356 int i;
357
358 for (i = 0; i < local->hw.queues; i++)
359 clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]);
360 if (!ieee80211_qdisc_installed(local->mdev))
361 netif_start_queue(local->mdev);
362}
363EXPORT_SYMBOL(ieee80211_start_queues);
364
365void ieee80211_stop_queues(struct ieee80211_hw *hw) 389void ieee80211_stop_queues(struct ieee80211_hw *hw)
366{ 390{
367 int i; 391 int i;
368 392
369 for (i = 0; i < hw->queues; i++) 393 for (i = 0; i < ieee80211_num_queues(hw); i++)
370 ieee80211_stop_queue(hw, i); 394 ieee80211_stop_queue(hw, i);
371} 395}
372EXPORT_SYMBOL(ieee80211_stop_queues); 396EXPORT_SYMBOL(ieee80211_stop_queues);
@@ -375,7 +399,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
375{ 399{
376 int i; 400 int i;
377 401
378 for (i = 0; i < hw->queues; i++) 402 for (i = 0; i < hw->queues + hw->ampdu_queues; i++)
379 ieee80211_wake_queue(hw, i); 403 ieee80211_wake_queue(hw, i);
380} 404}
381EXPORT_SYMBOL(ieee80211_wake_queues); 405EXPORT_SYMBOL(ieee80211_wake_queues);
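
With the change above, ieee80211_stop_queue()/ieee80211_wake_queue() map directly onto netif_{stop,wake}_subqueue() (or the single-queue variants), with wake_queue also kicking the pending-frame tasklet. A hedged sketch of the usual driver-side pattern follows; it is hypothetical driver code, not part of this patch: mydrv_*, ring_* and the priv layout are invented, while the mac80211 and skb calls are the real API of this era.

    /* Hypothetical driver fragment: stop the mac80211 queue when the hardware
     * TX ring fills, wake it again from the TX-completion path. */
    static int mydrv_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
    {
        struct mydrv_priv *priv = hw->priv;
        int queue = skb_get_queue_mapping(skb);

        mydrv_ring_push(priv, queue, skb);          /* hand frame to hardware */

        if (mydrv_ring_full(priv, queue))
            ieee80211_stop_queue(hw, queue);        /* backpressure mac80211  */
        return NETDEV_TX_OK;
    }

    static void mydrv_tx_done_irq(struct mydrv_priv *priv, int queue)
    {
        mydrv_ring_reap(priv, queue);               /* free completed entries */

        if (mydrv_ring_has_room(priv, queue))
            ieee80211_wake_queue(priv->hw, queue);  /* resumes flow and drains
                                                     * any pending frame via
                                                     * the tasklet path above */
    }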
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index affcecd78c10..872d2fcd1a5b 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -84,24 +84,17 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
84 struct sk_buff *skb, 84 struct sk_buff *skb,
85 struct ieee80211_key *key) 85 struct ieee80211_key *key)
86{ 86{
87 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 87 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
88 u16 fc; 88 unsigned int hdrlen;
89 int hdrlen;
90 u8 *newhdr; 89 u8 *newhdr;
91 90
92 fc = le16_to_cpu(hdr->frame_control); 91 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
93 fc |= IEEE80211_FCTL_PROTECTED;
94 hdr->frame_control = cpu_to_le16(fc);
95 92
96 if ((skb_headroom(skb) < WEP_IV_LEN || 93 if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
97 skb_tailroom(skb) < WEP_ICV_LEN)) { 94 skb_headroom(skb) < WEP_IV_LEN))
98 I802_DEBUG_INC(local->tx_expand_skb_head); 95 return NULL;
99 if (unlikely(pskb_expand_head(skb, WEP_IV_LEN, WEP_ICV_LEN,
100 GFP_ATOMIC)))
101 return NULL;
102 }
103 96
104 hdrlen = ieee80211_get_hdrlen(fc); 97 hdrlen = ieee80211_hdrlen(hdr->frame_control);
105 newhdr = skb_push(skb, WEP_IV_LEN); 98 newhdr = skb_push(skb, WEP_IV_LEN);
106 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); 99 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
107 ieee80211_wep_get_iv(local, key, newhdr + hdrlen); 100 ieee80211_wep_get_iv(local, key, newhdr + hdrlen);
@@ -113,12 +106,10 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
113 struct sk_buff *skb, 106 struct sk_buff *skb,
114 struct ieee80211_key *key) 107 struct ieee80211_key *key)
115{ 108{
116 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 109 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
117 u16 fc; 110 unsigned int hdrlen;
118 int hdrlen;
119 111
120 fc = le16_to_cpu(hdr->frame_control); 112 hdrlen = ieee80211_hdrlen(hdr->frame_control);
121 hdrlen = ieee80211_get_hdrlen(fc);
122 memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); 113 memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
123 skb_pull(skb, WEP_IV_LEN); 114 skb_pull(skb, WEP_IV_LEN);
124} 115}
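
ieee80211_wep_add_iv() above opens a 4-byte gap for the IV directly behind the 802.11 header by pushing the skb head and moving the header forward; ieee80211_wep_remove_iv() does the inverse. A standalone byte-buffer model of that push-and-memmove pattern follows, using plain arrays instead of skbs and an assumed 24-byte header.

    #include <stdio.h>
    #include <string.h>

    #define HDRLEN   24   /* assumed 802.11 header length */
    #define IV_LEN    4   /* WEP_IV_LEN                   */

    int main(void)
    {
        /* frame laid out with IV_LEN bytes of free headroom in front */
        unsigned char buf[IV_LEN + HDRLEN + 8];
        unsigned char *frame = buf + IV_LEN;        /* skb->data before the push */

        memset(buf, 0, sizeof(buf));
        memset(frame, 0xaa, HDRLEN);                /* fake header  */
        memset(frame + HDRLEN, 0x55, 8);            /* fake payload */

        /* skb_push(skb, IV_LEN): data pointer moves back into the headroom */
        unsigned char *newhdr = frame - IV_LEN;
        /* move the header into the freed space, leaving a gap for the IV   */
        memmove(newhdr, newhdr + IV_LEN, HDRLEN);

        /* the IV now lives at newhdr + HDRLEN, right in front of the payload */
        newhdr[HDRLEN + 0] = 0x01;   /* IV bytes would be written here ...    */
        newhdr[HDRLEN + 3] = 0x00;   /* ... key index goes in the 4th byte    */

        printf("payload still starts with 0x%02x\n", newhdr[HDRLEN + IV_LEN]);
        return 0;
    }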
@@ -228,17 +219,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
228 u32 klen; 219 u32 klen;
229 u8 *rc4key; 220 u8 *rc4key;
230 u8 keyidx; 221 u8 keyidx;
231 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 222 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
232 u16 fc; 223 unsigned int hdrlen;
233 int hdrlen;
234 size_t len; 224 size_t len;
235 int ret = 0; 225 int ret = 0;
236 226
237 fc = le16_to_cpu(hdr->frame_control); 227 if (!ieee80211_has_protected(hdr->frame_control))
238 if (!(fc & IEEE80211_FCTL_PROTECTED))
239 return -1; 228 return -1;
240 229
241 hdrlen = ieee80211_get_hdrlen(fc); 230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
242 231
243 if (skb->len < 8 + hdrlen) 232 if (skb->len < 8 + hdrlen)
244 return -1; 233 return -1;
@@ -264,11 +253,8 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
264 253
265 if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, 254 if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
266 skb->data + hdrlen + WEP_IV_LEN, 255 skb->data + hdrlen + WEP_IV_LEN,
267 len)) { 256 len))
268 if (net_ratelimit())
269 printk(KERN_DEBUG "WEP decrypt failed (ICV)\n");
270 ret = -1; 257 ret = -1;
271 }
272 258
273 kfree(rc4key); 259 kfree(rc4key);
274 260
@@ -285,17 +271,15 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
285 271
286u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) 272u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
287{ 273{
288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 274 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
289 u16 fc; 275 unsigned int hdrlen;
290 int hdrlen;
291 u8 *ivpos; 276 u8 *ivpos;
292 u32 iv; 277 u32 iv;
293 278
294 fc = le16_to_cpu(hdr->frame_control); 279 if (!ieee80211_has_protected(hdr->frame_control))
295 if (!(fc & IEEE80211_FCTL_PROTECTED))
296 return NULL; 280 return NULL;
297 281
298 hdrlen = ieee80211_get_hdrlen(fc); 282 hdrlen = ieee80211_hdrlen(hdr->frame_control);
299 ivpos = skb->data + hdrlen; 283 ivpos = skb->data + hdrlen;
300 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; 284 iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
301 285
@@ -314,14 +298,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
314 return RX_CONTINUE; 298 return RX_CONTINUE;
315 299
316 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { 301 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
318#ifdef CONFIG_MAC80211_DEBUG
319 if (net_ratelimit())
320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
321 "failed\n", rx->dev->name);
322#endif /* CONFIG_MAC80211_DEBUG */
323 return RX_DROP_UNUSABLE; 302 return RX_DROP_UNUSABLE;
324 }
325 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
327 /* remove ICV */ 305 /* remove ICV */
@@ -333,11 +311,16 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
333 311
334static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) 312static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
335{ 313{
314 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
315
316 info->control.iv_len = WEP_IV_LEN;
317 info->control.icv_len = WEP_ICV_LEN;
318
336 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 319 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
337 if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) 320 if (ieee80211_wep_encrypt(tx->local, skb, tx->key))
338 return -1; 321 return -1;
339 } else { 322 } else {
340 tx->control->key_idx = tx->key->conf.hw_key_idx; 323 info->control.hw_key = &tx->key->conf;
341 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 324 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
342 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) 325 if (!ieee80211_wep_add_iv(tx->local, skb, tx->key))
343 return -1; 326 return -1;
@@ -349,8 +332,6 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
349ieee80211_tx_result 332ieee80211_tx_result
350ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) 333ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
351{ 334{
352 tx->control->iv_len = WEP_IV_LEN;
353 tx->control->icv_len = WEP_ICV_LEN;
354 ieee80211_tx_set_protected(tx); 335 ieee80211_tx_set_protected(tx);
355 336
356 if (wep_encrypt_skb(tx, tx->skb) < 0) { 337 if (wep_encrypt_skb(tx, tx->skb) < 0) {
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 363779c50658..e587172115b8 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -26,7 +26,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
26 struct ieee80211_key *key); 26 struct ieee80211_key *key);
27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, 27int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
28 struct ieee80211_key *key); 28 struct ieee80211_key *key);
29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 29u8 *ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
30 30
31ieee80211_rx_result 31ieee80211_rx_result
32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); 32ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx);
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index e8404212ad57..1babb979fe00 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -142,7 +142,39 @@ static int ieee80211_ioctl_giwname(struct net_device *dev,
142 struct iw_request_info *info, 142 struct iw_request_info *info,
143 char *name, char *extra) 143 char *name, char *extra)
144{ 144{
145 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
146 struct ieee80211_supported_band *sband;
147 u8 is_ht = 0, is_a = 0, is_b = 0, is_g = 0;
148
149
150 sband = local->hw.wiphy->bands[IEEE80211_BAND_5GHZ];
151 if (sband) {
152 is_a = 1;
153 is_ht |= sband->ht_info.ht_supported;
154 }
155
156 sband = local->hw.wiphy->bands[IEEE80211_BAND_2GHZ];
157 if (sband) {
158 int i;
159 /* Check for mandatory rates */
160 for (i = 0; i < sband->n_bitrates; i++) {
161 if (sband->bitrates[i].bitrate == 10)
162 is_b = 1;
163 if (sband->bitrates[i].bitrate == 60)
164 is_g = 1;
165 }
166 is_ht |= sband->ht_info.ht_supported;
167 }
168
145 strcpy(name, "IEEE 802.11"); 169 strcpy(name, "IEEE 802.11");
170 if (is_a)
171 strcat(name, "a");
172 if (is_b)
173 strcat(name, "b");
174 if (is_g)
175 strcat(name, "g");
176 if (is_ht)
177 strcat(name, "n");
146 178
147 return 0; 179 return 0;
148} 180}
@@ -176,14 +208,26 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
176 range->num_encoding_sizes = 2; 208 range->num_encoding_sizes = 2;
177 range->max_encoding_tokens = NUM_DEFAULT_KEYS; 209 range->max_encoding_tokens = NUM_DEFAULT_KEYS;
178 210
179 range->max_qual.qual = local->hw.max_signal; 211 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC ||
180 range->max_qual.level = local->hw.max_rssi; 212 local->hw.flags & IEEE80211_HW_SIGNAL_DB)
181 range->max_qual.noise = local->hw.max_noise; 213 range->max_qual.level = local->hw.max_signal;
214 else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
215 range->max_qual.level = -110;
216 else
217 range->max_qual.level = 0;
218
219 if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
220 range->max_qual.noise = -110;
221 else
222 range->max_qual.noise = 0;
223
224 range->max_qual.qual = 100;
182 range->max_qual.updated = local->wstats_flags; 225 range->max_qual.updated = local->wstats_flags;
183 226
184 range->avg_qual.qual = local->hw.max_signal/2; 227 range->avg_qual.qual = 50;
185 range->avg_qual.level = 0; 228 /* not always true but better than nothing */
186 range->avg_qual.noise = 0; 229 range->avg_qual.level = range->max_qual.level / 2;
230 range->avg_qual.noise = range->max_qual.noise / 2;
187 range->avg_qual.updated = local->wstats_flags; 231 range->avg_qual.updated = local->wstats_flags;
188 232
189 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 233 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
@@ -562,7 +606,7 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
562 if (local->sta_sw_scanning || local->sta_hw_scanning) 606 if (local->sta_sw_scanning || local->sta_hw_scanning)
563 return -EAGAIN; 607 return -EAGAIN;
564 608
565 res = ieee80211_sta_scan_results(dev, extra, data->length); 609 res = ieee80211_sta_scan_results(dev, info, extra, data->length);
566 if (res >= 0) { 610 if (res >= 0) {
567 data->length = res; 611 data->length = res;
568 return 0; 612 return 0;
@@ -716,6 +760,9 @@ static int ieee80211_ioctl_siwrts(struct net_device *dev,
716 760
717 if (rts->disabled) 761 if (rts->disabled)
718 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 762 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
763 else if (!rts->fixed)
764 /* if the rts value is not fixed, then take default */
765 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
719 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD) 766 else if (rts->value < 0 || rts->value > IEEE80211_MAX_RTS_THRESHOLD)
720 return -EINVAL; 767 return -EINVAL;
721 else 768 else
@@ -944,6 +991,19 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev,
944 erq->length = sdata->keys[idx]->conf.keylen; 991 erq->length = sdata->keys[idx]->conf.keylen;
945 erq->flags |= IW_ENCODE_ENABLED; 992 erq->flags |= IW_ENCODE_ENABLED;
946 993
994 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) {
995 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
996 switch (ifsta->auth_alg) {
997 case WLAN_AUTH_OPEN:
998 case WLAN_AUTH_LEAP:
999 erq->flags |= IW_ENCODE_OPEN;
1000 break;
1001 case WLAN_AUTH_SHARED_KEY:
1002 erq->flags |= IW_ENCODE_RESTRICTED;
1003 break;
1004 }
1005 }
1006
947 return 0; 1007 return 0;
948} 1008}
949 1009
@@ -1015,8 +1075,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
1015 wstats->qual.noise = 0; 1075 wstats->qual.noise = 0;
1016 wstats->qual.updated = IW_QUAL_ALL_INVALID; 1076 wstats->qual.updated = IW_QUAL_ALL_INVALID;
1017 } else { 1077 } else {
1018 wstats->qual.level = sta->last_rssi; 1078 wstats->qual.level = sta->last_signal;
1019 wstats->qual.qual = sta->last_signal; 1079 wstats->qual.qual = sta->last_qual;
1020 wstats->qual.noise = sta->last_noise; 1080 wstats->qual.noise = sta->last_noise;
1021 wstats->qual.updated = local->wstats_flags; 1081 wstats->qual.updated = local->wstats_flags;
1022 } 1082 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 5d09e8698b57..ffe1af82fa4d 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -19,16 +19,22 @@
19#include "wme.h" 19#include "wme.h"
20 20
21/* maximum number of hardware queues we support. */ 21/* maximum number of hardware queues we support. */
22#define TC_80211_MAX_QUEUES 16 22#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
23/* current number of hardware queues we support. */
24#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
23 25
26/*
27 * Default mapping in classifier to work with default
28 * queue setup.
29 */
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; 30const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
25 31
26struct ieee80211_sched_data 32struct ieee80211_sched_data
27{ 33{
28 unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)]; 34 unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
29 struct tcf_proto *filter_list; 35 struct tcf_proto *filter_list;
30 struct Qdisc *queues[TC_80211_MAX_QUEUES]; 36 struct Qdisc *queues[QD_MAX_QUEUES];
31 struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; 37 struct sk_buff_head requeued[QD_MAX_QUEUES];
32}; 38};
33 39
34static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0}; 40static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
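
The classifier keeps the static ieee802_1d_to_ac[] table: the 802.1d user priority taken from skb->priority indexes into it to pick an access-category queue, where queue 0 is the highest-priority AC (voice) in the default mac80211 queue setup. A standalone print-out of that mapping:

    #include <stdio.h>

    /* Same table as in the hunk above: 802.1d user priority -> AC queue,
     * queue 0 being the highest-priority AC in the default mac80211 setup. */
    static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

    int main(void)
    {
        int prio;

        for (prio = 0; prio < 8; prio++)
            printf("802.1d priority %d -> queue %d\n",
                   prio, ieee802_1d_to_ac[prio]);

        /* non-QoS data frames get skb->priority = 0, i.e. queue 2 (best effort) */
        return 0;
    }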
@@ -95,28 +101,22 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
95 101
96/* positive return value indicates which queue to use 102/* positive return value indicates which queue to use
97 * negative return value indicates to drop the frame */ 103 * negative return value indicates to drop the frame */
98static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) 104static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
99{ 105{
100 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 106 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
102 unsigned short fc = le16_to_cpu(hdr->frame_control);
103 int qos;
104 108
105 /* see if frame is data or non data frame */ 109 if (!ieee80211_is_data(hdr->frame_control)) {
106 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
107 /* management frames go on AC_VO queue, but are sent 110 /* management frames go on AC_VO queue, but are sent
108 * without QoS control fields */ 111 * without QoS control fields */
109 return IEEE80211_TX_QUEUE_DATA0; 112 return 0;
110 } 113 }
111 114
112 if (0 /* injected */) { 115 if (0 /* injected */) {
113 /* use AC from radiotap */ 116 /* use AC from radiotap */
114 } 117 }
115 118
116 /* is this a QoS frame? */ 119 if (!ieee80211_is_data_qos(hdr->frame_control)) {
117 qos = fc & IEEE80211_STYPE_QOS_DATA;
118
119 if (!qos) {
120 skb->priority = 0; /* required for correct WPA/11i MIC */ 120 skb->priority = 0; /* required for correct WPA/11i MIC */
121 return ieee802_1d_to_ac[skb->priority]; 121 return ieee802_1d_to_ac[skb->priority];
122 } 122 }
@@ -141,29 +141,28 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
141static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) 141static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
142{ 142{
143 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 143 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
144 struct ieee80211_hw *hw = &local->hw;
144 struct ieee80211_sched_data *q = qdisc_priv(qd); 145 struct ieee80211_sched_data *q = qdisc_priv(qd);
145 struct ieee80211_tx_packet_data *pkt_data = 146 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
146 (struct ieee80211_tx_packet_data *) skb->cb;
147 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 147 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
148 unsigned short fc = le16_to_cpu(hdr->frame_control);
149 struct Qdisc *qdisc; 148 struct Qdisc *qdisc;
150 int err, queue;
151 struct sta_info *sta; 149 struct sta_info *sta;
150 int err, queue;
152 u8 tid; 151 u8 tid;
153 152
154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { 153 if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
155 queue = pkt_data->queue; 154 queue = skb_get_queue_mapping(skb);
156 rcu_read_lock(); 155 rcu_read_lock();
157 sta = sta_info_get(local, hdr->addr1); 156 sta = sta_info_get(local, hdr->addr1);
158 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 157 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
159 if (sta) { 158 if (sta) {
160 int ampdu_queue = sta->tid_to_tx_q[tid]; 159 int ampdu_queue = sta->tid_to_tx_q[tid];
161 if ((ampdu_queue < local->hw.queues) && 160 if ((ampdu_queue < QD_NUM(hw)) &&
162 test_bit(ampdu_queue, q->qdisc_pool)) { 161 test_bit(ampdu_queue, q->qdisc_pool)) {
163 queue = ampdu_queue; 162 queue = ampdu_queue;
164 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 163 info->flags |= IEEE80211_TX_CTL_AMPDU;
165 } else { 164 } else {
166 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 165 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
167 } 166 }
168 } 167 }
169 rcu_read_unlock(); 168 rcu_read_unlock();
@@ -174,18 +173,20 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
174 173
175 queue = classify80211(skb, qd); 174 queue = classify80211(skb, qd);
176 175
176 if (unlikely(queue >= local->hw.queues))
177 queue = local->hw.queues - 1;
178
177 /* now we know the 1d priority, fill in the QoS header if there is one 179 /* now we know the 1d priority, fill in the QoS header if there is one
178 */ 180 */
179 if (WLAN_FC_IS_QOS_DATA(fc)) { 181 if (ieee80211_is_data_qos(hdr->frame_control)) {
180 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; 182 u8 *p = ieee80211_get_qos_ctl(hdr);
181 u8 ack_policy = 0; 183 u8 ack_policy = 0;
182 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 184 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
183 if (local->wifi_wme_noack_test) 185 if (local->wifi_wme_noack_test)
184 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 186 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
185 QOS_CONTROL_ACK_POLICY_SHIFT; 187 QOS_CONTROL_ACK_POLICY_SHIFT;
186 /* qos header is 2 bytes, second reserved */ 188 /* qos header is 2 bytes, second reserved */
187 *p = ack_policy | tid; 189 *p++ = ack_policy | tid;
188 p++;
189 *p = 0; 190 *p = 0;
190 191
191 rcu_read_lock(); 192 rcu_read_lock();
@@ -193,35 +194,24 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
193 sta = sta_info_get(local, hdr->addr1); 194 sta = sta_info_get(local, hdr->addr1);
194 if (sta) { 195 if (sta) {
195 int ampdu_queue = sta->tid_to_tx_q[tid]; 196 int ampdu_queue = sta->tid_to_tx_q[tid];
196 if ((ampdu_queue < local->hw.queues) && 197 if ((ampdu_queue < QD_NUM(hw)) &&
197 test_bit(ampdu_queue, q->qdisc_pool)) { 198 test_bit(ampdu_queue, q->qdisc_pool)) {
198 queue = ampdu_queue; 199 queue = ampdu_queue;
199 pkt_data->flags |= IEEE80211_TXPD_AMPDU; 200 info->flags |= IEEE80211_TX_CTL_AMPDU;
200 } else { 201 } else {
201 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU; 202 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
202 } 203 }
203 } 204 }
204 205
205 rcu_read_unlock(); 206 rcu_read_unlock();
206 } 207 }
207 208
208 if (unlikely(queue >= local->hw.queues)) {
209#if 0
210 if (net_ratelimit()) {
211 printk(KERN_DEBUG "%s - queue=%d (hw does not "
212 "support) -> %d\n",
213 __func__, queue, local->hw.queues - 1);
214 }
215#endif
216 queue = local->hw.queues - 1;
217 }
218
219 if (unlikely(queue < 0)) { 209 if (unlikely(queue < 0)) {
220 kfree_skb(skb); 210 kfree_skb(skb);
221 err = NET_XMIT_DROP; 211 err = NET_XMIT_DROP;
222 } else { 212 } else {
223 tid = skb->priority & QOS_CONTROL_TAG1D_MASK; 213 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
224 pkt_data->queue = (unsigned int) queue; 214 skb_set_queue_mapping(skb, queue);
225 qdisc = q->queues[queue]; 215 qdisc = q->queues[queue];
226 err = qdisc->enqueue(skb, qdisc); 216 err = qdisc->enqueue(skb, qdisc);
227 if (err == NET_XMIT_SUCCESS) { 217 if (err == NET_XMIT_SUCCESS) {
@@ -242,13 +232,11 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
242static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd) 232static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
243{ 233{
244 struct ieee80211_sched_data *q = qdisc_priv(qd); 234 struct ieee80211_sched_data *q = qdisc_priv(qd);
245 struct ieee80211_tx_packet_data *pkt_data =
246 (struct ieee80211_tx_packet_data *) skb->cb;
247 struct Qdisc *qdisc; 235 struct Qdisc *qdisc;
248 int err; 236 int err;
249 237
250 /* we recorded which queue to use earlier! */ 238 /* we recorded which queue to use earlier! */
251 qdisc = q->queues[pkt_data->queue]; 239 qdisc = q->queues[skb_get_queue_mapping(skb)];
252 240
253 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) { 241 if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
254 qd->q.qlen++; 242 qd->q.qlen++;
@@ -270,13 +258,10 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
270 int queue; 258 int queue;
271 259
272 /* check all the h/w queues in numeric/priority order */ 260 /* check all the h/w queues in numeric/priority order */
273 for (queue = 0; queue < hw->queues; queue++) { 261 for (queue = 0; queue < QD_NUM(hw); queue++) {
274 /* see if there is room in this hardware queue */ 262 /* see if there is room in this hardware queue */
275 if ((test_bit(IEEE80211_LINK_STATE_XOFF, 263 if (__netif_subqueue_stopped(local->mdev, queue) ||
276 &local->state[queue])) || 264 !test_bit(queue, q->qdisc_pool))
277 (test_bit(IEEE80211_LINK_STATE_PENDING,
278 &local->state[queue])) ||
279 (!test_bit(queue, q->qdisc_pool)))
280 continue; 265 continue;
281 266
282 /* there is space - try and get a frame */ 267 /* there is space - try and get a frame */
@@ -308,7 +293,7 @@ static void wme_qdiscop_reset(struct Qdisc* qd)
308 293
309 /* QUESTION: should we have some hardware flush functionality here? */ 294 /* QUESTION: should we have some hardware flush functionality here? */
310 295
311 for (queue = 0; queue < hw->queues; queue++) { 296 for (queue = 0; queue < QD_NUM(hw); queue++) {
312 skb_queue_purge(&q->requeued[queue]); 297 skb_queue_purge(&q->requeued[queue]);
313 qdisc_reset(q->queues[queue]); 298 qdisc_reset(q->queues[queue]);
314 } 299 }
@@ -325,7 +310,7 @@ static void wme_qdiscop_destroy(struct Qdisc* qd)
325 310
326 tcf_destroy_chain(&q->filter_list); 311 tcf_destroy_chain(&q->filter_list);
327 312
328 for (queue=0; queue < hw->queues; queue++) { 313 for (queue = 0; queue < QD_NUM(hw); queue++) {
329 skb_queue_purge(&q->requeued[queue]); 314 skb_queue_purge(&q->requeued[queue]);
330 qdisc_destroy(q->queues[queue]); 315 qdisc_destroy(q->queues[queue]);
331 q->queues[queue] = &noop_qdisc; 316 q->queues[queue] = &noop_qdisc;
@@ -336,17 +321,6 @@ static void wme_qdiscop_destroy(struct Qdisc* qd)
336/* called whenever parameters are updated on existing qdisc */ 321/* called whenever parameters are updated on existing qdisc */
337static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt) 322static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
338{ 323{
339/* struct ieee80211_sched_data *q = qdisc_priv(qd);
340*/
341 /* check our options block is the right size */
342 /* copy any options to our local structure */
343/* Ignore options block for now - always use static mapping
344 struct tc_ieee80211_qopt *qopt = nla_data(opt);
345
346 if (opt->nla_len < nla_attr_size(sizeof(*qopt)))
347 return -EINVAL;
348 memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
349*/
350 return 0; 324 return 0;
351} 325}
352 326
@@ -357,7 +331,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
357 struct ieee80211_sched_data *q = qdisc_priv(qd); 331 struct ieee80211_sched_data *q = qdisc_priv(qd);
358 struct net_device *dev = qd->dev; 332 struct net_device *dev = qd->dev;
359 struct ieee80211_local *local; 333 struct ieee80211_local *local;
360 int queues; 334 struct ieee80211_hw *hw;
361 int err = 0, i; 335 int err = 0, i;
362 336
363 /* check that device is a mac80211 device */ 337 /* check that device is a mac80211 device */
@@ -365,29 +339,26 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
365 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) 339 dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
366 return -EINVAL; 340 return -EINVAL;
367 341
368 /* check this device is an ieee80211 master type device */ 342 local = wdev_priv(dev->ieee80211_ptr);
369 if (dev->type != ARPHRD_IEEE80211) 343 hw = &local->hw;
344
345 /* only allow on master dev */
346 if (dev != local->mdev)
370 return -EINVAL; 347 return -EINVAL;
371 348
372 /* check that there is no qdisc currently attached to device 349 /* ensure that we are root qdisc */
373 * this ensures that we will be the root qdisc. (I can't find a better 350 if (qd->parent != TC_H_ROOT)
374 * way to test this explicitly) */
375 if (dev->qdisc_sleeping != &noop_qdisc)
376 return -EINVAL; 351 return -EINVAL;
377 352
378 if (qd->flags & TCQ_F_INGRESS) 353 if (qd->flags & TCQ_F_INGRESS)
379 return -EINVAL; 354 return -EINVAL;
380 355
381 local = wdev_priv(dev->ieee80211_ptr);
382 queues = local->hw.queues;
383
384 /* if options were passed in, set them */ 356 /* if options were passed in, set them */
385 if (opt) { 357 if (opt)
386 err = wme_qdiscop_tune(qd, opt); 358 err = wme_qdiscop_tune(qd, opt);
387 }
388 359
389 /* create child queues */ 360 /* create child queues */
390 for (i = 0; i < queues; i++) { 361 for (i = 0; i < QD_NUM(hw); i++) {
391 skb_queue_head_init(&q->requeued[i]); 362 skb_queue_head_init(&q->requeued[i]);
392 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops, 363 q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
393 qd->handle); 364 qd->handle);
@@ -398,8 +369,8 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
398 } 369 }
399 } 370 }
400 371
401 /* reserve all legacy QoS queues */ 372 /* non-aggregation queues: reserve/mark as used */
402 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++) 373 for (i = 0; i < local->hw.queues; i++)
403 set_bit(i, q->qdisc_pool); 374 set_bit(i, q->qdisc_pool);
404 375
405 return err; 376 return err;
@@ -407,16 +378,6 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
407 378
408static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb) 379static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
409{ 380{
410/* struct ieee80211_sched_data *q = qdisc_priv(qd);
411 unsigned char *p = skb->tail;
412 struct tc_ieee80211_qopt opt;
413
414 memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
415 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
416*/ return skb->len;
417/*
418nla_put_failure:
419 skb_trim(skb, p - skb->data);*/
420 return -1; 381 return -1;
421} 382}
422 383
@@ -429,7 +390,7 @@ static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
429 struct ieee80211_hw *hw = &local->hw; 390 struct ieee80211_hw *hw = &local->hw;
430 unsigned long queue = arg - 1; 391 unsigned long queue = arg - 1;
431 392
432 if (queue >= hw->queues) 393 if (queue >= QD_NUM(hw))
433 return -EINVAL; 394 return -EINVAL;
434 395
435 if (!new) 396 if (!new)
@@ -453,7 +414,7 @@ wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
453 struct ieee80211_hw *hw = &local->hw; 414 struct ieee80211_hw *hw = &local->hw;
454 unsigned long queue = arg - 1; 415 unsigned long queue = arg - 1;
455 416
456 if (queue >= hw->queues) 417 if (queue >= QD_NUM(hw))
457 return NULL; 418 return NULL;
458 419
459 return q->queues[queue]; 420 return q->queues[queue];
@@ -466,7 +427,7 @@ static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
466 struct ieee80211_hw *hw = &local->hw; 427 struct ieee80211_hw *hw = &local->hw;
467 unsigned long queue = TC_H_MIN(classid); 428 unsigned long queue = TC_H_MIN(classid);
468 429
469 if (queue - 1 >= hw->queues) 430 if (queue - 1 >= QD_NUM(hw))
470 return 0; 431 return 0;
471 432
472 return queue; 433 return queue;
@@ -492,7 +453,7 @@ static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
492 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 453 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
493 struct ieee80211_hw *hw = &local->hw; 454 struct ieee80211_hw *hw = &local->hw;
494 455
495 if (cl - 1 > hw->queues) 456 if (cl - 1 > QD_NUM(hw))
496 return -ENOENT; 457 return -ENOENT;
497 458
498 /* TODO: put code to program hardware queue parameters here, 459 /* TODO: put code to program hardware queue parameters here,
@@ -509,7 +470,7 @@ static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
509 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 470 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
510 struct ieee80211_hw *hw = &local->hw; 471 struct ieee80211_hw *hw = &local->hw;
511 472
512 if (cl - 1 > hw->queues) 473 if (cl - 1 > QD_NUM(hw))
513 return -ENOENT; 474 return -ENOENT;
514 return 0; 475 return 0;
515} 476}
@@ -522,7 +483,7 @@ static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
522 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); 483 struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
523 struct ieee80211_hw *hw = &local->hw; 484 struct ieee80211_hw *hw = &local->hw;
524 485
525 if (cl - 1 > hw->queues) 486 if (cl - 1 > QD_NUM(hw))
526 return -ENOENT; 487 return -ENOENT;
527 tcm->tcm_handle = TC_H_MIN(cl); 488 tcm->tcm_handle = TC_H_MIN(cl);
528 tcm->tcm_parent = qd->handle; 489 tcm->tcm_parent = qd->handle;
@@ -540,7 +501,7 @@ static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
540 if (arg->stop) 501 if (arg->stop)
541 return; 502 return;
542 503
543 for (queue = 0; queue < hw->queues; queue++) { 504 for (queue = 0; queue < QD_NUM(hw); queue++) {
544 if (arg->count < arg->skip) { 505 if (arg->count < arg->skip) {
545 arg->count++; 506 arg->count++;
546 continue; 507 continue;
@@ -657,10 +618,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
657 DECLARE_MAC_BUF(mac); 618 DECLARE_MAC_BUF(mac);
658 619
659 /* prepare the filter and save it for the SW queue 620 /* prepare the filter and save it for the SW queue
660 * matching the recieved HW queue */ 621 * matching the received HW queue */
622
623 if (!local->hw.ampdu_queues)
624 return -EPERM;
661 625
662 /* try to get a Qdisc from the pool */ 626 /* try to get a Qdisc from the pool */
663 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++) 627 for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
664 if (!test_and_set_bit(i, q->qdisc_pool)) { 628 if (!test_and_set_bit(i, q->qdisc_pool)) {
665 ieee80211_stop_queue(local_to_hw(local), i); 629 ieee80211_stop_queue(local_to_hw(local), i);
666 sta->tid_to_tx_q[tid] = i; 630 sta->tid_to_tx_q[tid] = i;
@@ -689,13 +653,14 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
689 struct sta_info *sta, u16 tid, 653 struct sta_info *sta, u16 tid,
690 u8 requeue) 654 u8 requeue)
691{ 655{
656 struct ieee80211_hw *hw = &local->hw;
692 struct ieee80211_sched_data *q = 657 struct ieee80211_sched_data *q =
693 qdisc_priv(local->mdev->qdisc_sleeping); 658 qdisc_priv(local->mdev->qdisc_sleeping);
694 int agg_queue = sta->tid_to_tx_q[tid]; 659 int agg_queue = sta->tid_to_tx_q[tid];
695 660
696 /* return the qdisc to the pool */ 661 /* return the qdisc to the pool */
697 clear_bit(agg_queue, q->qdisc_pool); 662 clear_bit(agg_queue, q->qdisc_pool);
698 sta->tid_to_tx_q[tid] = local->hw.queues; 663 sta->tid_to_tx_q[tid] = QD_NUM(hw);
699 664
700 if (requeue) 665 if (requeue)
701 ieee80211_requeue(local, agg_queue); 666 ieee80211_requeue(local, agg_queue);
@@ -714,7 +679,6 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue)
714 if (!qdisc || !qdisc->dequeue) 679 if (!qdisc || !qdisc->dequeue)
715 return; 680 return;
716 681
717 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
718 for (len = qdisc->q.qlen; len > 0; len--) { 682 for (len = qdisc->q.qlen; len > 0; len--) {
719 skb = qdisc->dequeue(qdisc); 683 skb = qdisc->dequeue(qdisc);
720 root_qd->q.qlen--; 684 root_qd->q.qlen--;
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index fcc6b05508cc..bbdb53344817 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -31,7 +31,7 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
31 return (fc & 0x8C) == 0x88; 31 return (fc & 0x8C) == 0x88;
32} 32}
33 33
34#ifdef CONFIG_NET_SCHED 34#ifdef CONFIG_MAC80211_QOS
35void ieee80211_install_qdisc(struct net_device *dev); 35void ieee80211_install_qdisc(struct net_device *dev);
36int ieee80211_qdisc_installed(struct net_device *dev); 36int ieee80211_qdisc_installed(struct net_device *dev);
37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, 37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 45709ada8fee..b414d5d92f38 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -24,46 +24,22 @@ static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da,
24{ 24{
25 struct ieee80211_hdr *hdr; 25 struct ieee80211_hdr *hdr;
26 size_t hdrlen; 26 size_t hdrlen;
27 u16 fc; 27 __le16 fc;
28 int a4_included;
29 u8 *pos;
30 28
31 hdr = (struct ieee80211_hdr *) skb->data; 29 hdr = (struct ieee80211_hdr *)skb->data;
32 fc = le16_to_cpu(hdr->frame_control); 30 fc = hdr->frame_control;
33
34 hdrlen = 24;
35 if ((fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) ==
36 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
37 hdrlen += ETH_ALEN;
38 *sa = hdr->addr4;
39 *da = hdr->addr3;
40 } else if (fc & IEEE80211_FCTL_FROMDS) {
41 *sa = hdr->addr3;
42 *da = hdr->addr1;
43 } else if (fc & IEEE80211_FCTL_TODS) {
44 *sa = hdr->addr2;
45 *da = hdr->addr3;
46 } else {
47 *sa = hdr->addr2;
48 *da = hdr->addr1;
49 }
50 31
51 if (fc & 0x80) 32 hdrlen = ieee80211_hdrlen(fc);
52 hdrlen += 2; 33
34 *sa = ieee80211_get_SA(hdr);
35 *da = ieee80211_get_DA(hdr);
53 36
54 *data = skb->data + hdrlen; 37 *data = skb->data + hdrlen;
55 *data_len = skb->len - hdrlen; 38 *data_len = skb->len - hdrlen;
56 39
57 a4_included = (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 40 if (ieee80211_is_data_qos(fc))
58 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); 41 *qos_tid = (*ieee80211_get_qos_ctl(hdr) & 0x0f) | 0x80;
59 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 42 else
60 fc & IEEE80211_STYPE_QOS_DATA) {
61 pos = (u8 *) &hdr->addr4;
62 if (a4_included)
63 pos += 6;
64 *qos_tid = pos[0] & 0x0f;
65 *qos_tid |= 0x80; /* qos_included flag */
66 } else
67 *qos_tid = 0; 43 *qos_tid = 0;
68 44
69 return skb->len < hdrlen ? -1 : 0; 45 return skb->len < hdrlen ? -1 : 0;
@@ -73,12 +49,13 @@ static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da,
73ieee80211_tx_result 49ieee80211_tx_result
74ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) 50ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
75{ 51{
76 u8 *data, *sa, *da, *key, *mic, qos_tid; 52 u8 *data, *sa, *da, *key, *mic, qos_tid, key_offset;
77 size_t data_len; 53 size_t data_len;
78 u16 fc; 54 u16 fc;
79 struct sk_buff *skb = tx->skb; 55 struct sk_buff *skb = tx->skb;
80 int authenticator; 56 int authenticator;
81 int wpa_test = 0; 57 int wpa_test = 0;
58 int tail;
82 59
83 fc = tx->fc; 60 fc = tx->fc;
84 61
@@ -98,24 +75,25 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
98 return TX_CONTINUE; 75 return TX_CONTINUE;
99 } 76 }
100 77
101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { 78 tail = MICHAEL_MIC_LEN;
102 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 79 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
103 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, 80 tail += TKIP_ICV_LEN;
104 MICHAEL_MIC_LEN + TKIP_ICV_LEN, 81
105 GFP_ATOMIC))) { 82 if (WARN_ON(skb_tailroom(skb) < tail ||
106 printk(KERN_DEBUG "%s: failed to allocate more memory " 83 skb_headroom(skb) < TKIP_IV_LEN))
107 "for Michael MIC\n", tx->dev->name); 84 return TX_DROP;
108 return TX_DROP;
109 }
110 }
111 85
112#if 0 86#if 0
113 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ 87 authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
114#else 88#else
115 authenticator = 1; 89 authenticator = 1;
116#endif 90#endif
117 key = &tx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_TX_MIC_KEY : 91 /* At this point we know we're using ALG_TKIP. To get the MIC key
118 ALG_TKIP_TEMP_AUTH_RX_MIC_KEY]; 92 * we now will rely on the offset from the ieee80211_key_conf::key */
93 key_offset = authenticator ?
94 NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
95 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
96 key = &tx->key->conf.key[key_offset];
119 mic = skb_put(skb, MICHAEL_MIC_LEN); 97 mic = skb_put(skb, MICHAEL_MIC_LEN);
120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 98 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
121 99
@@ -126,7 +104,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
126ieee80211_rx_result 104ieee80211_rx_result
127ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) 105ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
128{ 106{
129 u8 *data, *sa, *da, *key = NULL, qos_tid; 107 u8 *data, *sa, *da, *key = NULL, qos_tid, key_offset;
130 size_t data_len; 108 size_t data_len;
131 u16 fc; 109 u16 fc;
132 u8 mic[MICHAEL_MIC_LEN]; 110 u8 mic[MICHAEL_MIC_LEN];
@@ -157,16 +135,17 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
157#else 135#else
158 authenticator = 1; 136 authenticator = 1;
159#endif 137#endif
160 key = &rx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_RX_MIC_KEY : 138 /* At this point we know we're using ALG_TKIP. To get the MIC key
161 ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; 139 * we now will rely on the offset from the ieee80211_key_conf::key */
140 key_offset = authenticator ?
141 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
142 NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
143 key = &rx->key->conf.key[key_offset];
162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 144 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { 145 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
164 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 146 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
165 return RX_DROP_UNUSABLE; 147 return RX_DROP_UNUSABLE;
166 148
167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from "
168 "%s\n", rx->dev->name, print_mac(mac, sa));
169
170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 149 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx,
171 (void *) skb->data); 150 (void *) skb->data);
172 return RX_DROP_UNUSABLE; 151 return RX_DROP_UNUSABLE;
@@ -176,59 +155,58 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
176 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 155 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
177 156
178 /* update IV in key information to be able to detect replays */ 157 /* update IV in key information to be able to detect replays */
179 rx->key->u.tkip.iv32_rx[rx->queue] = rx->tkip_iv32; 158 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
180 rx->key->u.tkip.iv16_rx[rx->queue] = rx->tkip_iv16; 159 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
181 160
182 return RX_CONTINUE; 161 return RX_CONTINUE;
183} 162}
184 163
185 164
186static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, 165static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
187 struct sk_buff *skb, int test)
188{ 166{
189 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 167 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
190 struct ieee80211_key *key = tx->key; 168 struct ieee80211_key *key = tx->key;
191 int hdrlen, len, tailneed; 169 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
192 u16 fc; 170 unsigned int hdrlen;
171 int len, tail;
193 u8 *pos; 172 u8 *pos;
194 173
195 fc = le16_to_cpu(hdr->frame_control); 174 info->control.icv_len = TKIP_ICV_LEN;
196 hdrlen = ieee80211_get_hdrlen(fc); 175 info->control.iv_len = TKIP_IV_LEN;
176
177 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
178 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
179 /* hwaccel - with no need for preallocated room for IV/ICV */
180 info->control.hw_key = &tx->key->conf;
181 return 0;
182 }
183
184 hdrlen = ieee80211_hdrlen(hdr->frame_control);
197 len = skb->len - hdrlen; 185 len = skb->len - hdrlen;
198 186
199 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 187 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
200 tailneed = 0; 188 tail = 0;
201 else 189 else
202 tailneed = TKIP_ICV_LEN; 190 tail = TKIP_ICV_LEN;
203 191
204 if ((skb_headroom(skb) < TKIP_IV_LEN || 192 if (WARN_ON(skb_tailroom(skb) < tail ||
205 skb_tailroom(skb) < tailneed)) { 193 skb_headroom(skb) < TKIP_IV_LEN))
206 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 194 return -1;
207 if (unlikely(pskb_expand_head(skb, TKIP_IV_LEN, tailneed,
208 GFP_ATOMIC)))
209 return -1;
210 }
211 195
212 pos = skb_push(skb, TKIP_IV_LEN); 196 pos = skb_push(skb, TKIP_IV_LEN);
213 memmove(pos, pos + TKIP_IV_LEN, hdrlen); 197 memmove(pos, pos + TKIP_IV_LEN, hdrlen);
214 pos += hdrlen; 198 pos += hdrlen;
215 199
216 /* Increase IV for the frame */ 200 /* Increase IV for the frame */
217 key->u.tkip.iv16++; 201 key->u.tkip.tx.iv16++;
218 if (key->u.tkip.iv16 == 0) 202 if (key->u.tkip.tx.iv16 == 0)
219 key->u.tkip.iv32++; 203 key->u.tkip.tx.iv32++;
220 204
221 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 205 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
222 hdr = (struct ieee80211_hdr *)skb->data;
223
224 /* hwaccel - with preallocated room for IV */ 206 /* hwaccel - with preallocated room for IV */
225 ieee80211_tkip_add_iv(pos, key, 207 ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
226 (u8) (key->u.tkip.iv16 >> 8),
227 (u8) (((key->u.tkip.iv16 >> 8) | 0x20) &
228 0x7f),
229 (u8) key->u.tkip.iv16);
230 208
231 tx->control->key_idx = tx->key->conf.hw_key_idx; 209 info->control.hw_key = &tx->key->conf;
232 return 0; 210 return 0;
233 } 211 }
234 212
@@ -246,28 +224,16 @@ ieee80211_tx_result
246ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 224ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
247{ 225{
248 struct sk_buff *skb = tx->skb; 226 struct sk_buff *skb = tx->skb;
249 int wpa_test = 0, test = 0;
250 227
251 tx->control->icv_len = TKIP_ICV_LEN;
252 tx->control->iv_len = TKIP_IV_LEN;
253 ieee80211_tx_set_protected(tx); 228 ieee80211_tx_set_protected(tx);
254 229
255 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 230 if (tkip_encrypt_skb(tx, skb) < 0)
256 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
257 !wpa_test) {
258 /* hwaccel - with no need for preallocated room for IV/ICV */
259 tx->control->key_idx = tx->key->conf.hw_key_idx;
260 return TX_CONTINUE;
261 }
262
263 if (tkip_encrypt_skb(tx, skb, test) < 0)
264 return TX_DROP; 231 return TX_DROP;
265 232
266 if (tx->extra_frag) { 233 if (tx->extra_frag) {
267 int i; 234 int i;
268 for (i = 0; i < tx->num_extra_frag; i++) { 235 for (i = 0; i < tx->num_extra_frag; i++) {
269 if (tkip_encrypt_skb(tx, tx->extra_frag[i], test) 236 if (tkip_encrypt_skb(tx, tx->extra_frag[i]) < 0)
270 < 0)
271 return TX_DROP; 237 return TX_DROP;
272 } 238 }
273 } 239 }
@@ -280,14 +246,12 @@ ieee80211_rx_result
280ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) 246ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
281{ 247{
282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 248 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
283 u16 fc;
284 int hdrlen, res, hwaccel = 0, wpa_test = 0; 249 int hdrlen, res, hwaccel = 0, wpa_test = 0;
285 struct ieee80211_key *key = rx->key; 250 struct ieee80211_key *key = rx->key;
286 struct sk_buff *skb = rx->skb; 251 struct sk_buff *skb = rx->skb;
287 DECLARE_MAC_BUF(mac); 252 DECLARE_MAC_BUF(mac);
288 253
289 fc = le16_to_cpu(hdr->frame_control); 254 hdrlen = ieee80211_hdrlen(hdr->frame_control);
290 hdrlen = ieee80211_get_hdrlen(fc);
291 255
292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 256 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
293 return RX_CONTINUE; 257 return RX_CONTINUE;
@@ -315,15 +279,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
315 hdr->addr1, hwaccel, rx->queue, 279 hdr->addr1, hwaccel, rx->queue,
316 &rx->tkip_iv32, 280 &rx->tkip_iv32,
317 &rx->tkip_iv16); 281 &rx->tkip_iv16);
318 if (res != TKIP_DECRYPT_OK || wpa_test) { 282 if (res != TKIP_DECRYPT_OK || wpa_test)
319#ifdef CONFIG_MAC80211_DEBUG
320 if (net_ratelimit())
321 printk(KERN_DEBUG "%s: TKIP decrypt failed for RX "
322 "frame from %s (res=%d)\n", rx->dev->name,
323 print_mac(mac, rx->sta->addr), res);
324#endif /* CONFIG_MAC80211_DEBUG */
325 return RX_DROP_UNUSABLE; 283 return RX_DROP_UNUSABLE;
326 }
327 284
328 /* Trim ICV */ 285 /* Trim ICV */
329 skb_trim(skb, skb->len - TKIP_ICV_LEN); 286 skb_trim(skb, skb->len - TKIP_ICV_LEN);
@@ -429,36 +386,41 @@ static inline int ccmp_hdr2pn(u8 *pn, u8 *hdr)
429} 386}
430 387
431 388
432static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, 389static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
433 struct sk_buff *skb, int test)
434{ 390{
435 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
436 struct ieee80211_key *key = tx->key; 392 struct ieee80211_key *key = tx->key;
437 int hdrlen, len, tailneed; 393 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
438 u16 fc; 394 int hdrlen, len, tail;
439 u8 *pos, *pn, *b_0, *aad, *scratch; 395 u8 *pos, *pn, *b_0, *aad, *scratch;
440 int i; 396 int i;
441 397
398 info->control.icv_len = CCMP_MIC_LEN;
399 info->control.iv_len = CCMP_HDR_LEN;
400
401 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
402 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
 403 /* hwaccel - with no need for preallocated room for CCMP
404 * header or MIC fields */
405 info->control.hw_key = &tx->key->conf;
406 return 0;
407 }
408
442 scratch = key->u.ccmp.tx_crypto_buf; 409 scratch = key->u.ccmp.tx_crypto_buf;
443 b_0 = scratch + 3 * AES_BLOCK_LEN; 410 b_0 = scratch + 3 * AES_BLOCK_LEN;
444 aad = scratch + 4 * AES_BLOCK_LEN; 411 aad = scratch + 4 * AES_BLOCK_LEN;
445 412
446 fc = le16_to_cpu(hdr->frame_control); 413 hdrlen = ieee80211_hdrlen(hdr->frame_control);
447 hdrlen = ieee80211_get_hdrlen(fc);
448 len = skb->len - hdrlen; 414 len = skb->len - hdrlen;
449 415
450 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 416 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
451 tailneed = 0; 417 tail = 0;
452 else 418 else
453 tailneed = CCMP_MIC_LEN; 419 tail = CCMP_MIC_LEN;
454 420
455 if ((skb_headroom(skb) < CCMP_HDR_LEN || 421 if (WARN_ON(skb_tailroom(skb) < tail ||
456 skb_tailroom(skb) < tailneed)) { 422 skb_headroom(skb) < CCMP_HDR_LEN))
457 I802_DEBUG_INC(tx->local->tx_expand_skb_head); 423 return -1;
458 if (unlikely(pskb_expand_head(skb, CCMP_HDR_LEN, tailneed,
459 GFP_ATOMIC)))
460 return -1;
461 }
462 424
463 pos = skb_push(skb, CCMP_HDR_LEN); 425 pos = skb_push(skb, CCMP_HDR_LEN);
464 memmove(pos, pos + CCMP_HDR_LEN, hdrlen); 426 memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
@@ -478,7 +440,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx,
478 440
479 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 441 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
480 /* hwaccel - with preallocated room for CCMP header */ 442 /* hwaccel - with preallocated room for CCMP header */
481 tx->control->key_idx = key->conf.hw_key_idx; 443 info->control.hw_key = &tx->key->conf;
482 return 0; 444 return 0;
483 } 445 }
484 446
@@ -495,28 +457,16 @@ ieee80211_tx_result
495ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 457ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
496{ 458{
497 struct sk_buff *skb = tx->skb; 459 struct sk_buff *skb = tx->skb;
498 int test = 0;
499 460
500 tx->control->icv_len = CCMP_MIC_LEN;
501 tx->control->iv_len = CCMP_HDR_LEN;
502 ieee80211_tx_set_protected(tx); 461 ieee80211_tx_set_protected(tx);
503 462
504 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 463 if (ccmp_encrypt_skb(tx, skb) < 0)
505 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
506 /* hwaccel - with no need for preallocated room for CCMP "
507 * header or MIC fields */
508 tx->control->key_idx = tx->key->conf.hw_key_idx;
509 return TX_CONTINUE;
510 }
511
512 if (ccmp_encrypt_skb(tx, skb, test) < 0)
513 return TX_DROP; 464 return TX_DROP;
514 465
515 if (tx->extra_frag) { 466 if (tx->extra_frag) {
516 int i; 467 int i;
517 for (i = 0; i < tx->num_extra_frag; i++) { 468 for (i = 0; i < tx->num_extra_frag; i++) {
518 if (ccmp_encrypt_skb(tx, tx->extra_frag[i], test) 469 if (ccmp_encrypt_skb(tx, tx->extra_frag[i]) < 0)
519 < 0)
520 return TX_DROP; 470 return TX_DROP;
521 } 471 }
522 } 472 }
@@ -529,7 +479,6 @@ ieee80211_rx_result
529ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) 479ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
530{ 480{
531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 481 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
532 u16 fc;
533 int hdrlen; 482 int hdrlen;
534 struct ieee80211_key *key = rx->key; 483 struct ieee80211_key *key = rx->key;
535 struct sk_buff *skb = rx->skb; 484 struct sk_buff *skb = rx->skb;
@@ -537,8 +486,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
537 int data_len; 486 int data_len;
538 DECLARE_MAC_BUF(mac); 487 DECLARE_MAC_BUF(mac);
539 488
540 fc = le16_to_cpu(hdr->frame_control); 489 hdrlen = ieee80211_hdrlen(hdr->frame_control);
541 hdrlen = ieee80211_get_hdrlen(fc);
542 490
543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 491 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
544 return RX_CONTINUE; 492 return RX_CONTINUE;
@@ -554,16 +502,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen); 502 (void) ccmp_hdr2pn(pn, skb->data + hdrlen);
555 503
556 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { 504 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
557#ifdef CONFIG_MAC80211_DEBUG
558 u8 *ppn = key->u.ccmp.rx_pn[rx->queue];
559
560 printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from "
561 "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN "
562 "%02x%02x%02x%02x%02x%02x)\n", rx->dev->name,
563 print_mac(mac, rx->sta->addr),
564 pn[0], pn[1], pn[2], pn[3], pn[4], pn[5],
565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]);
566#endif /* CONFIG_MAC80211_DEBUG */
567 key->u.ccmp.replays++; 505 key->u.ccmp.replays++;
568 return RX_DROP_UNUSABLE; 506 return RX_DROP_UNUSABLE;
569 } 507 }
@@ -583,12 +521,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
583 skb->data + hdrlen + CCMP_HDR_LEN, data_len, 521 skb->data + hdrlen + CCMP_HDR_LEN, data_len,
584 skb->data + skb->len - CCMP_MIC_LEN, 522 skb->data + skb->len - CCMP_MIC_LEN,
585 skb->data + hdrlen + CCMP_HDR_LEN)) { 523 skb->data + hdrlen + CCMP_HDR_LEN)) {
586#ifdef CONFIG_MAC80211_DEBUG
587 if (net_ratelimit())
588 printk(KERN_DEBUG "%s: CCMP decrypt failed "
589 "for RX frame from %s\n", rx->dev->name,
590 print_mac(mac, rx->sta->addr));
591#endif /* CONFIG_MAC80211_DEBUG */
592 return RX_DROP_UNUSABLE; 524 return RX_DROP_UNUSABLE;
593 } 525 }
594 } 526 }
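
The replay check in ieee80211_crypto_ccmp_decrypt() above compares the received packet number against the last accepted one with a plain memcmp(). That only works because the 48-bit PN is kept as six bytes with the most significant byte first, so byte-wise ordering matches numeric ordering. A small standalone sketch, not part of this patch, with illustrative names:

	/* Illustration: why memcmp() can compare CCMP packet numbers.
	 * The 48-bit PN is stored most-significant-byte first, so
	 * lexicographic byte order equals numeric order. */
	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define CCMP_PN_LEN 6

	/* store a 48-bit counter most-significant-byte first */
	static void pn_store(uint8_t pn[CCMP_PN_LEN], uint64_t v)
	{
		int i;

		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i] = v & 0xff;
			v >>= 8;
		}
	}

	int main(void)
	{
		uint8_t rx_pn[CCMP_PN_LEN], prev[CCMP_PN_LEN];

		pn_store(prev, 0x0100000000ULL);	/* last accepted PN */
		pn_store(rx_pn, 0x00ffffffffULL);	/* old/replayed frame */

		if (memcmp(rx_pn, prev, CCMP_PN_LEN) <= 0)
			printf("replay detected, frame dropped\n");
		else
			printf("PN advanced, frame accepted\n");
		return 0;
	}
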
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 662c1ccfee26..f27c99246a4c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -847,6 +847,25 @@ acct:
847} 847}
848EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); 848EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
849 849
850void __nf_ct_kill_acct(struct nf_conn *ct,
851 enum ip_conntrack_info ctinfo,
852 const struct sk_buff *skb,
853 int do_acct)
854{
855#ifdef CONFIG_NF_CT_ACCT
856 if (do_acct) {
857 spin_lock_bh(&nf_conntrack_lock);
858 ct->counters[CTINFO2DIR(ctinfo)].packets++;
859 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
860 skb->len - skb_network_offset(skb);
861 spin_unlock_bh(&nf_conntrack_lock);
862 }
863#endif
864 if (del_timer(&ct->timeout))
865 ct->timeout.function((unsigned long)ct);
866}
867EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
868
850#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 869#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
851 870
852#include <linux/netfilter/nfnetlink.h> 871#include <linux/netfilter/nfnetlink.h>
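
The new __nf_ct_kill_acct() helper is what the nf_ct_kill() and nf_ct_kill_acct() calls in the later hunks of this patch (ctnetlink, DCCP, TCP) resolve to. A plausible sketch of those wrappers, assuming they are thin inlines in include/net/netfilter/nf_conntrack.h, which is not shown in this diff:

	/* Sketch only: assumed wrappers around __nf_ct_kill_acct(); the
	 * real definitions are not part of the hunk above. */

	/* kill conntrack and account the packet that triggered the kill */
	static inline void nf_ct_kill_acct(struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo,
					   const struct sk_buff *skb)
	{
		__nf_ct_kill_acct(ct, ctinfo, skb, 1);
	}

	/* kill conntrack without touching the counters */
	static inline void nf_ct_kill(struct nf_conn *ct)
	{
		__nf_ct_kill_acct(ct, 0, NULL, 0);
	}
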
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 8a3f8b34e466..3469bc71a385 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -95,13 +95,11 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
95 newlen = newoff + t->len; 95 newlen = newoff + t->len;
96 rcu_read_unlock(); 96 rcu_read_unlock();
97 97
98 if (newlen >= ksize(ct->ext)) { 98 new = krealloc(ct->ext, newlen, gfp);
99 new = kmalloc(newlen, gfp); 99 if (!new)
100 if (!new) 100 return NULL;
101 return NULL;
102
103 memcpy(new, ct->ext, ct->ext->len);
104 101
102 if (new != ct->ext) {
105 for (i = 0; i < NF_CT_EXT_NUM; i++) { 103 for (i = 0; i < NF_CT_EXT_NUM; i++) {
106 if (!nf_ct_ext_exist(ct, i)) 104 if (!nf_ct_ext_exist(ct, i))
107 continue; 105 continue;
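
The rewrite above relies on krealloc() returning either the original pointer (when the allocation could be grown in place) or a new one (when the data had to be copied), which is why the per-extension move callbacks now only run under "if (new != ct->ext)". A userspace analogue with realloc(), purely illustrative:

	/* Illustration only: userspace realloc() has the same contract
	 * the krealloc() call above depends on - the returned pointer is
	 * either the old block grown in place or a fresh copy. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *old = malloc(16);
		char *new;

		if (!old)
			return 1;
		strcpy(old, "conntrack ext");

		new = realloc(old, 4096);
		if (!new) {
			free(old);
			return 1;
		}

		if (new != old)
			printf("block moved: fix up pointers into it\n");
		else
			printf("grown in place: nothing to fix up\n");

		free(new);
		return 0;
	}
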
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0edefcfc5949..63c4e1f299b8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
4 * (C) 2001 by Jay Schulist <jschlst@samba.org> 4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> 5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net> 6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2007 by Pablo Neira Ayuso <pablo@netfilter.org> 7 * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
8 * 8 *
9 * Initial connection tracking via netlink development funded and 9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com) 10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -475,14 +475,14 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
475 if (ctnetlink_dump_id(skb, ct) < 0) 475 if (ctnetlink_dump_id(skb, ct) < 0)
476 goto nla_put_failure; 476 goto nla_put_failure;
477 477
478 if (ctnetlink_dump_status(skb, ct) < 0)
479 goto nla_put_failure;
480
478 if (events & IPCT_DESTROY) { 481 if (events & IPCT_DESTROY) {
479 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 482 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
480 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) 483 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
481 goto nla_put_failure; 484 goto nla_put_failure;
482 } else { 485 } else {
483 if (ctnetlink_dump_status(skb, ct) < 0)
484 goto nla_put_failure;
485
486 if (ctnetlink_dump_timeout(skb, ct) < 0) 486 if (ctnetlink_dump_timeout(skb, ct) < 0)
487 goto nla_put_failure; 487 goto nla_put_failure;
488 488
@@ -812,9 +812,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
812 return -ENOENT; 812 return -ENOENT;
813 } 813 }
814 } 814 }
815 if (del_timer(&ct->timeout))
816 ct->timeout.function((unsigned long)ct);
817 815
816 nf_ct_kill(ct);
818 nf_ct_put(ct); 817 nf_ct_put(ct);
819 818
820 return 0; 819 return 0;
@@ -891,20 +890,19 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
891 890
892 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) 891 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
893 /* unchangeable */ 892 /* unchangeable */
894 return -EINVAL; 893 return -EBUSY;
895 894
896 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 895 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
897 /* SEEN_REPLY bit can only be set */ 896 /* SEEN_REPLY bit can only be set */
898 return -EINVAL; 897 return -EBUSY;
899
900 898
901 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 899 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
902 /* ASSURED bit can only be set */ 900 /* ASSURED bit can only be set */
903 return -EINVAL; 901 return -EBUSY;
904 902
905 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 903 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
906#ifndef CONFIG_NF_NAT_NEEDED 904#ifndef CONFIG_NF_NAT_NEEDED
907 return -EINVAL; 905 return -EOPNOTSUPP;
908#else 906#else
909 struct nf_nat_range range; 907 struct nf_nat_range range;
910 908
@@ -945,7 +943,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
945 943
946 /* don't change helper of sibling connections */ 944 /* don't change helper of sibling connections */
947 if (ct->master) 945 if (ct->master)
948 return -EINVAL; 946 return -EBUSY;
949 947
950 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 948 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
951 if (err < 0) 949 if (err < 0)
@@ -963,7 +961,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
963 961
964 helper = __nf_conntrack_helper_find_byname(helpname); 962 helper = __nf_conntrack_helper_find_byname(helpname);
965 if (helper == NULL) 963 if (helper == NULL)
966 return -EINVAL; 964 return -EOPNOTSUPP;
967 965
968 if (help) { 966 if (help) {
969 if (help->helper == helper) 967 if (help->helper == helper)
@@ -1258,12 +1256,12 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1258 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1256 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1259 /* we only allow nat config for new conntracks */ 1257 /* we only allow nat config for new conntracks */
1260 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1258 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1261 err = -EINVAL; 1259 err = -EOPNOTSUPP;
1262 goto out_unlock; 1260 goto out_unlock;
1263 } 1261 }
1264 /* can't link an existing conntrack to a master */ 1262 /* can't link an existing conntrack to a master */
1265 if (cda[CTA_TUPLE_MASTER]) { 1263 if (cda[CTA_TUPLE_MASTER]) {
1266 err = -EINVAL; 1264 err = -EOPNOTSUPP;
1267 goto out_unlock; 1265 goto out_unlock;
1268 } 1266 }
1269 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), 1267 err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
@@ -1608,7 +1606,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1608 h = __nf_conntrack_helper_find_byname(name); 1606 h = __nf_conntrack_helper_find_byname(name);
1609 if (!h) { 1607 if (!h) {
1610 spin_unlock_bh(&nf_conntrack_lock); 1608 spin_unlock_bh(&nf_conntrack_lock);
1611 return -EINVAL; 1609 return -EOPNOTSUPP;
1612 } 1610 }
1613 for (i = 0; i < nf_ct_expect_hsize; i++) { 1611 for (i = 0; i < nf_ct_expect_hsize; i++) {
1614 hlist_for_each_entry_safe(exp, n, next, 1612 hlist_for_each_entry_safe(exp, n, next,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index afb4a1861d2c..e7866dd3cde6 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -475,8 +475,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
475 if (type == DCCP_PKT_RESET && 475 if (type == DCCP_PKT_RESET &&
476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 476 !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
477 /* Tear down connection immediately if only reply is a RESET */ 477 /* Tear down connection immediately if only reply is a RESET */
478 if (del_timer(&ct->timeout)) 478 nf_ct_kill_acct(ct, ctinfo, skb);
479 ct->timeout.function((unsigned long)ct);
480 return NF_ACCEPT; 479 return NF_ACCEPT;
481 } 480 }
482 481
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index cbf2e27a22b2..41183a4d2d62 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -463,6 +463,82 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
463 return true; 463 return true;
464} 464}
465 465
466#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
467
468#include <linux/netfilter/nfnetlink.h>
469#include <linux/netfilter/nfnetlink_conntrack.h>
470
471static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
472 const struct nf_conn *ct)
473{
474 struct nlattr *nest_parms;
475
476 read_lock_bh(&sctp_lock);
477 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_SCTP | NLA_F_NESTED);
478 if (!nest_parms)
479 goto nla_put_failure;
480
481 NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
482
483 NLA_PUT_BE32(skb,
484 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
485 htonl(ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]));
486
487 NLA_PUT_BE32(skb,
488 CTA_PROTOINFO_SCTP_VTAG_REPLY,
489 htonl(ct->proto.sctp.vtag[IP_CT_DIR_REPLY]));
490
491 read_unlock_bh(&sctp_lock);
492
493 nla_nest_end(skb, nest_parms);
494
495 return 0;
496
497nla_put_failure:
498 read_unlock_bh(&sctp_lock);
499 return -1;
500}
501
502static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
503 [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 },
504 [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 },
505 [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 },
506};
507
508static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
509{
510 struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
511 struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];
512 int err;
513
514 /* updates may not contain the internal protocol info, skip parsing */
515 if (!attr)
516 return 0;
517
518 err = nla_parse_nested(tb,
519 CTA_PROTOINFO_SCTP_MAX,
520 attr,
521 sctp_nla_policy);
522 if (err < 0)
523 return err;
524
525 if (!tb[CTA_PROTOINFO_SCTP_STATE] ||
526 !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||
527 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])
528 return -EINVAL;
529
530 write_lock_bh(&sctp_lock);
531 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
532 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
533 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
534 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
535 ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
536 write_unlock_bh(&sctp_lock);
537
538 return 0;
539}
540#endif
541
466#ifdef CONFIG_SYSCTL 542#ifdef CONFIG_SYSCTL
467static unsigned int sctp_sysctl_table_users; 543static unsigned int sctp_sysctl_table_users;
468static struct ctl_table_header *sctp_sysctl_header; 544static struct ctl_table_header *sctp_sysctl_header;
@@ -591,6 +667,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
591 .new = sctp_new, 667 .new = sctp_new,
592 .me = THIS_MODULE, 668 .me = THIS_MODULE,
593#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 669#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
670 .to_nlattr = sctp_to_nlattr,
671 .from_nlattr = nlattr_to_sctp,
594 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 672 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
595 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 673 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
596 .nla_policy = nf_ct_port_nla_policy, 674 .nla_policy = nf_ct_port_nla_policy,
@@ -617,6 +695,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
617 .new = sctp_new, 695 .new = sctp_new,
618 .me = THIS_MODULE, 696 .me = THIS_MODULE,
619#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 697#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
698 .to_nlattr = sctp_to_nlattr,
699 .from_nlattr = nlattr_to_sctp,
620 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 700 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
621 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 701 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
622 .nla_policy = nf_ct_port_nla_policy, 702 .nla_policy = nf_ct_port_nla_policy,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 271cd01d57ae..740acd6bc7d9 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -844,8 +844,7 @@ static int tcp_packet(struct nf_conn *ct,
844 /* Attempt to reopen a closed/aborted connection. 844 /* Attempt to reopen a closed/aborted connection.
845 * Delete this connection and look up again. */ 845 * Delete this connection and look up again. */
846 write_unlock_bh(&tcp_lock); 846 write_unlock_bh(&tcp_lock);
847 if (del_timer(&ct->timeout)) 847 nf_ct_kill(ct);
848 ct->timeout.function((unsigned long)ct);
849 return -NF_REPEAT; 848 return -NF_REPEAT;
850 } 849 }
851 /* Fall through */ 850 /* Fall through */
@@ -878,8 +877,7 @@ static int tcp_packet(struct nf_conn *ct,
878 if (LOG_INVALID(IPPROTO_TCP)) 877 if (LOG_INVALID(IPPROTO_TCP))
879 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 878 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
880 "nf_ct_tcp: killing out of sync session "); 879 "nf_ct_tcp: killing out of sync session ");
881 if (del_timer(&ct->timeout)) 880 nf_ct_kill(ct);
882 ct->timeout.function((unsigned long)ct);
883 return -NF_DROP; 881 return -NF_DROP;
884 } 882 }
885 ct->proto.tcp.last_index = index; 883 ct->proto.tcp.last_index = index;
@@ -962,8 +960,7 @@ static int tcp_packet(struct nf_conn *ct,
962 problem case, so we can delete the conntrack 960 problem case, so we can delete the conntrack
963 immediately. --RR */ 961 immediately. --RR */
964 if (th->rst) { 962 if (th->rst) {
965 if (del_timer(&ct->timeout)) 963 nf_ct_kill_acct(ct, ctinfo, skb);
966 ct->timeout.function((unsigned long)ct);
967 return NF_ACCEPT; 964 return NF_ACCEPT;
968 } 965 }
969 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status) 966 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3447025ce068..04e9c965f8ca 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -243,7 +243,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
243 switch ((enum nfqnl_config_mode)queue->copy_mode) { 243 switch ((enum nfqnl_config_mode)queue->copy_mode) {
244 case NFQNL_COPY_META: 244 case NFQNL_COPY_META:
245 case NFQNL_COPY_NONE: 245 case NFQNL_COPY_NONE:
246 data_len = 0;
247 break; 246 break;
248 247
249 case NFQNL_COPY_PACKET: 248 case NFQNL_COPY_PACKET:
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index 211189eb2b67..76ca1f2421eb 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -8,7 +8,7 @@
8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> 8 * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
9 * by Henrik Nordstrom <hno@marasystems.com> 9 * by Henrik Nordstrom <hno@marasystems.com>
10 * 10 *
11 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 11 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 14 * it under the terms of the GNU General Public License version 2 as
@@ -94,6 +94,12 @@ connsecmark_tg_check(const char *tablename, const void *entry,
94{ 94{
95 const struct xt_connsecmark_target_info *info = targinfo; 95 const struct xt_connsecmark_target_info *info = targinfo;
96 96
97 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
98 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
99 "or \'security\' tables, not \'%s\'.\n", tablename);
100 return false;
101 }
102
97 switch (info->mode) { 103 switch (info->mode) {
98 case CONNSECMARK_SAVE: 104 case CONNSECMARK_SAVE:
99 case CONNSECMARK_RESTORE: 105 case CONNSECMARK_RESTORE:
@@ -126,7 +132,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
126 .destroy = connsecmark_tg_destroy, 132 .destroy = connsecmark_tg_destroy,
127 .target = connsecmark_tg, 133 .target = connsecmark_tg,
128 .targetsize = sizeof(struct xt_connsecmark_target_info), 134 .targetsize = sizeof(struct xt_connsecmark_target_info),
129 .table = "mangle",
130 .me = THIS_MODULE, 135 .me = THIS_MODULE,
131 }, 136 },
132 { 137 {
@@ -136,7 +141,6 @@ static struct xt_target connsecmark_tg_reg[] __read_mostly = {
136 .destroy = connsecmark_tg_destroy, 141 .destroy = connsecmark_tg_destroy,
137 .target = connsecmark_tg, 142 .target = connsecmark_tg,
138 .targetsize = sizeof(struct xt_connsecmark_target_info), 143 .targetsize = sizeof(struct xt_connsecmark_target_info),
139 .table = "mangle",
140 .me = THIS_MODULE, 144 .me = THIS_MODULE,
141 }, 145 },
142}; 146};
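
The check added to connsecmark_tg_check() uses the usual double-strcmp idiom: strcmp() returns 0 on a match, so the condition is true only when the table is neither "mangle" nor "security". A tiny standalone illustration:

	/* Illustration of the table-name check added above: rejects only
	 * when the name matches neither "mangle" nor "security". */
	#include <stdio.h>
	#include <string.h>

	static int table_ok(const char *tablename)
	{
		if (strcmp(tablename, "mangle") && strcmp(tablename, "security"))
			return 0;	/* rejected, as in connsecmark_tg_check() */
		return 1;
	}

	int main(void)
	{
		printf("mangle:   %s\n", table_ok("mangle") ? "accepted" : "rejected");
		printf("security: %s\n", table_ok("security") ? "accepted" : "rejected");
		printf("filter:   %s\n", table_ok("filter") ? "accepted" : "rejected");
		return 0;
	}
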
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index c0284856ccd4..94f87ee7552b 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -5,7 +5,7 @@
5 * Based on the nfmark match by: 5 * Based on the nfmark match by:
6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> 6 * (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
7 * 7 *
8 * (C) 2006 Red Hat, Inc., James Morris <jmorris@redhat.com> 8 * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -89,6 +89,12 @@ secmark_tg_check(const char *tablename, const void *entry,
89{ 89{
90 struct xt_secmark_target_info *info = targinfo; 90 struct xt_secmark_target_info *info = targinfo;
91 91
92 if (strcmp(tablename, "mangle") && strcmp(tablename, "security")) {
93 printk(KERN_INFO PFX "target only valid in the \'mangle\' "
94 "or \'security\' tables, not \'%s\'.\n", tablename);
95 return false;
96 }
97
92 if (mode && mode != info->mode) { 98 if (mode && mode != info->mode) {
93 printk(KERN_INFO PFX "mode already set to %hu cannot mix with " 99 printk(KERN_INFO PFX "mode already set to %hu cannot mix with "
94 "rules for mode %hu\n", mode, info->mode); 100 "rules for mode %hu\n", mode, info->mode);
@@ -127,7 +133,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
127 .destroy = secmark_tg_destroy, 133 .destroy = secmark_tg_destroy,
128 .target = secmark_tg, 134 .target = secmark_tg,
129 .targetsize = sizeof(struct xt_secmark_target_info), 135 .targetsize = sizeof(struct xt_secmark_target_info),
130 .table = "mangle",
131 .me = THIS_MODULE, 136 .me = THIS_MODULE,
132 }, 137 },
133 { 138 {
@@ -137,7 +142,6 @@ static struct xt_target secmark_tg_reg[] __read_mostly = {
137 .destroy = secmark_tg_destroy, 142 .destroy = secmark_tg_destroy,
138 .target = secmark_tg, 143 .target = secmark_tg,
139 .targetsize = sizeof(struct xt_secmark_target_info), 144 .targetsize = sizeof(struct xt_secmark_target_info),
140 .table = "mangle",
141 .me = THIS_MODULE, 145 .me = THIS_MODULE,
142 }, 146 },
143}; 147};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 349aba189558..98bfe277eab2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -759,7 +759,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
759 * 0: continue 759 * 0: continue
760 * 1: repeat lookup - reference dropped while waiting for socket memory. 760 * 1: repeat lookup - reference dropped while waiting for socket memory.
761 */ 761 */
762int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, 762int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
763 long *timeo, struct sock *ssk) 763 long *timeo, struct sock *ssk)
764{ 764{
765 struct netlink_sock *nlk; 765 struct netlink_sock *nlk;
@@ -892,7 +892,7 @@ retry:
892 return err; 892 return err;
893 } 893 }
894 894
895 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); 895 err = netlink_attachskb(sk, skb, &timeo, ssk);
896 if (err == 1) 896 if (err == 1)
897 goto retry; 897 goto retry;
898 if (err) 898 if (err)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bae8b998cab..74884f4a6255 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -475,13 +475,11 @@ static struct sock *nr_make_new(struct sock *osk)
475 sock_init_data(NULL, sk); 475 sock_init_data(NULL, sk);
476 476
477 sk->sk_type = osk->sk_type; 477 sk->sk_type = osk->sk_type;
478 sk->sk_socket = osk->sk_socket;
479 sk->sk_priority = osk->sk_priority; 478 sk->sk_priority = osk->sk_priority;
480 sk->sk_protocol = osk->sk_protocol; 479 sk->sk_protocol = osk->sk_protocol;
481 sk->sk_rcvbuf = osk->sk_rcvbuf; 480 sk->sk_rcvbuf = osk->sk_rcvbuf;
482 sk->sk_sndbuf = osk->sk_sndbuf; 481 sk->sk_sndbuf = osk->sk_sndbuf;
483 sk->sk_state = TCP_ESTABLISHED; 482 sk->sk_state = TCP_ESTABLISHED;
484 sk->sk_sleep = osk->sk_sleep;
485 sock_copy_flags(sk, osk); 483 sock_copy_flags(sk, osk);
486 484
487 skb_queue_head_init(&nr->ack_queue); 485 skb_queue_head_init(&nr->ack_queue);
@@ -538,11 +536,9 @@ static int nr_release(struct socket *sock)
538 sk->sk_state_change(sk); 536 sk->sk_state_change(sk);
539 sock_orphan(sk); 537 sock_orphan(sk);
540 sock_set_flag(sk, SOCK_DESTROY); 538 sock_set_flag(sk, SOCK_DESTROY);
541 sk->sk_socket = NULL;
542 break; 539 break;
543 540
544 default: 541 default:
545 sk->sk_socket = NULL;
546 break; 542 break;
547 } 543 }
548 544
@@ -810,13 +806,11 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
810 goto out_release; 806 goto out_release;
811 807
812 newsk = skb->sk; 808 newsk = skb->sk;
813 newsk->sk_socket = newsock; 809 sock_graft(newsk, newsock);
814 newsk->sk_sleep = &newsock->wait;
815 810
816 /* Now attach up the new socket */ 811 /* Now attach up the new socket */
817 kfree_skb(skb); 812 kfree_skb(skb);
818 sk_acceptq_removed(sk); 813 sk_acceptq_removed(sk);
819 newsock->sk = newsk;
820 814
821out_release: 815out_release:
822 release_sock(sk); 816 release_sock(sk);
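
nr_accept() now uses sock_graft() instead of open-coding the attach. Judging only from the removed lines, the helper presumably covers the equivalent of the following (plus the appropriate callback locking); this is a sketch, not taken from the diff:

	/* Sketch of what sock_graft(sk, parent) is assumed to do here,
	 * inferred from the open-coded lines it replaces above. The real
	 * helper lives in include/net/sock.h. */
	#include <net/sock.h>

	static inline void sock_graft_sketch(struct sock *sk, struct socket *parent)
	{
		sk->sk_sleep = &parent->wait;	/* was: newsk->sk_sleep = &newsock->wait */
		parent->sk = sk;		/* was: newsock->sk = newsk */
		sk->sk_socket = parent;		/* was: newsk->sk_socket = newsock */
	}
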
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2cee87da4441..beca6402f1cf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * PACKET - implements raw packet sockets. 6 * PACKET - implements raw packet sockets.
7 * 7 *
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
9 *
10 * Authors: Ross Biro 8 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index e4b051dbed61..8aa822730145 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -30,39 +30,62 @@ struct rfkill_task {
30 spinlock_t lock; /* for accessing last and desired state */ 30 spinlock_t lock; /* for accessing last and desired state */
31 unsigned long last; /* last schedule */ 31 unsigned long last; /* last schedule */
32 enum rfkill_state desired_state; /* on/off */ 32 enum rfkill_state desired_state; /* on/off */
33 enum rfkill_state current_state; /* on/off */
34}; 33};
35 34
36static void rfkill_task_handler(struct work_struct *work) 35static void rfkill_task_handler(struct work_struct *work)
37{ 36{
38 struct rfkill_task *task = container_of(work, struct rfkill_task, work); 37 struct rfkill_task *task = container_of(work, struct rfkill_task, work);
39 enum rfkill_state state;
40 38
41 mutex_lock(&task->mutex); 39 mutex_lock(&task->mutex);
42 40
43 /* 41 rfkill_switch_all(task->type, task->desired_state);
44 * Use temp variable to fetch desired state to keep it
45 * consistent even if rfkill_schedule_toggle() runs in
46 * another thread or interrupts us.
47 */
48 state = task->desired_state;
49 42
50 if (state != task->current_state) { 43 mutex_unlock(&task->mutex);
51 rfkill_switch_all(task->type, state); 44}
52 task->current_state = state; 45
46static void rfkill_task_epo_handler(struct work_struct *work)
47{
48 rfkill_epo();
49}
50
51static DECLARE_WORK(epo_work, rfkill_task_epo_handler);
52
53static void rfkill_schedule_epo(void)
54{
55 schedule_work(&epo_work);
56}
57
58static void rfkill_schedule_set(struct rfkill_task *task,
59 enum rfkill_state desired_state)
60{
61 unsigned long flags;
62
63 if (unlikely(work_pending(&epo_work)))
64 return;
65
66 spin_lock_irqsave(&task->lock, flags);
67
68 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
69 task->desired_state = desired_state;
70 task->last = jiffies;
71 schedule_work(&task->work);
53 } 72 }
54 73
55 mutex_unlock(&task->mutex); 74 spin_unlock_irqrestore(&task->lock, flags);
56} 75}
57 76
58static void rfkill_schedule_toggle(struct rfkill_task *task) 77static void rfkill_schedule_toggle(struct rfkill_task *task)
59{ 78{
60 unsigned long flags; 79 unsigned long flags;
61 80
81 if (unlikely(work_pending(&epo_work)))
82 return;
83
62 spin_lock_irqsave(&task->lock, flags); 84 spin_lock_irqsave(&task->lock, flags);
63 85
64 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) { 86 if (time_after(jiffies, task->last + msecs_to_jiffies(200))) {
65 task->desired_state = !task->desired_state; 87 task->desired_state =
88 rfkill_state_complement(task->desired_state);
66 task->last = jiffies; 89 task->last = jiffies;
67 schedule_work(&task->work); 90 schedule_work(&task->work);
68 } 91 }
@@ -70,26 +93,26 @@ static void rfkill_schedule_toggle(struct rfkill_task *task)
70 spin_unlock_irqrestore(&task->lock, flags); 93 spin_unlock_irqrestore(&task->lock, flags);
71} 94}
72 95
73#define DEFINE_RFKILL_TASK(n, t) \ 96#define DEFINE_RFKILL_TASK(n, t) \
74 struct rfkill_task n = { \ 97 struct rfkill_task n = { \
75 .work = __WORK_INITIALIZER(n.work, \ 98 .work = __WORK_INITIALIZER(n.work, \
76 rfkill_task_handler), \ 99 rfkill_task_handler), \
77 .type = t, \ 100 .type = t, \
78 .mutex = __MUTEX_INITIALIZER(n.mutex), \ 101 .mutex = __MUTEX_INITIALIZER(n.mutex), \
79 .lock = __SPIN_LOCK_UNLOCKED(n.lock), \ 102 .lock = __SPIN_LOCK_UNLOCKED(n.lock), \
80 .desired_state = RFKILL_STATE_ON, \ 103 .desired_state = RFKILL_STATE_UNBLOCKED, \
81 .current_state = RFKILL_STATE_ON, \
82 } 104 }
83 105
84static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); 106static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN);
85static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); 107static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH);
86static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); 108static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB);
87static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX); 109static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX);
110static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN);
88 111
89static void rfkill_event(struct input_handle *handle, unsigned int type, 112static void rfkill_event(struct input_handle *handle, unsigned int type,
90 unsigned int code, int down) 113 unsigned int code, int data)
91{ 114{
92 if (type == EV_KEY && down == 1) { 115 if (type == EV_KEY && data == 1) {
93 switch (code) { 116 switch (code) {
94 case KEY_WLAN: 117 case KEY_WLAN:
95 rfkill_schedule_toggle(&rfkill_wlan); 118 rfkill_schedule_toggle(&rfkill_wlan);
@@ -106,6 +129,28 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
106 default: 129 default:
107 break; 130 break;
108 } 131 }
132 } else if (type == EV_SW) {
133 switch (code) {
134 case SW_RFKILL_ALL:
135 /* EVERY radio type. data != 0 means radios ON */
136 /* handle EPO (emergency power off) through shortcut */
137 if (data) {
138 rfkill_schedule_set(&rfkill_wwan,
139 RFKILL_STATE_UNBLOCKED);
140 rfkill_schedule_set(&rfkill_wimax,
141 RFKILL_STATE_UNBLOCKED);
142 rfkill_schedule_set(&rfkill_uwb,
143 RFKILL_STATE_UNBLOCKED);
144 rfkill_schedule_set(&rfkill_bt,
145 RFKILL_STATE_UNBLOCKED);
146 rfkill_schedule_set(&rfkill_wlan,
147 RFKILL_STATE_UNBLOCKED);
148 } else
149 rfkill_schedule_epo();
150 break;
151 default:
152 break;
153 }
109 } 154 }
110} 155}
111 156
@@ -168,6 +213,11 @@ static const struct input_device_id rfkill_ids[] = {
168 .evbit = { BIT_MASK(EV_KEY) }, 213 .evbit = { BIT_MASK(EV_KEY) },
169 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, 214 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
170 }, 215 },
216 {
217 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
218 .evbit = { BIT(EV_SW) },
219 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
220 },
171 { } 221 { }
172}; 222};
173 223
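
rfkill_schedule_toggle() now flips the state through rfkill_state_complement() rather than with a plain "!". That helper is not part of this diff; presumably it maps the unblocked state to soft-blocked and any blocked state back to unblocked, roughly along these lines (assumption, sketch only):

	/* Assumed shape of rfkill_state_complement(), not in this diff:
	 * a plain boolean negation no longer works once there are three
	 * rfkill states. Assumes <linux/rfkill.h> for the enum. */
	static inline enum rfkill_state
	rfkill_state_complement(enum rfkill_state state)
	{
		return state == RFKILL_STATE_UNBLOCKED ?
			RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
	}
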
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index 4dae5006fc77..f63d05045685 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -12,5 +12,6 @@
12#define __RFKILL_INPUT_H 12#define __RFKILL_INPUT_H
13 13
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
15void rfkill_epo(void);
15 16
16#endif /* __RFKILL_INPUT_H */ 17#endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 4e10a95de832..ce0e23148cdd 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -39,8 +39,56 @@ MODULE_LICENSE("GPL");
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_mutex); 40static DEFINE_MUTEX(rfkill_mutex);
41 41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off");
46
42static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; 47static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX];
43 48
49static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
50
51
52/**
53 * register_rfkill_notifier - Add notifier to rfkill notifier chain
54 * @nb: pointer to the new entry to add to the chain
55 *
56 * See blocking_notifier_chain_register() for return value and further
57 * observations.
58 *
59 * Adds a notifier to the rfkill notifier chain. The chain will be
60 * called with a pointer to the relevant rfkill structure as a parameter,
61 * refer to include/linux/rfkill.h for the possible events.
62 *
63 * Notifiers added to this chain are to always return NOTIFY_DONE. This
64 * chain is a blocking notifier chain: notifiers can sleep.
65 *
66 * Calls to this chain may have been done through a workqueue. One must
 67 * assume unordered asynchronous behaviour; there is no way to know if
68 * actions related to the event that generated the notification have been
69 * carried out already.
70 */
71int register_rfkill_notifier(struct notifier_block *nb)
72{
73 return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
74}
75EXPORT_SYMBOL_GPL(register_rfkill_notifier);
76
77/**
78 * unregister_rfkill_notifier - remove notifier from rfkill notifier chain
79 * @nb: pointer to the entry to remove from the chain
80 *
81 * See blocking_notifier_chain_unregister() for return value and further
82 * observations.
83 *
84 * Removes a notifier from the rfkill notifier chain.
85 */
86int unregister_rfkill_notifier(struct notifier_block *nb)
87{
88 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
89}
90EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
91
44 92
45static void rfkill_led_trigger(struct rfkill *rfkill, 93static void rfkill_led_trigger(struct rfkill *rfkill,
46 enum rfkill_state state) 94 enum rfkill_state state)
@@ -50,24 +98,99 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
50 98
51 if (!led->name) 99 if (!led->name)
52 return; 100 return;
53 if (state == RFKILL_STATE_OFF) 101 if (state != RFKILL_STATE_UNBLOCKED)
54 led_trigger_event(led, LED_OFF); 102 led_trigger_event(led, LED_OFF);
55 else 103 else
56 led_trigger_event(led, LED_FULL); 104 led_trigger_event(led, LED_FULL);
57#endif /* CONFIG_RFKILL_LEDS */ 105#endif /* CONFIG_RFKILL_LEDS */
58} 106}
59 107
108static void notify_rfkill_state_change(struct rfkill *rfkill)
109{
110 blocking_notifier_call_chain(&rfkill_notifier_list,
111 RFKILL_STATE_CHANGED,
112 rfkill);
113}
114
115static void update_rfkill_state(struct rfkill *rfkill)
116{
117 enum rfkill_state newstate, oldstate;
118
119 if (rfkill->get_state) {
120 mutex_lock(&rfkill->mutex);
121 if (!rfkill->get_state(rfkill->data, &newstate)) {
122 oldstate = rfkill->state;
123 rfkill->state = newstate;
124 if (oldstate != newstate)
125 notify_rfkill_state_change(rfkill);
126 }
127 mutex_unlock(&rfkill->mutex);
128 }
129}
130
131/**
132 * rfkill_toggle_radio - wrapper for toggle_radio hook
133 * calls toggle_radio taking into account a lot of "small"
134 * details.
135 * @rfkill: the rfkill struct to use
136 * @force: calls toggle_radio even if cache says it is not needed,
137 * and also makes sure notifications of the state will be
138 * sent even if it didn't change
139 * @state: the new state to call toggle_radio() with
140 *
 141 * This wrapper protects and enforces the API for toggle_radio
142 * calls. Note that @force cannot override a (possibly cached)
143 * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
144 * RFKILL_STATE_HARD_BLOCKED implements either get_state() or
145 * rfkill_force_state(), so the cache either is bypassed or valid.
146 *
147 * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED
148 * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to
149 * give the driver a hint that it should double-BLOCK the transmitter.
150 *
 151 * Caller must have acquired rfkill_mutex.
152 */
60static int rfkill_toggle_radio(struct rfkill *rfkill, 153static int rfkill_toggle_radio(struct rfkill *rfkill,
61 enum rfkill_state state) 154 enum rfkill_state state,
155 int force)
62{ 156{
63 int retval = 0; 157 int retval = 0;
158 enum rfkill_state oldstate, newstate;
159
160 oldstate = rfkill->state;
64 161
65 if (state != rfkill->state) { 162 if (rfkill->get_state && !force &&
163 !rfkill->get_state(rfkill->data, &newstate))
164 rfkill->state = newstate;
165
166 switch (state) {
167 case RFKILL_STATE_HARD_BLOCKED:
168 /* typically happens when refreshing hardware state,
169 * such as on resume */
170 state = RFKILL_STATE_SOFT_BLOCKED;
171 break;
172 case RFKILL_STATE_UNBLOCKED:
173 /* force can't override this, only rfkill_force_state() can */
174 if (rfkill->state == RFKILL_STATE_HARD_BLOCKED)
175 return -EPERM;
176 break;
177 case RFKILL_STATE_SOFT_BLOCKED:
178 /* nothing to do, we want to give drivers the hint to double
179 * BLOCK even a transmitter that is already in state
180 * RFKILL_STATE_HARD_BLOCKED */
181 break;
182 }
183
184 if (force || state != rfkill->state) {
66 retval = rfkill->toggle_radio(rfkill->data, state); 185 retval = rfkill->toggle_radio(rfkill->data, state);
67 if (!retval) { 186 /* never allow a HARD->SOFT downgrade! */
187 if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED)
68 rfkill->state = state; 188 rfkill->state = state;
69 rfkill_led_trigger(rfkill, state); 189 }
70 } 190
191 if (force || rfkill->state != oldstate) {
192 rfkill_led_trigger(rfkill, rfkill->state);
193 notify_rfkill_state_change(rfkill);
71 } 194 }
72 195
73 return retval; 196 return retval;
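
The notifier chain documented above is exercised later in this same patch by rfkill_blocking_uevent_nb. A minimal sketch of what an external consumer might look like, following the kernel-doc rules (blocking context, always return NOTIFY_DONE); the "my_" names are illustrative, not from the patch:

	/* Sketch of an external consumer of the rfkill notifier chain. */
	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/rfkill.h>

	static int my_rfkill_event(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		struct rfkill *rfkill = data;

		if (event == RFKILL_STATE_CHANGED)
			printk(KERN_DEBUG "rfkill %s is now in state %d\n",
			       rfkill->name, rfkill->state);

		/* per the kernel-doc: always NOTIFY_DONE on this chain */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_rfkill_nb = {
		.notifier_call = my_rfkill_event,
	};

	/* in module init/exit:
	 *	register_rfkill_notifier(&my_rfkill_nb);
	 *	unregister_rfkill_notifier(&my_rfkill_nb);
	 */
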
@@ -82,7 +205,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
82 * a specific switch is claimed by userspace in which case it is 205 * a specific switch is claimed by userspace in which case it is
83 * left alone. 206 * left alone.
84 */ 207 */
85
86void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 208void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
87{ 209{
88 struct rfkill *rfkill; 210 struct rfkill *rfkill;
@@ -93,13 +215,66 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
93 215
94 list_for_each_entry(rfkill, &rfkill_list, node) { 216 list_for_each_entry(rfkill, &rfkill_list, node) {
95 if ((!rfkill->user_claim) && (rfkill->type == type)) 217 if ((!rfkill->user_claim) && (rfkill->type == type))
96 rfkill_toggle_radio(rfkill, state); 218 rfkill_toggle_radio(rfkill, state, 0);
97 } 219 }
98 220
99 mutex_unlock(&rfkill_mutex); 221 mutex_unlock(&rfkill_mutex);
100} 222}
101EXPORT_SYMBOL(rfkill_switch_all); 223EXPORT_SYMBOL(rfkill_switch_all);
102 224
225/**
226 * rfkill_epo - emergency power off all transmitters
227 *
228 * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
229 * everything in its path but rfkill_mutex.
230 */
231void rfkill_epo(void)
232{
233 struct rfkill *rfkill;
234
235 mutex_lock(&rfkill_mutex);
236 list_for_each_entry(rfkill, &rfkill_list, node) {
237 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
238 }
239 mutex_unlock(&rfkill_mutex);
240}
241EXPORT_SYMBOL_GPL(rfkill_epo);
242
243/**
244 * rfkill_force_state - Force the internal rfkill radio state
245 * @rfkill: pointer to the rfkill class to modify.
246 * @state: the current radio state the class should be forced to.
247 *
248 * This function updates the internal state of the radio cached
249 * by the rfkill class. It should be used when the driver gets
250 * a notification by the firmware/hardware of the current *real*
251 * state of the radio rfkill switch.
252 *
253 * It may not be called from an atomic context.
254 */
255int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
256{
257 enum rfkill_state oldstate;
258
259 if (state != RFKILL_STATE_SOFT_BLOCKED &&
260 state != RFKILL_STATE_UNBLOCKED &&
261 state != RFKILL_STATE_HARD_BLOCKED)
262 return -EINVAL;
263
264 mutex_lock(&rfkill->mutex);
265
266 oldstate = rfkill->state;
267 rfkill->state = state;
268
269 if (state != oldstate)
270 notify_rfkill_state_change(rfkill);
271
272 mutex_unlock(&rfkill->mutex);
273
274 return 0;
275}
276EXPORT_SYMBOL(rfkill_force_state);
277
103static ssize_t rfkill_name_show(struct device *dev, 278static ssize_t rfkill_name_show(struct device *dev,
104 struct device_attribute *attr, 279 struct device_attribute *attr,
105 char *buf) 280 char *buf)
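
rfkill_force_state() is meant to be called when the driver learns the real switch position from firmware or hardware; since it may not be called from atomic context, an interrupt handler would typically defer the call to a workqueue. A hypothetical driver-side sketch; all "mydrv_" names and the private struct are made up, only rfkill_force_state() comes from this patch:

	/* Hypothetical use of rfkill_force_state() per the kernel-doc above. */
	#include <linux/workqueue.h>
	#include <linux/rfkill.h>

	struct mydrv_priv {
		struct rfkill *rfkill;
		struct work_struct rfkill_work;
	};

	/* assumed driver helper that reads the hardware kill-switch line */
	int mydrv_hw_killswitch_asserted(struct mydrv_priv *priv);

	static void mydrv_rfkill_poll_work(struct work_struct *work)
	{
		struct mydrv_priv *priv =
			container_of(work, struct mydrv_priv, rfkill_work);
		enum rfkill_state state;

		state = mydrv_hw_killswitch_asserted(priv) ?
			RFKILL_STATE_HARD_BLOCKED : RFKILL_STATE_UNBLOCKED;

		/* workqueue context, so the "no atomic context" rule holds */
		rfkill_force_state(priv->rfkill, state);
	}
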
@@ -109,31 +284,31 @@ static ssize_t rfkill_name_show(struct device *dev,
109 return sprintf(buf, "%s\n", rfkill->name); 284 return sprintf(buf, "%s\n", rfkill->name);
110} 285}
111 286
112static ssize_t rfkill_type_show(struct device *dev, 287static const char *rfkill_get_type_str(enum rfkill_type type)
113 struct device_attribute *attr,
114 char *buf)
115{ 288{
116 struct rfkill *rfkill = to_rfkill(dev); 289 switch (type) {
117 const char *type;
118
119 switch (rfkill->type) {
120 case RFKILL_TYPE_WLAN: 290 case RFKILL_TYPE_WLAN:
121 type = "wlan"; 291 return "wlan";
122 break;
123 case RFKILL_TYPE_BLUETOOTH: 292 case RFKILL_TYPE_BLUETOOTH:
124 type = "bluetooth"; 293 return "bluetooth";
125 break;
126 case RFKILL_TYPE_UWB: 294 case RFKILL_TYPE_UWB:
127 type = "ultrawideband"; 295 return "ultrawideband";
128 break;
129 case RFKILL_TYPE_WIMAX: 296 case RFKILL_TYPE_WIMAX:
130 type = "wimax"; 297 return "wimax";
131 break; 298 case RFKILL_TYPE_WWAN:
299 return "wwan";
132 default: 300 default:
133 BUG(); 301 BUG();
134 } 302 }
303}
304
305static ssize_t rfkill_type_show(struct device *dev,
306 struct device_attribute *attr,
307 char *buf)
308{
309 struct rfkill *rfkill = to_rfkill(dev);
135 310
136 return sprintf(buf, "%s\n", type); 311 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
137} 312}
138 313
139static ssize_t rfkill_state_show(struct device *dev, 314static ssize_t rfkill_state_show(struct device *dev,
@@ -142,6 +317,7 @@ static ssize_t rfkill_state_show(struct device *dev,
142{ 317{
143 struct rfkill *rfkill = to_rfkill(dev); 318 struct rfkill *rfkill = to_rfkill(dev);
144 319
320 update_rfkill_state(rfkill);
145 return sprintf(buf, "%d\n", rfkill->state); 321 return sprintf(buf, "%d\n", rfkill->state);
146} 322}
147 323
@@ -156,10 +332,14 @@ static ssize_t rfkill_state_store(struct device *dev,
156 if (!capable(CAP_NET_ADMIN)) 332 if (!capable(CAP_NET_ADMIN))
157 return -EPERM; 333 return -EPERM;
158 334
335 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
336 if (state != RFKILL_STATE_UNBLOCKED &&
337 state != RFKILL_STATE_SOFT_BLOCKED)
338 return -EINVAL;
339
159 if (mutex_lock_interruptible(&rfkill->mutex)) 340 if (mutex_lock_interruptible(&rfkill->mutex))
160 return -ERESTARTSYS; 341 return -ERESTARTSYS;
161 error = rfkill_toggle_radio(rfkill, 342 error = rfkill_toggle_radio(rfkill, state, 0);
162 state ? RFKILL_STATE_ON : RFKILL_STATE_OFF);
163 mutex_unlock(&rfkill->mutex); 343 mutex_unlock(&rfkill->mutex);
164 344
165 return error ? error : count; 345 return error ? error : count;
@@ -200,7 +380,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
200 if (rfkill->user_claim != claim) { 380 if (rfkill->user_claim != claim) {
201 if (!claim) 381 if (!claim)
202 rfkill_toggle_radio(rfkill, 382 rfkill_toggle_radio(rfkill,
203 rfkill_states[rfkill->type]); 383 rfkill_states[rfkill->type],
384 0);
204 rfkill->user_claim = claim; 385 rfkill->user_claim = claim;
205 } 386 }
206 387
@@ -233,12 +414,12 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
233 414
234 if (dev->power.power_state.event != state.event) { 415 if (dev->power.power_state.event != state.event) {
235 if (state.event & PM_EVENT_SLEEP) { 416 if (state.event & PM_EVENT_SLEEP) {
236 mutex_lock(&rfkill->mutex); 417 /* Stop transmitter, keep state, no notifies */
237 418 update_rfkill_state(rfkill);
238 if (rfkill->state == RFKILL_STATE_ON)
239 rfkill->toggle_radio(rfkill->data,
240 RFKILL_STATE_OFF);
241 419
420 mutex_lock(&rfkill->mutex);
421 rfkill->toggle_radio(rfkill->data,
422 RFKILL_STATE_SOFT_BLOCKED);
242 mutex_unlock(&rfkill->mutex); 423 mutex_unlock(&rfkill->mutex);
243 } 424 }
244 425
@@ -255,8 +436,8 @@ static int rfkill_resume(struct device *dev)
255 if (dev->power.power_state.event != PM_EVENT_ON) { 436 if (dev->power.power_state.event != PM_EVENT_ON) {
256 mutex_lock(&rfkill->mutex); 437 mutex_lock(&rfkill->mutex);
257 438
258 if (rfkill->state == RFKILL_STATE_ON) 439 /* restore radio state AND notify everybody */
259 rfkill->toggle_radio(rfkill->data, RFKILL_STATE_ON); 440 rfkill_toggle_radio(rfkill, rfkill->state, 1);
260 441
261 mutex_unlock(&rfkill->mutex); 442 mutex_unlock(&rfkill->mutex);
262 } 443 }
@@ -269,12 +450,51 @@ static int rfkill_resume(struct device *dev)
269#define rfkill_resume NULL 450#define rfkill_resume NULL
270#endif 451#endif
271 452
453static int rfkill_blocking_uevent_notifier(struct notifier_block *nb,
454 unsigned long eventid,
455 void *data)
456{
457 struct rfkill *rfkill = (struct rfkill *)data;
458
459 switch (eventid) {
460 case RFKILL_STATE_CHANGED:
461 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
462 break;
463 default:
464 break;
465 }
466
467 return NOTIFY_DONE;
468}
469
470static struct notifier_block rfkill_blocking_uevent_nb = {
471 .notifier_call = rfkill_blocking_uevent_notifier,
472 .priority = 0,
473};
474
475static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
476{
477 struct rfkill *rfkill = to_rfkill(dev);
478 int error;
479
480 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
481 if (error)
482 return error;
483 error = add_uevent_var(env, "RFKILL_TYPE=%s",
484 rfkill_get_type_str(rfkill->type));
485 if (error)
486 return error;
487 error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state);
488 return error;
489}
490
272static struct class rfkill_class = { 491static struct class rfkill_class = {
273 .name = "rfkill", 492 .name = "rfkill",
274 .dev_release = rfkill_release, 493 .dev_release = rfkill_release,
275 .dev_attrs = rfkill_dev_attrs, 494 .dev_attrs = rfkill_dev_attrs,
276 .suspend = rfkill_suspend, 495 .suspend = rfkill_suspend,
277 .resume = rfkill_resume, 496 .resume = rfkill_resume,
497 .dev_uevent = rfkill_dev_uevent,
278}; 498};
279 499
280static int rfkill_add_switch(struct rfkill *rfkill) 500static int rfkill_add_switch(struct rfkill *rfkill)
@@ -283,7 +503,7 @@ static int rfkill_add_switch(struct rfkill *rfkill)
283 503
284 mutex_lock(&rfkill_mutex); 504 mutex_lock(&rfkill_mutex);
285 505
286 error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]); 506 error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0);
287 if (!error) 507 if (!error)
288 list_add_tail(&rfkill->node, &rfkill_list); 508 list_add_tail(&rfkill->node, &rfkill_list);
289 509
@@ -296,7 +516,7 @@ static void rfkill_remove_switch(struct rfkill *rfkill)
296{ 516{
297 mutex_lock(&rfkill_mutex); 517 mutex_lock(&rfkill_mutex);
298 list_del_init(&rfkill->node); 518 list_del_init(&rfkill->node);
299 rfkill_toggle_radio(rfkill, RFKILL_STATE_OFF); 519 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
300 mutex_unlock(&rfkill_mutex); 520 mutex_unlock(&rfkill_mutex);
301} 521}
302 522
@@ -412,7 +632,7 @@ int rfkill_register(struct rfkill *rfkill)
412EXPORT_SYMBOL(rfkill_register); 632EXPORT_SYMBOL(rfkill_register);
413 633
414/** 634/**
415 * rfkill_unregister - Uegister a rfkill structure. 635 * rfkill_unregister - Unregister a rfkill structure.
416 * @rfkill: rfkill structure to be unregistered 636 * @rfkill: rfkill structure to be unregistered
417 * 637 *
418 * This function should be called by the network driver during device 638 * This function should be called by the network driver during device
@@ -436,8 +656,13 @@ static int __init rfkill_init(void)
436 int error; 656 int error;
437 int i; 657 int i;
438 658
659 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
660 if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED &&
661 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
662 return -EINVAL;
663
439 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) 664 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++)
440 rfkill_states[i] = RFKILL_STATE_ON; 665 rfkill_states[i] = rfkill_default_state;
441 666
442 error = class_register(&rfkill_class); 667 error = class_register(&rfkill_class);
443 if (error) { 668 if (error) {
@@ -445,11 +670,14 @@ static int __init rfkill_init(void)
445 return error; 670 return error;
446 } 671 }
447 672
673 register_rfkill_notifier(&rfkill_blocking_uevent_nb);
674
448 return 0; 675 return 0;
449} 676}
450 677
451static void __exit rfkill_exit(void) 678static void __exit rfkill_exit(void)
452{ 679{
680 unregister_rfkill_notifier(&rfkill_blocking_uevent_nb);
453 class_unregister(&rfkill_class); 681 class_unregister(&rfkill_class);
454} 682}
455 683
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1ebf65294405..46461a69cd0f 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -566,13 +566,11 @@ static struct sock *rose_make_new(struct sock *osk)
566#endif 566#endif
567 567
568 sk->sk_type = osk->sk_type; 568 sk->sk_type = osk->sk_type;
569 sk->sk_socket = osk->sk_socket;
570 sk->sk_priority = osk->sk_priority; 569 sk->sk_priority = osk->sk_priority;
571 sk->sk_protocol = osk->sk_protocol; 570 sk->sk_protocol = osk->sk_protocol;
572 sk->sk_rcvbuf = osk->sk_rcvbuf; 571 sk->sk_rcvbuf = osk->sk_rcvbuf;
573 sk->sk_sndbuf = osk->sk_sndbuf; 572 sk->sk_sndbuf = osk->sk_sndbuf;
574 sk->sk_state = TCP_ESTABLISHED; 573 sk->sk_state = TCP_ESTABLISHED;
575 sk->sk_sleep = osk->sk_sleep;
576 sock_copy_flags(sk, osk); 574 sock_copy_flags(sk, osk);
577 575
578 init_timer(&rose->timer); 576 init_timer(&rose->timer);
@@ -759,7 +757,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
759 sock->state = SS_UNCONNECTED; 757 sock->state = SS_UNCONNECTED;
760 758
761 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, 759 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
762 &diagnostic); 760 &diagnostic, 0);
763 if (!rose->neighbour) { 761 if (!rose->neighbour) {
764 err = -ENETUNREACH; 762 err = -ENETUNREACH;
765 goto out_release; 763 goto out_release;
@@ -855,7 +853,7 @@ rose_try_next_neigh:
855 853
856 if (sk->sk_state != TCP_ESTABLISHED) { 854 if (sk->sk_state != TCP_ESTABLISHED) {
857 /* Try next neighbour */ 855 /* Try next neighbour */
858 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic); 856 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
859 if (rose->neighbour) 857 if (rose->neighbour)
860 goto rose_try_next_neigh; 858 goto rose_try_next_neigh;
861 859
@@ -924,14 +922,12 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
924 goto out_release; 922 goto out_release;
925 923
926 newsk = skb->sk; 924 newsk = skb->sk;
927 newsk->sk_socket = newsock; 925 sock_graft(newsk, newsock);
928 newsk->sk_sleep = &newsock->wait;
929 926
930 /* Now attach up the new socket */ 927 /* Now attach up the new socket */
931 skb->sk = NULL; 928 skb->sk = NULL;
932 kfree_skb(skb); 929 kfree_skb(skb);
933 sk->sk_ack_backlog--; 930 sk->sk_ack_backlog--;
934 newsock->sk = newsk;
935 931
936out_release: 932out_release:
937 release_sock(sk); 933 release_sock(sk);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index bd593871c81e..a81066a1010a 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -662,27 +662,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
662} 662}
663 663
664/* 664/*
665 * Find a neighbour given a ROSE address. 665 * Find a neighbour or a route given a ROSE address.
666 */ 666 */
667struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, 667struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
668 unsigned char *diagnostic) 668 unsigned char *diagnostic, int new)
669{ 669{
670 struct rose_neigh *res = NULL; 670 struct rose_neigh *res = NULL;
671 struct rose_node *node; 671 struct rose_node *node;
672 int failed = 0; 672 int failed = 0;
673 int i; 673 int i;
674 674
675 spin_lock_bh(&rose_node_list_lock); 675 if (!new) spin_lock_bh(&rose_node_list_lock);
676 for (node = rose_node_list; node != NULL; node = node->next) { 676 for (node = rose_node_list; node != NULL; node = node->next) {
677 if (rosecmpm(addr, &node->address, node->mask) == 0) { 677 if (rosecmpm(addr, &node->address, node->mask) == 0) {
678 for (i = 0; i < node->count; i++) { 678 for (i = 0; i < node->count; i++) {
679 if (!rose_ftimer_running(node->neighbour[i])) { 679 if (new) {
680 res = node->neighbour[i]; 680 if (node->neighbour[i]->restarted) {
681 goto out; 681 res = node->neighbour[i];
682 } else 682 goto out;
683 failed = 1; 683 }
684 }
685 else {
686 if (!rose_ftimer_running(node->neighbour[i])) {
687 res = node->neighbour[i];
688 goto out;
689 } else
690 failed = 1;
691 }
684 } 692 }
685 break;
686 } 693 }
687 } 694 }
688 695
@@ -695,7 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
695 } 702 }
696 703
697out: 704out:
698 spin_unlock_bh(&rose_node_list_lock); 705 if (!new) spin_unlock_bh(&rose_node_list_lock);
699 706
700 return res; 707 return res;
701} 708}
@@ -1018,7 +1025,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
1018 rose_route = rose_route->next; 1025 rose_route = rose_route->next;
1019 } 1026 }
1020 1027
1021 if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic)) == NULL) { 1028 if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
1022 rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic); 1029 rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
1023 goto out; 1030 goto out;
1024 } 1031 }
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index f8a699e92962..f98c8027e5c1 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -21,6 +21,7 @@
21#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
22#include <net/ip.h> 22#include <net/ip.h>
23#include <net/udp.h> 23#include <net/udp.h>
24#include <net/net_namespace.h>
24#include "ar-internal.h" 25#include "ar-internal.h"
25 26
26unsigned long rxrpc_ack_timeout = 1; 27unsigned long rxrpc_ack_timeout = 1;
@@ -708,12 +709,12 @@ void rxrpc_data_ready(struct sock *sk, int count)
708 if (skb_checksum_complete(skb)) { 709 if (skb_checksum_complete(skb)) {
709 rxrpc_free_skb(skb); 710 rxrpc_free_skb(skb);
710 rxrpc_put_local(local); 711 rxrpc_put_local(local);
711 UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0); 712 UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
712 _leave(" [CSUM failed]"); 713 _leave(" [CSUM failed]");
713 return; 714 return;
714 } 715 }
715 716
716 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0); 717 UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
717 718
718 /* the socket buffer we have is owned by UDP, with UDP's data all over 719 /* the socket buffer we have is owned by UDP, with UDP's data all over
719 * it, but we really want our own */ 720 * it, but we really want our own */
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3fb58f428f72..0284791169c9 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -24,8 +24,6 @@
24 * Jiri Fojtasek 24 * Jiri Fojtasek
25 * fixed requeue routine 25 * fixed requeue routine
26 * and many others. thanks. 26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */ 27 */
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 024c3ebd9661..35b6a023a6d0 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -136,6 +136,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
136 136
137 /* Set association default SACK delay */ 137 /* Set association default SACK delay */
138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); 138 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
139 asoc->sackfreq = sp->sackfreq;
139 140
140 /* Set the association default flags controlling 141 /* Set the association default flags controlling
141 * Heartbeat, SACK delay, and Path MTU Discovery. 142 * Heartbeat, SACK delay, and Path MTU Discovery.
@@ -261,6 +262,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
261 * already received one packet.] 262 * already received one packet.]
262 */ 263 */
263 asoc->peer.sack_needed = 1; 264 asoc->peer.sack_needed = 1;
265 asoc->peer.sack_cnt = 0;
264 266
265 /* Assume that the peer will tell us if he recognizes ASCONF 267 /* Assume that the peer will tell us if he recognizes ASCONF
266 * as part of INIT exchange. 268 * as part of INIT exchange.
@@ -624,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
624 * association configured value. 626 * association configured value.
625 */ 627 */
626 peer->sackdelay = asoc->sackdelay; 628 peer->sackdelay = asoc->sackdelay;
629 peer->sackfreq = asoc->sackfreq;
627 630
628 /* Enable/disable heartbeat, SACK delay, and path MTU discovery 631 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
629 * based on association setting. 632 * based on association setting.
diff --git a/net/sctp/input.c b/net/sctp/input.c
index ca6b022b1df2..d354a23972d4 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -430,6 +430,9 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
430 struct sock *sk = NULL; 430 struct sock *sk = NULL;
431 struct sctp_association *asoc; 431 struct sctp_association *asoc;
432 struct sctp_transport *transport = NULL; 432 struct sctp_transport *transport = NULL;
433 struct sctp_init_chunk *chunkhdr;
434 __u32 vtag = ntohl(sctphdr->vtag);
435 int len = skb->len - ((void *)sctphdr - (void *)skb->data);
433 436
434 *app = NULL; *tpp = NULL; 437 *app = NULL; *tpp = NULL;
435 438
@@ -451,8 +454,28 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
451 454
452 sk = asoc->base.sk; 455 sk = asoc->base.sk;
453 456
454 if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) { 457 /* RFC 4960, Appendix C. ICMP Handling
455 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 458 *
459 * ICMP6) An implementation MUST validate that the Verification Tag
460 * contained in the ICMP message matches the Verification Tag of
461 * the peer. If the Verification Tag is not 0 and does NOT
462 * match, discard the ICMP message. If it is 0 and the ICMP
463 * message contains enough bytes to verify that the chunk type is
464 * an INIT chunk and that the Initiate Tag matches the tag of the
465 * peer, continue with ICMP7. If the ICMP message is too short
466 * or the chunk type or the Initiate Tag does not match, silently
467 * discard the packet.
468 */
469 if (vtag == 0) {
470 chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr
471 + sizeof(struct sctphdr));
472 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
473 + sizeof(__be32) ||
474 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
475 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
476 goto out;
477 }
478 } else if (vtag != asoc->c.peer_vtag) {
456 goto out; 479 goto out;
457 } 480 }
458 481
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6d45bae93b46..abcd00dc05eb 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -157,7 +157,8 @@ void sctp_packet_free(struct sctp_packet *packet)
157 * packet can be sent only after receiving the COOKIE_ACK. 157 * packet can be sent only after receiving the COOKIE_ACK.
158 */ 158 */
159sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, 159sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
160 struct sctp_chunk *chunk) 160 struct sctp_chunk *chunk,
161 int one_packet)
161{ 162{
162 sctp_xmit_t retval; 163 sctp_xmit_t retval;
163 int error = 0; 164 int error = 0;
@@ -175,7 +176,9 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
175 /* If we have an empty packet, then we can NOT ever 176 /* If we have an empty packet, then we can NOT ever
176 * return PMTU_FULL. 177 * return PMTU_FULL.
177 */ 178 */
178 retval = sctp_packet_append_chunk(packet, chunk); 179 if (!one_packet)
180 retval = sctp_packet_append_chunk(packet,
181 chunk);
179 } 182 }
180 break; 183 break;
181 184
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ace6770e9048..70ead8dc3485 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -702,6 +702,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
702 return error; 702 return error;
703} 703}
704 704
705
705/* 706/*
706 * Try to flush an outqueue. 707 * Try to flush an outqueue.
707 * 708 *
@@ -725,6 +726,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
725 sctp_xmit_t status; 726 sctp_xmit_t status;
726 int error = 0; 727 int error = 0;
727 int start_timer = 0; 728 int start_timer = 0;
729 int one_packet = 0;
728 730
729 /* These transports have chunks to send. */ 731 /* These transports have chunks to send. */
730 struct list_head transport_list; 732 struct list_head transport_list;
@@ -830,20 +832,33 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
830 if (sctp_test_T_bit(chunk)) { 832 if (sctp_test_T_bit(chunk)) {
831 packet->vtag = asoc->c.my_vtag; 833 packet->vtag = asoc->c.my_vtag;
832 } 834 }
833 case SCTP_CID_SACK: 835 /* The following chunks are "response" chunks, i.e.
834 case SCTP_CID_HEARTBEAT: 836 * they are generated in response to something we
837 * received. If we are sending these, then we can
838 * send only 1 packet containing these chunks.
839 */
835 case SCTP_CID_HEARTBEAT_ACK: 840 case SCTP_CID_HEARTBEAT_ACK:
836 case SCTP_CID_SHUTDOWN:
837 case SCTP_CID_SHUTDOWN_ACK: 841 case SCTP_CID_SHUTDOWN_ACK:
838 case SCTP_CID_ERROR:
839 case SCTP_CID_COOKIE_ECHO:
840 case SCTP_CID_COOKIE_ACK: 842 case SCTP_CID_COOKIE_ACK:
841 case SCTP_CID_ECN_ECNE: 843 case SCTP_CID_COOKIE_ECHO:
844 case SCTP_CID_ERROR:
842 case SCTP_CID_ECN_CWR: 845 case SCTP_CID_ECN_CWR:
843 case SCTP_CID_ASCONF:
844 case SCTP_CID_ASCONF_ACK: 846 case SCTP_CID_ASCONF_ACK:
847 one_packet = 1;
848 /* Fall through */
849
850 case SCTP_CID_SACK:
851 case SCTP_CID_HEARTBEAT:
852 case SCTP_CID_SHUTDOWN:
853 case SCTP_CID_ECN_ECNE:
854 case SCTP_CID_ASCONF:
845 case SCTP_CID_FWD_TSN: 855 case SCTP_CID_FWD_TSN:
846 sctp_packet_transmit_chunk(packet, chunk); 856 status = sctp_packet_transmit_chunk(packet, chunk,
857 one_packet);
858 if (status != SCTP_XMIT_OK) {
859 /* put the chunk back */
860 list_add(&chunk->list, &q->control_chunk_list);
861 }
847 break; 862 break;
848 863
849 default: 864 default:
@@ -974,7 +989,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
974 atomic_read(&chunk->skb->users) : -1); 989 atomic_read(&chunk->skb->users) : -1);
975 990
976 /* Add the chunk to the packet. */ 991 /* Add the chunk to the packet. */
977 status = sctp_packet_transmit_chunk(packet, chunk); 992 status = sctp_packet_transmit_chunk(packet, chunk, 0);
978 993
979 switch (status) { 994 switch (status) {
980 case SCTP_XMIT_PMTU_FULL: 995 case SCTP_XMIT_PMTU_FULL:
@@ -1239,7 +1254,6 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1239 * Make sure the empty queue handler will get run later. 1254 * Make sure the empty queue handler will get run later.
1240 */ 1255 */
1241 q->empty = (list_empty(&q->out_chunk_list) && 1256 q->empty = (list_empty(&q->out_chunk_list) &&
1242 list_empty(&q->control_chunk_list) &&
1243 list_empty(&q->retransmit)); 1257 list_empty(&q->retransmit));
1244 if (!q->empty) 1258 if (!q->empty)
1245 goto finish; 1259 goto finish;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 0aba759cb9b7..5dd89831eceb 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -383,3 +383,144 @@ void sctp_assocs_proc_exit(void)
383{ 383{
384 remove_proc_entry("assocs", proc_net_sctp); 384 remove_proc_entry("assocs", proc_net_sctp);
385} 385}
386
387static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
388{
389 if (*pos >= sctp_assoc_hashsize)
390 return NULL;
391
392 if (*pos < 0)
393 *pos = 0;
394
395 if (*pos == 0)
396 seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
397 "REM_ADDR_RTX START\n");
398
399 return (void *)pos;
400}
401
402static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
403{
404 if (++*pos >= sctp_assoc_hashsize)
405 return NULL;
406
407 return pos;
408}
409
410static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
411{
412 return;
413}
414
415static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
416{
417 struct sctp_hashbucket *head;
418 struct sctp_ep_common *epb;
419 struct sctp_association *assoc;
420 struct hlist_node *node;
421 struct sctp_transport *tsp;
422 int hash = *(loff_t *)v;
423
424 if (hash >= sctp_assoc_hashsize)
425 return -ENOMEM;
426
427 head = &sctp_assoc_hashtable[hash];
428 sctp_local_bh_disable();
429 read_lock(&head->lock);
430 sctp_for_each_hentry(epb, node, &head->chain) {
431 assoc = sctp_assoc(epb);
432 list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
433 transports) {
434 /*
435 * The remote address (ADDR)
436 */
437 tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
438 seq_printf(seq, " ");
439
440 /*
441 * The association ID (ASSOC_ID)
442 */
443 seq_printf(seq, "%d ", tsp->asoc->assoc_id);
444
445 /*
446 * If the Heartbeat is active (HB_ACT)
447 * Note: 1 = Active, 0 = Inactive
448 */
449 seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));
450
451 /*
452 * Retransmit time out (RTO)
453 */
454 seq_printf(seq, "%lu ", tsp->rto);
455
456 /*
457 * Maximum path retransmit count (PATH_MAX_RTX)
458 */
459 seq_printf(seq, "%d ", tsp->pathmaxrxt);
460
461 /*
462 * remote address retransmit count (REM_ADDR_RTX)
463 * Note: We don't have a way to tally this at the moment
464 * so lets just leave it as zero for the moment
465 */
466 seq_printf(seq, "0 ");
467
468 /*
469 * remote address start time (START). This is also not
470 * currently implemented, but we can record it with a
471 * jiffies marker in a subsequent patch
472 */
473 seq_printf(seq, "0");
474
475 seq_printf(seq, "\n");
476 }
477 }
478
479 read_unlock(&head->lock);
480 sctp_local_bh_enable();
481
482 return 0;
483
484}
485
486static const struct seq_operations sctp_remaddr_ops = {
487 .start = sctp_remaddr_seq_start,
488 .next = sctp_remaddr_seq_next,
489 .stop = sctp_remaddr_seq_stop,
490 .show = sctp_remaddr_seq_show,
491};
492
493/* Cleanup the proc fs entry for 'remaddr' object. */
494void sctp_remaddr_proc_exit(void)
495{
496 remove_proc_entry("remaddr", proc_net_sctp);
497}
498
499static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
500{
501 return seq_open(file, &sctp_remaddr_ops);
502}
503
504static const struct file_operations sctp_remaddr_seq_fops = {
505 .open = sctp_remaddr_seq_open,
506 .read = seq_read,
507 .llseek = seq_lseek,
508 .release = seq_release,
509};
510
511int __init sctp_remaddr_proc_init(void)
512{
513 struct proc_dir_entry *p;
514
515 p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp);
516 if (!p)
517 return -ENOMEM;
518 p->proc_fops = &sctp_remaddr_seq_fops;
519
520 return 0;
521}
522
523void sctp_assoc_proc_exit(void)
524{
525 remove_proc_entry("remaddr", proc_net_sctp);
526}
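The seq_show handler above defines the column layout of the new /proc/net/sctp/remaddr file. A small userspace sketch that simply dumps the file, with the column order repeated in a comment for reference (REM_ADDR_RTX and START are reported as 0 for now, as noted in the comments above):

#include <stdio.h>

/* Dump /proc/net/sctp/remaddr.  Columns, per the seq_file code above:
 * ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX REM_ADDR_RTX START
 */
int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/sctp/remaddr", "r");

        if (!f) {
                perror("/proc/net/sctp/remaddr");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}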
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 9258dfe784ae..98c6a882016a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -113,9 +113,13 @@ static __init int sctp_proc_init(void)
113 goto out_eps_proc_init; 113 goto out_eps_proc_init;
114 if (sctp_assocs_proc_init()) 114 if (sctp_assocs_proc_init())
115 goto out_assocs_proc_init; 115 goto out_assocs_proc_init;
116 if (sctp_remaddr_proc_init())
117 goto out_remaddr_proc_init;
116 118
117 return 0; 119 return 0;
118 120
121out_remaddr_proc_init:
122 sctp_assocs_proc_exit();
119out_assocs_proc_init: 123out_assocs_proc_init:
120 sctp_eps_proc_exit(); 124 sctp_eps_proc_exit();
121out_eps_proc_init: 125out_eps_proc_init:
@@ -138,6 +142,7 @@ static void sctp_proc_exit(void)
138 sctp_snmp_proc_exit(); 142 sctp_snmp_proc_exit();
139 sctp_eps_proc_exit(); 143 sctp_eps_proc_exit();
140 sctp_assocs_proc_exit(); 144 sctp_assocs_proc_exit();
145 sctp_remaddr_proc_exit();
141 146
142 if (proc_net_sctp) { 147 if (proc_net_sctp) {
143 proc_net_sctp = NULL; 148 proc_net_sctp = NULL;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 23a9f1a95b7d..9732c797e8ed 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -190,20 +190,28 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
190 * unacknowledged DATA chunk. ... 190 * unacknowledged DATA chunk. ...
191 */ 191 */
192 if (!asoc->peer.sack_needed) { 192 if (!asoc->peer.sack_needed) {
193 /* We will need a SACK for the next packet. */ 193 asoc->peer.sack_cnt++;
194 asoc->peer.sack_needed = 1;
195 194
196 /* Set the SACK delay timeout based on the 195 /* Set the SACK delay timeout based on the
197 * SACK delay for the last transport 196 * SACK delay for the last transport
198 * data was received from, or the default 197 * data was received from, or the default
199 * for the association. 198 * for the association.
200 */ 199 */
201 if (trans) 200 if (trans) {
201 /* We will need a SACK for the next packet. */
202 if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
203 asoc->peer.sack_needed = 1;
204
202 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
203 trans->sackdelay; 206 trans->sackdelay;
204 else 207 } else {
208 /* We will need a SACK for the next packet. */
209 if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
210 asoc->peer.sack_needed = 1;
211
205 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 212 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
206 asoc->sackdelay; 213 asoc->sackdelay;
214 }
207 215
208 /* Restart the SACK timer. */ 216 /* Restart the SACK timer. */
209 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, 217 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
@@ -216,6 +224,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
216 goto nomem; 224 goto nomem;
217 225
218 asoc->peer.sack_needed = 0; 226 asoc->peer.sack_needed = 0;
227 asoc->peer.sack_cnt = 0;
219 228
220 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); 229 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
221 230
@@ -655,7 +664,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
655 struct sctp_association *asoc, 664 struct sctp_association *asoc,
656 struct sctp_sackhdr *sackh) 665 struct sctp_sackhdr *sackh)
657{ 666{
658 int err; 667 int err = 0;
659 668
660 if (sctp_outq_sack(&asoc->outqueue, sackh)) { 669 if (sctp_outq_sack(&asoc->outqueue, sackh)) {
661 /* There are no more TSNs awaiting SACK. */ 670 /* There are no more TSNs awaiting SACK. */
@@ -663,11 +672,6 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
663 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), 672 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
664 asoc->state, asoc->ep, asoc, NULL, 673 asoc->state, asoc->ep, asoc, NULL,
665 GFP_ATOMIC); 674 GFP_ATOMIC);
666 } else {
667 /* Windows may have opened, so we need
668 * to check if we have DATA to transmit
669 */
670 err = sctp_outq_flush(&asoc->outqueue, 0);
671 } 675 }
672 676
673 return err; 677 return err;
@@ -1472,8 +1476,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1472 break; 1476 break;
1473 1477
1474 case SCTP_CMD_DISCARD_PACKET: 1478 case SCTP_CMD_DISCARD_PACKET:
1475 /* We need to discard the whole packet. */ 1479 /* We need to discard the whole packet.
1480 * Uncork the queue since there might be
1481 * responses pending
1482 */
1476 chunk->pdiscard = 1; 1483 chunk->pdiscard = 1;
1484 if (asoc) {
1485 sctp_outq_uncork(&asoc->outqueue);
1486 local_cork = 0;
1487 }
1477 break; 1488 break;
1478 1489
1479 case SCTP_CMD_RTO_PENDING: 1490 case SCTP_CMD_RTO_PENDING:
@@ -1544,8 +1555,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1544 } 1555 }
1545 1556
1546out: 1557out:
1547 if (local_cork) 1558 /* If this is in response to a received chunk, wait until
1548 sctp_outq_uncork(&asoc->outqueue); 1559 * we are done with the packet to open the queue so that we don't
1560 * send multiple packets in response to a single request.
1561 */
1562 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1563 if (chunk->end_of_packet || chunk->singleton)
1564 sctp_outq_uncork(&asoc->outqueue);
1565 } else if (local_cork)
1566 sctp_outq_uncork(&asoc->outqueue);
1549 return error; 1567 return error;
1550nomem: 1568nomem:
1551 error = -ENOMEM; 1569 error = -ENOMEM;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 0c9d5a6950fe..b66a41d03c0d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -795,8 +795,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
795 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 795 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
796 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 796 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
797 797
798 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
799
800 /* This will send the COOKIE ACK */ 798 /* This will send the COOKIE ACK */
801 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 799 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
802 800
@@ -883,7 +881,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
883 if (asoc->autoclose) 881 if (asoc->autoclose)
884 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 882 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
885 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); 883 SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
886 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
887 884
888 /* It may also notify its ULP about the successful 885 /* It may also notify its ULP about the successful
889 * establishment of the association with a Communication Up 886 * establishment of the association with a Communication Up
@@ -1781,7 +1778,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1781 goto nomem; 1778 goto nomem;
1782 1779
1783 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1780 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1784 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1785 1781
1786 /* RFC 2960 5.1 Normal Establishment of an Association 1782 /* RFC 2960 5.1 Normal Establishment of an Association
1787 * 1783 *
@@ -1898,12 +1894,13 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1898 1894
1899 } 1895 }
1900 } 1896 }
1901 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1902 1897
1903 repl = sctp_make_cookie_ack(new_asoc, chunk); 1898 repl = sctp_make_cookie_ack(new_asoc, chunk);
1904 if (!repl) 1899 if (!repl)
1905 goto nomem; 1900 goto nomem;
1906 1901
1902 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1903
1907 if (ev) 1904 if (ev)
1908 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 1905 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
1909 SCTP_ULPEVENT(ev)); 1906 SCTP_ULPEVENT(ev));
@@ -1911,9 +1908,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
1911 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 1908 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
1912 SCTP_ULPEVENT(ai_ev)); 1909 SCTP_ULPEVENT(ai_ev));
1913 1910
1914 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1915 sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL());
1916
1917 return SCTP_DISPOSITION_CONSUME; 1911 return SCTP_DISPOSITION_CONSUME;
1918 1912
1919nomem: 1913nomem:
@@ -3970,9 +3964,6 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
3970 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3964 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3971 break; 3965 break;
3972 case SCTP_CID_ACTION_DISCARD_ERR: 3966 case SCTP_CID_ACTION_DISCARD_ERR:
3973 /* Discard the packet. */
3974 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3975
3976 /* Generate an ERROR chunk as response. */ 3967 /* Generate an ERROR chunk as response. */
3977 hdr = unk_chunk->chunk_hdr; 3968 hdr = unk_chunk->chunk_hdr;
3978 err_chunk = sctp_make_op_error(asoc, unk_chunk, 3969 err_chunk = sctp_make_op_error(asoc, unk_chunk,
@@ -3982,6 +3973,9 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
3982 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 3973 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
3983 SCTP_CHUNK(err_chunk)); 3974 SCTP_CHUNK(err_chunk));
3984 } 3975 }
3976
3977 /* Discard the packet. */
3978 sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3985 return SCTP_DISPOSITION_CONSUME; 3979 return SCTP_DISPOSITION_CONSUME;
3986 break; 3980 break;
3987 case SCTP_CID_ACTION_SKIP: 3981 case SCTP_CID_ACTION_SKIP:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0dbcde6758ea..df5572c39f0c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -956,7 +956,8 @@ out:
956 */ 956 */
957static int __sctp_connect(struct sock* sk, 957static int __sctp_connect(struct sock* sk,
958 struct sockaddr *kaddrs, 958 struct sockaddr *kaddrs,
959 int addrs_size) 959 int addrs_size,
960 sctp_assoc_t *assoc_id)
960{ 961{
961 struct sctp_sock *sp; 962 struct sctp_sock *sp;
962 struct sctp_endpoint *ep; 963 struct sctp_endpoint *ep;
@@ -1111,6 +1112,8 @@ static int __sctp_connect(struct sock* sk,
1111 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1112 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1112 1113
1113 err = sctp_wait_for_connect(asoc, &timeo); 1114 err = sctp_wait_for_connect(asoc, &timeo);
1115 if (!err && assoc_id)
1116 *assoc_id = asoc->assoc_id;
1114 1117
1115 /* Don't free association on exit. */ 1118 /* Don't free association on exit. */
1116 asoc = NULL; 1119 asoc = NULL;
@@ -1128,7 +1131,8 @@ out_free:
1128/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() 1131/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1129 * 1132 *
1130 * API 8.9 1133 * API 8.9
1131 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt); 1134 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1135 * sctp_assoc_t *asoc);
1132 * 1136 *
1133 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. 1137 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1134 * If the sd is an IPv6 socket, the addresses passed can either be IPv4 1138 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
@@ -1144,8 +1148,10 @@ out_free:
1144 * representation is termed a "packed array" of addresses). The caller 1148 * representation is termed a "packed array" of addresses). The caller
1145 * specifies the number of addresses in the array with addrcnt. 1149 * specifies the number of addresses in the array with addrcnt.
1146 * 1150 *
1147 * On success, sctp_connectx() returns 0. On failure, sctp_connectx() returns 1151 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1148 * -1, and sets errno to the appropriate error code. 1152 * the association id of the new association. On failure, sctp_connectx()
1153 * returns -1, and sets errno to the appropriate error code. The assoc_id
1154 * is not touched by the kernel.
1149 * 1155 *
1150 * For SCTP, the port given in each socket address must be the same, or 1156 * For SCTP, the port given in each socket address must be the same, or
1151 * sctp_connectx() will fail, setting errno to EINVAL. 1157 * sctp_connectx() will fail, setting errno to EINVAL.
@@ -1182,11 +1188,12 @@ out_free:
1182 * addrs The pointer to the addresses in user land 1188 * addrs The pointer to the addresses in user land
1183 * addrssize Size of the addrs buffer 1189 * addrssize Size of the addrs buffer
1184 * 1190 *
1185 * Returns 0 if ok, <0 errno code on error. 1191 * Returns >=0 if ok, <0 errno code on error.
1186 */ 1192 */
1187SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, 1193SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
1188 struct sockaddr __user *addrs, 1194 struct sockaddr __user *addrs,
1189 int addrs_size) 1195 int addrs_size,
1196 sctp_assoc_t *assoc_id)
1190{ 1197{
1191 int err = 0; 1198 int err = 0;
1192 struct sockaddr *kaddrs; 1199 struct sockaddr *kaddrs;
@@ -1209,13 +1216,46 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1209 if (__copy_from_user(kaddrs, addrs, addrs_size)) { 1216 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1210 err = -EFAULT; 1217 err = -EFAULT;
1211 } else { 1218 } else {
1212 err = __sctp_connect(sk, kaddrs, addrs_size); 1219 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1213 } 1220 }
1214 1221
1215 kfree(kaddrs); 1222 kfree(kaddrs);
1223
1216 return err; 1224 return err;
1217} 1225}
1218 1226
1227/*
1228 * This is an older interface. It's kept for backward compatibility
1229 * to the option that doesn't provide association id.
1230 */
1231SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
1232 struct sockaddr __user *addrs,
1233 int addrs_size)
1234{
1235 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1236}
1237
1238/*
1239 * New interface for the API. Since the API is implemented through a socket
1240 * option, to keep it simple we feed the association id back as the return
1241 * value of the call. Errors are always negative and association ids are
1242 * always positive.
1243 */
1244SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1245 struct sockaddr __user *addrs,
1246 int addrs_size)
1247{
1248 sctp_assoc_t assoc_id = 0;
1249 int err = 0;
1250
1251 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1252
1253 if (err)
1254 return err;
1255 else
1256 return assoc_id;
1257}
1258
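A rough userspace sketch of the behaviour described in the comment above: the packed array of addresses is passed through the SCTP_SOCKOPT_CONNECTX socket option and, with this patch, a successful call returns the positive id of the new association while errors stay negative. It assumes SCTP_SOCKOPT_CONNECTX is visible through the installed sctp headers; the two IPv4 addresses and the port are placeholders.

#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Pack two sockaddr_in entries back to back and hand them to the
 * kernel.  Return value: association id (> 0) on success, -1 with
 * errno set on failure.
 */
static int connectx_two_peers(int sd)
{
        struct sockaddr_in addrs[2];
        int i;

        memset(addrs, 0, sizeof(addrs));
        for (i = 0; i < 2; i++) {
                addrs[i].sin_family = AF_INET;
                addrs[i].sin_port = htons(5000);        /* placeholder port */
        }
        inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
        inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);

        return setsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX,
                          addrs, sizeof(addrs));
}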
1219/* API 3.1.4 close() - UDP Style Syntax 1259/* API 3.1.4 close() - UDP Style Syntax
1220 * Applications use close() to perform graceful shutdown (as described in 1260 * Applications use close() to perform graceful shutdown (as described in
1221 * Section 10.1 of [SCTP]) on ALL the associations currently represented 1261 * Section 10.1 of [SCTP]) on ALL the associations currently represented
@@ -2305,74 +2345,98 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2305 return 0; 2345 return 0;
2306} 2346}
2307 2347
2308/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 2348/*
2309 * 2349 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2310 * This options will get or set the delayed ack timer. The time is set 2350 *
2311 * in milliseconds. If the assoc_id is 0, then this sets or gets the 2351 * This option will effect the way delayed acks are performed. This
2312 * endpoints default delayed ack timer value. If the assoc_id field is 2352 * option allows you to get or set the delayed ack time, in
2313 * non-zero, then the set or get effects the specified association. 2353 * milliseconds. It also allows changing the delayed ack frequency.
2314 * 2354 * Changing the frequency to 1 disables the delayed sack algorithm. If
2315 * struct sctp_assoc_value { 2355 * the assoc_id is 0, then this sets or gets the endpoints default
2316 * sctp_assoc_t assoc_id; 2356 * values. If the assoc_id field is non-zero, then the set or get
2317 * uint32_t assoc_value; 2357 * effects the specified association for the one to many model (the
2318 * }; 2358 * assoc_id field is ignored by the one to one model). Note that if
2359 * sack_delay or sack_freq are 0 when setting this option, then the
2360 * current values will remain unchanged.
2361 *
2362 * struct sctp_sack_info {
2363 * sctp_assoc_t sack_assoc_id;
2364 * uint32_t sack_delay;
2365 * uint32_t sack_freq;
2366 * };
2319 * 2367 *
2320 * assoc_id - This parameter, indicates which association the 2368 * sack_assoc_id - This parameter indicates which association the user
2321 * user is preforming an action upon. Note that if 2369 * is performing an action upon. Note that if this field's value is
2322 * this field's value is zero then the endpoints 2370 * zero then the endpoints default value is changed (affecting future
2323 * default value is changed (effecting future 2371 * associations only).
2324 * associations only).
2325 * 2372 *
2326 * assoc_value - This parameter contains the number of milliseconds 2373 * sack_delay - This parameter contains the number of milliseconds that
2327 * that the user is requesting the delayed ACK timer 2374 * the user is requesting the delayed ACK timer be set to. Note that
2328 * be set to. Note that this value is defined in 2375 * this value is defined in the standard to be between 200 and 500
2329 * the standard to be between 200 and 500 milliseconds. 2376 * milliseconds.
2330 * 2377 *
2331 * Note: a value of zero will leave the value alone, 2378 * sack_freq - This parameter contains the number of packets that must
2332 * but disable SACK delay. A non-zero value will also 2379 * be received before a sack is sent without waiting for the delay
2333 * enable SACK delay. 2380 * timer to expire. The default value for this is 2, setting this
2381 * value to 1 will disable the delayed sack algorithm.
2334 */ 2382 */
2335 2383
2336static int sctp_setsockopt_delayed_ack_time(struct sock *sk, 2384static int sctp_setsockopt_delayed_ack(struct sock *sk,
2337 char __user *optval, int optlen) 2385 char __user *optval, int optlen)
2338{ 2386{
2339 struct sctp_assoc_value params; 2387 struct sctp_sack_info params;
2340 struct sctp_transport *trans = NULL; 2388 struct sctp_transport *trans = NULL;
2341 struct sctp_association *asoc = NULL; 2389 struct sctp_association *asoc = NULL;
2342 struct sctp_sock *sp = sctp_sk(sk); 2390 struct sctp_sock *sp = sctp_sk(sk);
2343 2391
2344 if (optlen != sizeof(struct sctp_assoc_value)) 2392 if (optlen == sizeof(struct sctp_sack_info)) {
2345 return - EINVAL; 2393 if (copy_from_user(&params, optval, optlen))
2394 return -EFAULT;
2346 2395
2347 if (copy_from_user(&params, optval, optlen)) 2396 if (params.sack_delay == 0 && params.sack_freq == 0)
2348 return -EFAULT; 2397 return 0;
2398 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2399 printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
2400 "in delayed_ack socket option deprecated\n");
2401 printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
2402 if (copy_from_user(&params, optval, optlen))
2403 return -EFAULT;
2404
2405 if (params.sack_delay == 0)
2406 params.sack_freq = 1;
2407 else
2408 params.sack_freq = 0;
2409 } else
2410 return -EINVAL;
2349 2411
2350 /* Validate value parameter. */ 2412 /* Validate value parameter. */
2351 if (params.assoc_value > 500) 2413 if (params.sack_delay > 500)
2352 return -EINVAL; 2414 return -EINVAL;
2353 2415
2354 /* Get association, if assoc_id != 0 and the socket is a one 2416 /* Get association, if sack_assoc_id != 0 and the socket is a one
2355 * to many style socket, and an association was not found, then 2417 * to many style socket, and an association was not found, then
2356 * the id was invalid. 2418 * the id was invalid.
2357 */ 2419 */
2358 asoc = sctp_id2assoc(sk, params.assoc_id); 2420 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2359 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 2421 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2360 return -EINVAL; 2422 return -EINVAL;
2361 2423
2362 if (params.assoc_value) { 2424 if (params.sack_delay) {
2363 if (asoc) { 2425 if (asoc) {
2364 asoc->sackdelay = 2426 asoc->sackdelay =
2365 msecs_to_jiffies(params.assoc_value); 2427 msecs_to_jiffies(params.sack_delay);
2366 asoc->param_flags = 2428 asoc->param_flags =
2367 (asoc->param_flags & ~SPP_SACKDELAY) | 2429 (asoc->param_flags & ~SPP_SACKDELAY) |
2368 SPP_SACKDELAY_ENABLE; 2430 SPP_SACKDELAY_ENABLE;
2369 } else { 2431 } else {
2370 sp->sackdelay = params.assoc_value; 2432 sp->sackdelay = params.sack_delay;
2371 sp->param_flags = 2433 sp->param_flags =
2372 (sp->param_flags & ~SPP_SACKDELAY) | 2434 (sp->param_flags & ~SPP_SACKDELAY) |
2373 SPP_SACKDELAY_ENABLE; 2435 SPP_SACKDELAY_ENABLE;
2374 } 2436 }
2375 } else { 2437 }
2438
2439 if (params.sack_freq == 1) {
2376 if (asoc) { 2440 if (asoc) {
2377 asoc->param_flags = 2441 asoc->param_flags =
2378 (asoc->param_flags & ~SPP_SACKDELAY) | 2442 (asoc->param_flags & ~SPP_SACKDELAY) |
@@ -2382,22 +2446,40 @@ static int sctp_setsockopt_delayed_ack_time(struct sock *sk,
2382 (sp->param_flags & ~SPP_SACKDELAY) | 2446 (sp->param_flags & ~SPP_SACKDELAY) |
2383 SPP_SACKDELAY_DISABLE; 2447 SPP_SACKDELAY_DISABLE;
2384 } 2448 }
2449 } else if (params.sack_freq > 1) {
2450 if (asoc) {
2451 asoc->sackfreq = params.sack_freq;
2452 asoc->param_flags =
2453 (asoc->param_flags & ~SPP_SACKDELAY) |
2454 SPP_SACKDELAY_ENABLE;
2455 } else {
2456 sp->sackfreq = params.sack_freq;
2457 sp->param_flags =
2458 (sp->param_flags & ~SPP_SACKDELAY) |
2459 SPP_SACKDELAY_ENABLE;
2460 }
2385 } 2461 }
2386 2462
2387 /* If change is for association, also apply to each transport. */ 2463 /* If change is for association, also apply to each transport. */
2388 if (asoc) { 2464 if (asoc) {
2389 list_for_each_entry(trans, &asoc->peer.transport_addr_list, 2465 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2390 transports) { 2466 transports) {
2391 if (params.assoc_value) { 2467 if (params.sack_delay) {
2392 trans->sackdelay = 2468 trans->sackdelay =
2393 msecs_to_jiffies(params.assoc_value); 2469 msecs_to_jiffies(params.sack_delay);
2394 trans->param_flags = 2470 trans->param_flags =
2395 (trans->param_flags & ~SPP_SACKDELAY) | 2471 (trans->param_flags & ~SPP_SACKDELAY) |
2396 SPP_SACKDELAY_ENABLE; 2472 SPP_SACKDELAY_ENABLE;
2397 } else { 2473 }
2474 if (params.sack_freq == 1) {
2398 trans->param_flags = 2475 trans->param_flags =
2399 (trans->param_flags & ~SPP_SACKDELAY) | 2476 (trans->param_flags & ~SPP_SACKDELAY) |
2400 SPP_SACKDELAY_DISABLE; 2477 SPP_SACKDELAY_DISABLE;
2478 } else if (params.sack_freq > 1) {
2479 trans->sackfreq = params.sack_freq;
2480 trans->param_flags =
2481 (trans->param_flags & ~SPP_SACKDELAY) |
2482 SPP_SACKDELAY_ENABLE;
2401 } 2483 }
2402 } 2484 }
2403 } 2485 }
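A minimal userspace sketch of setting the option documented above with the new struct sctp_sack_info (the structure and the SCTP_DELAYED_ACK name come from the companion header change, not shown in this hunk; the 200 ms delay and frequency of 3 are just example values):

#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Ask for a 200 ms SACK delay and a SACK after every 3rd packet on the
 * endpoint defaults (sack_assoc_id == 0).  Setting sack_freq to 1
 * would disable the delayed SACK algorithm instead.
 */
static int set_delayed_sack(int sd)
{
        struct sctp_sack_info sack;

        memset(&sack, 0, sizeof(sack));
        sack.sack_assoc_id = 0;         /* endpoint default */
        sack.sack_delay = 200;          /* milliseconds */
        sack.sack_freq = 3;             /* packets before a forced SACK */

        return setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_ACK,
                          &sack, sizeof(sack));
}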
@@ -3164,10 +3246,18 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3164 optlen, SCTP_BINDX_REM_ADDR); 3246 optlen, SCTP_BINDX_REM_ADDR);
3165 break; 3247 break;
3166 3248
3249 case SCTP_SOCKOPT_CONNECTX_OLD:
3250 /* 'optlen' is the size of the addresses buffer. */
3251 retval = sctp_setsockopt_connectx_old(sk,
3252 (struct sockaddr __user *)optval,
3253 optlen);
3254 break;
3255
3167 case SCTP_SOCKOPT_CONNECTX: 3256 case SCTP_SOCKOPT_CONNECTX:
3168 /* 'optlen' is the size of the addresses buffer. */ 3257 /* 'optlen' is the size of the addresses buffer. */
3169 retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, 3258 retval = sctp_setsockopt_connectx(sk,
3170 optlen); 3259 (struct sockaddr __user *)optval,
3260 optlen);
3171 break; 3261 break;
3172 3262
3173 case SCTP_DISABLE_FRAGMENTS: 3263 case SCTP_DISABLE_FRAGMENTS:
@@ -3186,8 +3276,8 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3186 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); 3276 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3187 break; 3277 break;
3188 3278
3189 case SCTP_DELAYED_ACK_TIME: 3279 case SCTP_DELAYED_ACK:
3190 retval = sctp_setsockopt_delayed_ack_time(sk, optval, optlen); 3280 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3191 break; 3281 break;
3192 case SCTP_PARTIAL_DELIVERY_POINT: 3282 case SCTP_PARTIAL_DELIVERY_POINT:
3193 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); 3283 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
@@ -3294,7 +3384,7 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
3294 /* Pass correct addr len to common routine (so it knows there 3384 /* Pass correct addr len to common routine (so it knows there
3295 * is only one address being passed. 3385 * is only one address being passed.
3296 */ 3386 */
3297 err = __sctp_connect(sk, addr, af->sockaddr_len); 3387 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3298 } 3388 }
3299 3389
3300 sctp_release_sock(sk); 3390 sctp_release_sock(sk);
@@ -3446,6 +3536,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3446 sp->pathmaxrxt = sctp_max_retrans_path; 3536 sp->pathmaxrxt = sctp_max_retrans_path;
3447 sp->pathmtu = 0; // allow default discovery 3537 sp->pathmtu = 0; // allow default discovery
3448 sp->sackdelay = sctp_sack_timeout; 3538 sp->sackdelay = sctp_sack_timeout;
3539 sp->sackfreq = 2;
3449 sp->param_flags = SPP_HB_ENABLE | 3540 sp->param_flags = SPP_HB_ENABLE |
3450 SPP_PMTUD_ENABLE | 3541 SPP_PMTUD_ENABLE |
3451 SPP_SACKDELAY_ENABLE; 3542 SPP_SACKDELAY_ENABLE;
@@ -3497,7 +3588,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3497} 3588}
3498 3589
3499/* Cleanup any SCTP per socket resources. */ 3590/* Cleanup any SCTP per socket resources. */
3500SCTP_STATIC int sctp_destroy_sock(struct sock *sk) 3591SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3501{ 3592{
3502 struct sctp_endpoint *ep; 3593 struct sctp_endpoint *ep;
3503 3594
@@ -3507,7 +3598,6 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
3507 ep = sctp_sk(sk)->ep; 3598 ep = sctp_sk(sk)->ep;
3508 sctp_endpoint_free(ep); 3599 sctp_endpoint_free(ep);
3509 atomic_dec(&sctp_sockets_allocated); 3600 atomic_dec(&sctp_sockets_allocated);
3510 return 0;
3511} 3601}
3512 3602
3513/* API 4.1.7 shutdown() - TCP Style Syntax 3603/* API 4.1.7 shutdown() - TCP Style Syntax
@@ -3999,70 +4089,91 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
3999 return 0; 4089 return 0;
4000} 4090}
4001 4091
4002/* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) 4092/*
4003 * 4093 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4004 * This options will get or set the delayed ack timer. The time is set 4094 *
4005 * in milliseconds. If the assoc_id is 0, then this sets or gets the 4095 * This option will affect the way delayed acks are performed. This
4006 * endpoints default delayed ack timer value. If the assoc_id field is 4096 * option allows you to get or set the delayed ack time, in
4007 * non-zero, then the set or get effects the specified association. 4097 * milliseconds. It also allows changing the delayed ack frequency.
4008 * 4098 * Changing the frequency to 1 disables the delayed sack algorithm. If
4009 * struct sctp_assoc_value { 4099 * the assoc_id is 0, then this sets or gets the endpoints default
4010 * sctp_assoc_t assoc_id; 4100 * values. If the assoc_id field is non-zero, then the set or get
4011 * uint32_t assoc_value; 4101 * affects the specified association for the one to many model (the
4012 * }; 4102 * assoc_id field is ignored by the one to one model). Note that if
4103 * sack_delay or sack_freq are 0 when setting this option, then the
4104 * current values will remain unchanged.
4105 *
4106 * struct sctp_sack_info {
4107 * sctp_assoc_t sack_assoc_id;
4108 * uint32_t sack_delay;
4109 * uint32_t sack_freq;
4110 * };
4013 * 4111 *
4014 * assoc_id - This parameter, indicates which association the 4112 * sack_assoc_id - This parameter indicates which association the user
4015 * user is preforming an action upon. Note that if 4113 * is performing an action upon. Note that if this field's value is
4016 * this field's value is zero then the endpoints 4114 * zero then the endpoints default value is changed (affecting future
4017 * default value is changed (effecting future 4115 * associations only).
4018 * associations only).
4019 * 4116 *
4020 * assoc_value - This parameter contains the number of milliseconds 4117 * sack_delay - This parameter contains the number of milliseconds that
4021 * that the user is requesting the delayed ACK timer 4118 * the user is requesting the delayed ACK timer be set to. Note that
4022 * be set to. Note that this value is defined in 4119 * this value is defined in the standard to be between 200 and 500
4023 * the standard to be between 200 and 500 milliseconds. 4120 * milliseconds.
4024 * 4121 *
4025 * Note: a value of zero will leave the value alone, 4122 * sack_freq - This parameter contains the number of packets that must
4026 * but disable SACK delay. A non-zero value will also 4123 * be received before a sack is sent without waiting for the delay
4027 * enable SACK delay. 4124 * timer to expire. The default value for this is 2, setting this
4125 * value to 1 will disable the delayed sack algorithm.
4028 */ 4126 */
4029static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, 4127static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4030 char __user *optval, 4128 char __user *optval,
4031 int __user *optlen) 4129 int __user *optlen)
4032{ 4130{
4033 struct sctp_assoc_value params; 4131 struct sctp_sack_info params;
4034 struct sctp_association *asoc = NULL; 4132 struct sctp_association *asoc = NULL;
4035 struct sctp_sock *sp = sctp_sk(sk); 4133 struct sctp_sock *sp = sctp_sk(sk);
4036 4134
4037 if (len < sizeof(struct sctp_assoc_value)) 4135 if (len >= sizeof(struct sctp_sack_info)) {
4038 return - EINVAL; 4136 len = sizeof(struct sctp_sack_info);
4039
4040 len = sizeof(struct sctp_assoc_value);
4041 4137
4042 if (copy_from_user(&params, optval, len)) 4138 if (copy_from_user(&params, optval, len))
4043 return -EFAULT; 4139 return -EFAULT;
4140 } else if (len == sizeof(struct sctp_assoc_value)) {
4141 printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
4142 "in delayed_ack socket option deprecated\n");
4143 printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
4144 if (copy_from_user(&params, optval, len))
4145 return -EFAULT;
4146 } else
4147 return - EINVAL;
4044 4148
4045 /* Get association, if assoc_id != 0 and the socket is a one 4149 /* Get association, if sack_assoc_id != 0 and the socket is a one
4046 * to many style socket, and an association was not found, then 4150 * to many style socket, and an association was not found, then
4047 * the id was invalid. 4151 * the id was invalid.
4048 */ 4152 */
4049 asoc = sctp_id2assoc(sk, params.assoc_id); 4153 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4050 if (!asoc && params.assoc_id && sctp_style(sk, UDP)) 4154 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4051 return -EINVAL; 4155 return -EINVAL;
4052 4156
4053 if (asoc) { 4157 if (asoc) {
4054 /* Fetch association values. */ 4158 /* Fetch association values. */
4055 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) 4159 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4056 params.assoc_value = jiffies_to_msecs( 4160 params.sack_delay = jiffies_to_msecs(
4057 asoc->sackdelay); 4161 asoc->sackdelay);
4058 else 4162 params.sack_freq = asoc->sackfreq;
4059 params.assoc_value = 0; 4163
4164 } else {
4165 params.sack_delay = 0;
4166 params.sack_freq = 1;
4167 }
4060 } else { 4168 } else {
4061 /* Fetch socket values. */ 4169 /* Fetch socket values. */
4062 if (sp->param_flags & SPP_SACKDELAY_ENABLE) 4170 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4063 params.assoc_value = sp->sackdelay; 4171 params.sack_delay = sp->sackdelay;
4064 else 4172 params.sack_freq = sp->sackfreq;
4065 params.assoc_value = 0; 4173 } else {
4174 params.sack_delay = 0;
4175 params.sack_freq = 1;
4176 }
4066 } 4177 }
4067 4178
4068 if (copy_to_user(optval, &params, len)) 4179 if (copy_to_user(optval, &params, len))
@@ -4112,6 +4223,8 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
4112 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 4223 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4113 return -EFAULT; 4224 return -EFAULT;
4114 4225
4226 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD "
4227 "socket option deprecated\n");
4115 /* For UDP-style sockets, id specifies the association to query. */ 4228 /* For UDP-style sockets, id specifies the association to query. */
4116 asoc = sctp_id2assoc(sk, id); 4229 asoc = sctp_id2assoc(sk, id);
4117 if (!asoc) 4230 if (!asoc)
@@ -4151,6 +4264,9 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
4151 4264
4152 if (getaddrs.addr_num <= 0) return -EINVAL; 4265 if (getaddrs.addr_num <= 0) return -EINVAL;
4153 4266
4267 printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD "
4268 "socket option deprecated\n");
4269
4154 /* For UDP-style sockets, id specifies the association to query. */ 4270 /* For UDP-style sockets, id specifies the association to query. */
4155 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4271 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4156 if (!asoc) 4272 if (!asoc)
@@ -4244,6 +4360,9 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4244 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 4360 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
4245 return -EFAULT; 4361 return -EFAULT;
4246 4362
4363 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD "
4364 "socket option deprecated\n");
4365
4247 /* 4366 /*
4248 * For UDP-style sockets, id specifies the association to query. 4367 * For UDP-style sockets, id specifies the association to query.
4249 * If the id field is set to the value '0' then the locally bound 4368 * If the id field is set to the value '0' then the locally bound
@@ -4404,6 +4523,10 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4404 if (getaddrs.addr_num <= 0 || 4523 if (getaddrs.addr_num <= 0 ||
4405 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) 4524 getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
4406 return -EINVAL; 4525 return -EINVAL;
4526
4527 printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD "
4528 "socket option deprecated\n");
4529
4407 /* 4530 /*
4408 * For UDP-style sockets, id specifies the association to query. 4531 * For UDP-style sockets, id specifies the association to query.
4409 * If the id field is set to the value '0' then the locally bound 4532 * If the id field is set to the value '0' then the locally bound
@@ -5220,8 +5343,8 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5220 retval = sctp_getsockopt_peer_addr_params(sk, len, optval, 5343 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5221 optlen); 5344 optlen);
5222 break; 5345 break;
5223 case SCTP_DELAYED_ACK_TIME: 5346 case SCTP_DELAYED_ACK:
5224 retval = sctp_getsockopt_delayed_ack_time(sk, len, optval, 5347 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
5225 optlen); 5348 optlen);
5226 break; 5349 break;
5227 case SCTP_INITMSG: 5350 case SCTP_INITMSG:
diff --git a/net/socket.c b/net/socket.c
index 66c4a8cf6db9..81fe82513046 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -90,6 +90,7 @@
90#include <asm/unistd.h> 90#include <asm/unistd.h>
91 91
92#include <net/compat.h> 92#include <net/compat.h>
93#include <net/wext.h>
93 94
94#include <net/sock.h> 95#include <net/sock.h>
95#include <linux/netfilter.h> 96#include <linux/netfilter.h>
@@ -2210,10 +2211,19 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd,
2210{ 2211{
2211 struct socket *sock = file->private_data; 2212 struct socket *sock = file->private_data;
2212 int ret = -ENOIOCTLCMD; 2213 int ret = -ENOIOCTLCMD;
2214 struct sock *sk;
2215 struct net *net;
2216
2217 sk = sock->sk;
2218 net = sock_net(sk);
2213 2219
2214 if (sock->ops->compat_ioctl) 2220 if (sock->ops->compat_ioctl)
2215 ret = sock->ops->compat_ioctl(sock, cmd, arg); 2221 ret = sock->ops->compat_ioctl(sock, cmd, arg);
2216 2222
2223 if (ret == -ENOIOCTLCMD &&
2224 (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
2225 ret = compat_wext_handle_ioctl(net, cmd, arg);
2226
2217 return ret; 2227 return ret;
2218} 2228}
2219#endif 2229#endif
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index cc12d5f5d5da..019d4b4478c9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -33,8 +33,6 @@
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * $Id$
38 */ 36 */
39 37
40 38
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index b4f0525f91af..007c1a6708ee 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -4,7 +4,6 @@
4 * Begun April 1, 1996, Mike Shaver. 4 * Begun April 1, 1996, Mike Shaver.
5 * Added /proc/sys/net directories for each protocol family. [MS] 5 * Added /proc/sys/net directories for each protocol family. [MS]
6 * 6 *
7 * $Log: sysctl_net.c,v $
8 * Revision 1.2 1996/05/08 20:24:40 shaver 7 * Revision 1.2 1996/05/08 20:24:40 shaver
9 * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and 8 * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
10 * NET_IPV4_IP_FORWARD. 9 * NET_IPV4_IP_FORWARD.
@@ -40,6 +39,27 @@ static struct ctl_table_root net_sysctl_root = {
40 .lookup = net_ctl_header_lookup, 39 .lookup = net_ctl_header_lookup,
41}; 40};
42 41
42static LIST_HEAD(net_sysctl_ro_tables);
43static struct list_head *net_ctl_ro_header_lookup(struct ctl_table_root *root,
44 struct nsproxy *namespaces)
45{
46 return &net_sysctl_ro_tables;
47}
48
49static int net_ctl_ro_header_perms(struct ctl_table_root *root,
50 struct nsproxy *namespaces, struct ctl_table *table)
51{
52 if (namespaces->net_ns == &init_net)
53 return table->mode;
54 else
55 return table->mode & ~0222;
56}
57
58static struct ctl_table_root net_sysctl_ro_root = {
59 .lookup = net_ctl_ro_header_lookup,
60 .permissions = net_ctl_ro_header_perms,
61};
62
43static int sysctl_net_init(struct net *net) 63static int sysctl_net_init(struct net *net)
44{ 64{
45 INIT_LIST_HEAD(&net->sysctl_table_headers); 65 INIT_LIST_HEAD(&net->sysctl_table_headers);
@@ -64,6 +84,7 @@ static __init int sysctl_init(void)
64 if (ret) 84 if (ret)
65 goto out; 85 goto out;
66 register_sysctl_root(&net_sysctl_root); 86 register_sysctl_root(&net_sysctl_root);
87 register_sysctl_root(&net_sysctl_ro_root);
67out: 88out:
68 return ret; 89 return ret;
69} 90}
@@ -80,6 +101,14 @@ struct ctl_table_header *register_net_sysctl_table(struct net *net,
80} 101}
81EXPORT_SYMBOL_GPL(register_net_sysctl_table); 102EXPORT_SYMBOL_GPL(register_net_sysctl_table);
82 103
104struct ctl_table_header *register_net_sysctl_rotable(const
105 struct ctl_path *path, struct ctl_table *table)
106{
107 return __register_sysctl_paths(&net_sysctl_ro_root,
108 &init_nsproxy, path, table);
109}
110EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
111
83void unregister_net_sysctl_table(struct ctl_table_header *header) 112void unregister_net_sysctl_table(struct ctl_table_header *header)
84{ 113{
85 unregister_sysctl_table(header); 114 unregister_sysctl_table(header);
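Illustrative sketch (not part of the diff): a hypothetical caller of the new register_net_sysctl_rotable() helper. All example_* names are invented, and the declaration is assumed to come from net/net_namespace.h alongside register_net_sysctl_table(). A table registered through the read-only root keeps its full mode in the initial network namespace but has its write bits masked off (mode & ~0222) in every other namespace, per net_ctl_ro_header_perms() above.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_ro_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,		/* seen as 0444 outside init_net */
		.proc_handler	= &proc_dointvec,
	},
	{ }
};

static struct ctl_path example_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "example", .ctl_name = CTL_UNNUMBERED, },
	{ }
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_init(void)
{
	example_hdr = register_net_sysctl_rotable(example_path, example_table);
	return example_hdr ? 0 : -ENOMEM;
}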
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e7880172ef19..a5883b1452ff 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -276,7 +276,7 @@ static void bclink_send_nack(struct node *n_ptr)
276 if (buf) { 276 if (buf) {
277 msg = buf_msg(buf); 277 msg = buf_msg(buf);
278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 278 msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
279 TIPC_OK, INT_H_SIZE, n_ptr->addr); 279 INT_H_SIZE, n_ptr->addr);
280 msg_set_mc_netid(msg, tipc_net_id); 280 msg_set_mc_netid(msg, tipc_net_id);
281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 281 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); 282 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
@@ -571,7 +571,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
571 assert(tipc_cltr_bcast_nodes.count != 0); 571 assert(tipc_cltr_bcast_nodes.count != 0);
572 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count); 572 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
573 msg = buf_msg(buf); 573 msg = buf_msg(buf);
574 msg_set_non_seq(msg); 574 msg_set_non_seq(msg, 1);
575 msg_set_mc_netid(msg, tipc_net_id); 575 msg_set_mc_netid(msg, tipc_net_id);
576 } 576 }
577 577
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 4bb3404f610b..bc1db474fe01 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -238,7 +238,7 @@ static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
238 if (buf) { 238 if (buf) {
239 msg = buf_msg(buf); 239 msg = buf_msg(buf);
240 memset((char *)msg, 0, size); 240 memset((char *)msg, 0, size);
241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest); 241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
242 } 242 }
243 return buf; 243 return buf;
244} 244}
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c71337a22d33..ca3544d030c7 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -2,7 +2,7 @@
2 * net/tipc/config.c: TIPC configuration management code 2 * net/tipc/config.c: TIPC configuration management code
3 * 3 *
4 * Copyright (c) 2002-2006, Ericsson AB 4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2006, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -293,7 +293,6 @@ static struct sk_buff *cfg_set_own_addr(void)
293 if (tipc_mode == TIPC_NET_MODE) 293 if (tipc_mode == TIPC_NET_MODE)
294 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 294 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
295 " (cannot change node address once assigned)"); 295 " (cannot change node address once assigned)");
296 tipc_own_addr = addr;
297 296
298 /* 297 /*
299 * Must release all spinlocks before calling start_net() because 298 * Must release all spinlocks before calling start_net() because
@@ -306,7 +305,7 @@ static struct sk_buff *cfg_set_own_addr(void)
306 */ 305 */
307 306
308 spin_unlock_bh(&config_lock); 307 spin_unlock_bh(&config_lock);
309 tipc_core_start_net(); 308 tipc_core_start_net(addr);
310 spin_lock_bh(&config_lock); 309 spin_lock_bh(&config_lock);
311 return tipc_cfg_reply_none(); 310 return tipc_cfg_reply_none();
312} 311}
@@ -529,7 +528,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
529 break; 528 break;
530#endif 529#endif
531 case TIPC_CMD_SET_LOG_SIZE: 530 case TIPC_CMD_SET_LOG_SIZE:
532 rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space); 531 rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
533 break; 532 break;
534 case TIPC_CMD_DUMP_LOG: 533 case TIPC_CMD_DUMP_LOG:
535 rep_tlv_buf = tipc_log_dump(); 534 rep_tlv_buf = tipc_log_dump();
@@ -602,6 +601,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
602 case TIPC_CMD_GET_NETID: 601 case TIPC_CMD_GET_NETID:
603 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id); 602 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
604 break; 603 break;
604 case TIPC_CMD_NOT_NET_ADMIN:
605 rep_tlv_buf =
606 tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
607 break;
605 default: 608 default:
606 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 609 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
607 " (unknown command)"); 610 " (unknown command)");
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 740aac5cdfb6..3256bd7d398f 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
49#include "config.h" 49#include "config.h"
50 50
51 51
52#define TIPC_MOD_VER "1.6.3" 52#define TIPC_MOD_VER "1.6.4"
53 53
54#ifndef CONFIG_TIPC_ZONES 54#ifndef CONFIG_TIPC_ZONES
55#define CONFIG_TIPC_ZONES 3 55#define CONFIG_TIPC_ZONES 3
@@ -117,11 +117,11 @@ void tipc_core_stop_net(void)
117 * start_net - start TIPC networking sub-systems 117 * start_net - start TIPC networking sub-systems
118 */ 118 */
119 119
120int tipc_core_start_net(void) 120int tipc_core_start_net(unsigned long addr)
121{ 121{
122 int res; 122 int res;
123 123
124 if ((res = tipc_net_start()) || 124 if ((res = tipc_net_start(addr)) ||
125 (res = tipc_eth_media_start())) { 125 (res = tipc_eth_media_start())) {
126 tipc_core_stop_net(); 126 tipc_core_stop_net();
127 } 127 }
@@ -164,8 +164,7 @@ int tipc_core_start(void)
164 tipc_mode = TIPC_NODE_MODE; 164 tipc_mode = TIPC_NODE_MODE;
165 165
166 if ((res = tipc_handler_start()) || 166 if ((res = tipc_handler_start()) ||
167 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions, 167 (res = tipc_ref_table_init(tipc_max_ports, tipc_random)) ||
168 tipc_random)) ||
169 (res = tipc_reg_start()) || 168 (res = tipc_reg_start()) ||
170 (res = tipc_nametbl_init()) || 169 (res = tipc_nametbl_init()) ||
171 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) || 170 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
@@ -182,7 +181,7 @@ static int __init tipc_init(void)
182{ 181{
183 int res; 182 int res;
184 183
185 tipc_log_reinit(CONFIG_TIPC_LOG); 184 tipc_log_resize(CONFIG_TIPC_LOG);
186 info("Activated (version " TIPC_MOD_VER 185 info("Activated (version " TIPC_MOD_VER
187 " compiled " __DATE__ " " __TIME__ ")\n"); 186 " compiled " __DATE__ " " __TIME__ ")\n");
188 187
@@ -209,7 +208,7 @@ static void __exit tipc_exit(void)
209 tipc_core_stop_net(); 208 tipc_core_stop_net();
210 tipc_core_stop(); 209 tipc_core_stop();
211 info("Deactivated\n"); 210 info("Deactivated\n");
212 tipc_log_stop(); 211 tipc_log_resize(0);
213} 212}
214 213
215module_init(tipc_init); 214module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5a0e4878d3b7..a881f92a8537 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -2,7 +2,7 @@
2 * net/tipc/core.h: Include file for TIPC global declarations 2 * net/tipc/core.h: Include file for TIPC global declarations
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -59,84 +59,108 @@
59#include <linux/vmalloc.h> 59#include <linux/vmalloc.h>
60 60
61/* 61/*
62 * TIPC debugging code 62 * TIPC sanity test macros
63 */ 63 */
64 64
65#define assert(i) BUG_ON(!(i)) 65#define assert(i) BUG_ON(!(i))
66 66
67struct tipc_msg;
68extern struct print_buf *TIPC_NULL, *TIPC_CONS, *TIPC_LOG;
69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
71void tipc_printf(struct print_buf *, const char *fmt, ...);
72void tipc_dump(struct print_buf*,const char *fmt, ...);
73
74#ifdef CONFIG_TIPC_DEBUG
75
76/* 67/*
77 * TIPC debug support included: 68 * TIPC system monitoring code
78 * - system messages are printed to TIPC_OUTPUT print buffer
79 * - debug messages are printed to DBG_OUTPUT print buffer
80 */ 69 */
81 70
82#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg) 71/*
83#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg) 72 * TIPC's print buffer subsystem supports the following print buffers:
84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) 73 *
74 * TIPC_NULL : null buffer (i.e. print nowhere)
75 * TIPC_CONS : system console
76 * TIPC_LOG : TIPC log buffer
77 * &buf : user-defined buffer (struct print_buf *)
78 *
79 * Note: TIPC_LOG is configured to echo its output to the system console;
80 * user-defined buffers can be configured to do the same thing.
81 */
85 82
86#define dbg(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) 83extern struct print_buf *const TIPC_NULL;
87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT != TIPC_NULL) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0) 84extern struct print_buf *const TIPC_CONS;
88#define dump(fmt, arg...) do {if (DBG_OUTPUT != TIPC_NULL) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 85extern struct print_buf *const TIPC_LOG;
89 86
87void tipc_printf(struct print_buf *, const char *fmt, ...);
90 88
91/* 89/*
92 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer, 90 * TIPC_OUTPUT is the destination print buffer for system messages.
93 * while DBG_OUTPUT is the null print buffer. These defaults can be changed
94 * here, or on a per .c file basis, by redefining these symbols. The following
95 * print buffer options are available:
96 *
97 * TIPC_NULL : null buffer (i.e. print nowhere)
98 * TIPC_CONS : system console
99 * TIPC_LOG : TIPC log buffer
100 * &buf : user-defined buffer (struct print_buf *)
101 * TIPC_TEE(&buf_a,&buf_b) : list of buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG))
102 */ 91 */
103 92
104#ifndef TIPC_OUTPUT 93#ifndef TIPC_OUTPUT
105#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG) 94#define TIPC_OUTPUT TIPC_LOG
106#endif
107
108#ifndef DBG_OUTPUT
109#define DBG_OUTPUT TIPC_NULL
110#endif 95#endif
111 96
112#else
113
114/* 97/*
115 * TIPC debug support not included: 98 * TIPC can be configured to send system messages to TIPC_OUTPUT
116 * - system messages are printed to system console 99 * or to the system console only.
117 * - debug messages are not printed
118 */ 100 */
119 101
102#ifdef CONFIG_TIPC_DEBUG
103
104#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
105 KERN_ERR "TIPC: " fmt, ## arg)
106#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
107 KERN_WARNING "TIPC: " fmt, ## arg)
108#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
109 KERN_NOTICE "TIPC: " fmt, ## arg)
110
111#else
112
120#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg) 113#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
121#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg) 114#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
122#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg) 115#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
123 116
124#define dbg(fmt, arg...) do {} while (0) 117#endif
125#define msg_dbg(msg,txt) do {} while (0)
126#define dump(fmt,arg...) do {} while (0)
127 118
119/*
120 * DBG_OUTPUT is the destination print buffer for debug messages.
 121 * It defaults to the null print buffer, but can be redefined
122 * (typically in the individual .c files being debugged) to allow
123 * selected debug messages to be generated where needed.
124 */
125
126#ifndef DBG_OUTPUT
127#define DBG_OUTPUT TIPC_NULL
128#endif
128 129
129/* 130/*
130 * TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is 131 * TIPC can be configured to send debug messages to the specified print buffer
131 * the null print buffer. Thes ensures that any system or debug messages 132 * (typically DBG_OUTPUT) or to suppress them entirely.
132 * that are generated without using the above macros are handled correctly.
133 */ 133 */
134 134
135#undef TIPC_OUTPUT 135#ifdef CONFIG_TIPC_DEBUG
136#define TIPC_OUTPUT TIPC_CONS
137 136
138#undef DBG_OUTPUT 137#define dbg(fmt, arg...) \
139#define DBG_OUTPUT TIPC_NULL 138 do { \
139 if (DBG_OUTPUT != TIPC_NULL) \
140 tipc_printf(DBG_OUTPUT, fmt, ## arg); \
141 } while (0)
142#define msg_dbg(msg, txt) \
143 do { \
144 if (DBG_OUTPUT != TIPC_NULL) \
145 tipc_msg_dbg(DBG_OUTPUT, msg, txt); \
146 } while (0)
147#define dump(fmt, arg...) \
148 do { \
149 if (DBG_OUTPUT != TIPC_NULL) \
150 tipc_dump_dbg(DBG_OUTPUT, fmt, ##arg); \
151 } while (0)
152
153void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
154void tipc_dump_dbg(struct print_buf *, const char *fmt, ...);
155
156#else
157
158#define dbg(fmt, arg...) do {} while (0)
159#define msg_dbg(msg, txt) do {} while (0)
160#define dump(fmt, arg...) do {} while (0)
161
162#define tipc_msg_dbg(...) do {} while (0)
163#define tipc_dump_dbg(...) do {} while (0)
140 164
141#endif 165#endif
142 166
@@ -178,7 +202,7 @@ extern atomic_t tipc_user_count;
178 202
179extern int tipc_core_start(void); 203extern int tipc_core_start(void);
180extern void tipc_core_stop(void); 204extern void tipc_core_stop(void);
181extern int tipc_core_start_net(void); 205extern int tipc_core_start_net(unsigned long addr);
182extern void tipc_core_stop_net(void); 206extern void tipc_core_stop_net(void);
183extern int tipc_handler_start(void); 207extern int tipc_handler_start(void);
184extern void tipc_handler_stop(void); 208extern void tipc_handler_stop(void);
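Illustrative sketch (not part of the diff): how the per-file DBG_OUTPUT override described in the new core.h comments might look. The file and function names are invented; with CONFIG_TIPC_DEBUG enabled, dbg() and msg_dbg() calls in such a file are routed to the TIPC log buffer instead of the default null buffer.

/* Hypothetical TIPC source file: redefine DBG_OUTPUT before pulling in
 * core.h so the dbg()/msg_dbg() macros expanded in this file print to
 * TIPC_LOG rather than the default TIPC_NULL. */
#define DBG_OUTPUT TIPC_LOG

#include "core.h"
#include "msg.h"

static void example_debug_rx(struct tipc_msg *msg)
{
	dbg("received msg type %u\n", msg_type(msg));
	msg_dbg(msg, "<EXAMPLE-RX<");
}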
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index e809d2a2ce06..29ecae851668 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -2,7 +2,7 @@
2 * net/tipc/dbg.c: TIPC print buffer routines for debugging 2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -38,17 +38,43 @@
38#include "config.h" 38#include "config.h"
39#include "dbg.h" 39#include "dbg.h"
40 40
41static char print_string[TIPC_PB_MAX_STR]; 41/*
42static DEFINE_SPINLOCK(print_lock); 42 * TIPC pre-defines the following print buffers:
43 *
44 * TIPC_NULL : null buffer (i.e. print nowhere)
45 * TIPC_CONS : system console
46 * TIPC_LOG : TIPC log buffer
47 *
48 * Additional user-defined print buffers are also permitted.
49 */
43 50
44static struct print_buf null_buf = { NULL, 0, NULL, NULL }; 51static struct print_buf null_buf = { NULL, 0, NULL, 0 };
45struct print_buf *TIPC_NULL = &null_buf; 52struct print_buf *const TIPC_NULL = &null_buf;
46 53
47static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 54static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
48struct print_buf *TIPC_CONS = &cons_buf; 55struct print_buf *const TIPC_CONS = &cons_buf;
49 56
50static struct print_buf log_buf = { NULL, 0, NULL, NULL }; 57static struct print_buf log_buf = { NULL, 0, NULL, 1 };
51struct print_buf *TIPC_LOG = &log_buf; 58struct print_buf *const TIPC_LOG = &log_buf;
59
60/*
61 * Locking policy when using print buffers.
62 *
63 * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
64 * 'print_string' when writing to a print buffer. This also protects against
65 * concurrent writes to the print buffer being written to.
66 *
67 * 2) tipc_dump() and tipc_log_XXX() leverage the aforementioned
68 * use of 'print_lock' to protect against all types of concurrent operations
69 * on their associated print buffer (not just write operations).
70 *
71 * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
72 * on the caller to prevent simultaneous use of the print buffer(s) being
73 * manipulated.
74 */
75
76static char print_string[TIPC_PB_MAX_STR];
77static DEFINE_SPINLOCK(print_lock);
52 78
53 79
54#define FORMAT(PTR,LEN,FMT) \ 80#define FORMAT(PTR,LEN,FMT) \
@@ -60,27 +86,14 @@ struct print_buf *TIPC_LOG = &log_buf;
60 *(PTR + LEN) = '\0';\ 86 *(PTR + LEN) = '\0';\
61} 87}
62 88
63/*
64 * Locking policy when using print buffers.
65 *
66 * The following routines use 'print_lock' for protection:
67 * 1) tipc_printf() - to protect its print buffer(s) and 'print_string'
68 * 2) TIPC_TEE() - to protect its print buffer(s)
69 * 3) tipc_dump() - to protect its print buffer(s) and 'print_string'
70 * 4) tipc_log_XXX() - to protect TIPC_LOG
71 *
72 * All routines of the form tipc_printbuf_XXX() rely on the caller to prevent
73 * simultaneous use of the print buffer(s) being manipulated.
74 */
75
76/** 89/**
77 * tipc_printbuf_init - initialize print buffer to empty 90 * tipc_printbuf_init - initialize print buffer to empty
78 * @pb: pointer to print buffer structure 91 * @pb: pointer to print buffer structure
79 * @raw: pointer to character array used by print buffer 92 * @raw: pointer to character array used by print buffer
80 * @size: size of character array 93 * @size: size of character array
81 * 94 *
82 * Makes the print buffer a null device that discards anything written to it 95 * Note: If the character array is too small (or absent), the print buffer
83 * if the character array is too small (or absent). 96 * becomes a null device that discards anything written to it.
84 */ 97 */
85 98
86void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) 99void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
@@ -88,13 +101,13 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
88 pb->buf = raw; 101 pb->buf = raw;
89 pb->crs = raw; 102 pb->crs = raw;
90 pb->size = size; 103 pb->size = size;
91 pb->next = NULL; 104 pb->echo = 0;
92 105
93 if (size < TIPC_PB_MIN_SIZE) { 106 if (size < TIPC_PB_MIN_SIZE) {
94 pb->buf = NULL; 107 pb->buf = NULL;
95 } else if (raw) { 108 } else if (raw) {
96 pb->buf[0] = 0; 109 pb->buf[0] = 0;
97 pb->buf[size-1] = ~0; 110 pb->buf[size - 1] = ~0;
98 } 111 }
99} 112}
100 113
@@ -105,7 +118,11 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
105 118
106void tipc_printbuf_reset(struct print_buf *pb) 119void tipc_printbuf_reset(struct print_buf *pb)
107{ 120{
108 tipc_printbuf_init(pb, pb->buf, pb->size); 121 if (pb->buf) {
122 pb->crs = pb->buf;
123 pb->buf[0] = 0;
124 pb->buf[pb->size - 1] = ~0;
125 }
109} 126}
110 127
111/** 128/**
@@ -141,7 +158,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
141 158
142 if (pb->buf[pb->size - 1] == 0) { 159 if (pb->buf[pb->size - 1] == 0) {
143 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 160 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
144 if (cp_buf != NULL){ 161 if (cp_buf) {
145 tipc_printbuf_init(&cb, cp_buf, pb->size); 162 tipc_printbuf_init(&cb, cp_buf, pb->size);
146 tipc_printbuf_move(&cb, pb); 163 tipc_printbuf_move(&cb, pb);
147 tipc_printbuf_move(pb, &cb); 164 tipc_printbuf_move(pb, &cb);
@@ -179,15 +196,16 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
179 } 196 }
180 197
181 if (pb_to->size < pb_from->size) { 198 if (pb_to->size < pb_from->size) {
182 tipc_printbuf_reset(pb_to); 199 strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
183 tipc_printf(pb_to, "*** PRINT BUFFER MOVE ERROR ***"); 200 pb_to->buf[pb_to->size - 1] = ~0;
201 pb_to->crs = strchr(pb_to->buf, 0);
184 return; 202 return;
185 } 203 }
186 204
187 /* Copy data from char after cursor to end (if used) */ 205 /* Copy data from char after cursor to end (if used) */
188 206
189 len = pb_from->buf + pb_from->size - pb_from->crs - 2; 207 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
190 if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) { 208 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
191 strcpy(pb_to->buf, pb_from->crs + 1); 209 strcpy(pb_to->buf, pb_from->crs + 1);
192 pb_to->crs = pb_to->buf + len; 210 pb_to->crs = pb_to->buf + len;
193 } else 211 } else
@@ -203,8 +221,8 @@ void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
203} 221}
204 222
205/** 223/**
206 * tipc_printf - append formatted output to print buffer chain 224 * tipc_printf - append formatted output to print buffer
207 * @pb: pointer to chain of print buffers (may be NULL) 225 * @pb: pointer to print buffer
208 * @fmt: formatted info to be printed 226 * @fmt: formatted info to be printed
209 */ 227 */
210 228
@@ -213,68 +231,40 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
213 int chars_to_add; 231 int chars_to_add;
214 int chars_left; 232 int chars_left;
215 char save_char; 233 char save_char;
216 struct print_buf *pb_next;
217 234
218 spin_lock_bh(&print_lock); 235 spin_lock_bh(&print_lock);
236
219 FORMAT(print_string, chars_to_add, fmt); 237 FORMAT(print_string, chars_to_add, fmt);
220 if (chars_to_add >= TIPC_PB_MAX_STR) 238 if (chars_to_add >= TIPC_PB_MAX_STR)
221 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***"); 239 strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
222 240
223 while (pb) { 241 if (pb->buf) {
224 if (pb == TIPC_CONS) 242 chars_left = pb->buf + pb->size - pb->crs - 1;
225 printk(print_string); 243 if (chars_to_add <= chars_left) {
226 else if (pb->buf) { 244 strcpy(pb->crs, print_string);
227 chars_left = pb->buf + pb->size - pb->crs - 1; 245 pb->crs += chars_to_add;
228 if (chars_to_add <= chars_left) { 246 } else if (chars_to_add >= (pb->size - 1)) {
229 strcpy(pb->crs, print_string); 247 strcpy(pb->buf, print_string + chars_to_add + 1
230 pb->crs += chars_to_add; 248 - pb->size);
231 } else if (chars_to_add >= (pb->size - 1)) { 249 pb->crs = pb->buf + pb->size - 1;
232 strcpy(pb->buf, print_string + chars_to_add + 1 250 } else {
233 - pb->size); 251 strcpy(pb->buf, print_string + chars_left);
234 pb->crs = pb->buf + pb->size - 1; 252 save_char = print_string[chars_left];
235 } else { 253 print_string[chars_left] = 0;
236 strcpy(pb->buf, print_string + chars_left); 254 strcpy(pb->crs, print_string);
237 save_char = print_string[chars_left]; 255 print_string[chars_left] = save_char;
238 print_string[chars_left] = 0; 256 pb->crs = pb->buf + chars_to_add - chars_left;
239 strcpy(pb->crs, print_string);
240 print_string[chars_left] = save_char;
241 pb->crs = pb->buf + chars_to_add - chars_left;
242 }
243 } 257 }
244 pb_next = pb->next;
245 pb->next = NULL;
246 pb = pb_next;
247 } 258 }
248 spin_unlock_bh(&print_lock);
249}
250 259
251/** 260 if (pb->echo)
252 * TIPC_TEE - perform next output operation on both print buffers 261 printk(print_string);
253 * @b0: pointer to chain of print buffers (may be NULL)
254 * @b1: pointer to print buffer to add to chain
255 *
256 * Returns pointer to print buffer chain.
257 */
258 262
259struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
260{
261 struct print_buf *pb = b0;
262
263 if (!b0 || (b0 == b1))
264 return b1;
265
266 spin_lock_bh(&print_lock);
267 while (pb->next) {
268 if ((pb->next == b1) || (pb->next == b0))
269 pb->next = pb->next->next;
270 else
271 pb = pb->next;
272 }
273 pb->next = b1;
274 spin_unlock_bh(&print_lock); 263 spin_unlock_bh(&print_lock);
275 return b0;
276} 264}
277 265
266#ifdef CONFIG_TIPC_DEBUG
267
278/** 268/**
279 * print_to_console - write string of bytes to console in multiple chunks 269 * print_to_console - write string of bytes to console in multiple chunks
280 */ 270 */
@@ -321,72 +311,66 @@ static void printbuf_dump(struct print_buf *pb)
321} 311}
322 312
323/** 313/**
324 * tipc_dump - dump non-console print buffer(s) to console 314 * tipc_dump_dbg - dump (non-console) print buffer to console
325 * @pb: pointer to chain of print buffers 315 * @pb: pointer to print buffer
326 */ 316 */
327 317
328void tipc_dump(struct print_buf *pb, const char *fmt, ...) 318void tipc_dump_dbg(struct print_buf *pb, const char *fmt, ...)
329{ 319{
330 struct print_buf *pb_next;
331 int len; 320 int len;
332 321
322 if (pb == TIPC_CONS)
323 return;
324
333 spin_lock_bh(&print_lock); 325 spin_lock_bh(&print_lock);
326
334 FORMAT(print_string, len, fmt); 327 FORMAT(print_string, len, fmt);
335 printk(print_string); 328 printk(print_string);
336 329
337 for (; pb; pb = pb->next) { 330 printk("\n---- Start of %s log dump ----\n\n",
338 if (pb != TIPC_CONS) { 331 (pb == TIPC_LOG) ? "global" : "local");
339 printk("\n---- Start of %s log dump ----\n\n", 332 printbuf_dump(pb);
340 (pb == TIPC_LOG) ? "global" : "local"); 333 tipc_printbuf_reset(pb);
341 printbuf_dump(pb); 334 printk("\n---- End of dump ----\n");
342 tipc_printbuf_reset(pb); 335
343 printk("\n---- End of dump ----\n");
344 }
345 pb_next = pb->next;
346 pb->next = NULL;
347 pb = pb_next;
348 }
349 spin_unlock_bh(&print_lock); 336 spin_unlock_bh(&print_lock);
350} 337}
351 338
339#endif
340
352/** 341/**
353 * tipc_log_stop - free up TIPC log print buffer 342 * tipc_log_resize - change the size of the TIPC log buffer
343 * @log_size: print buffer size to use
354 */ 344 */
355 345
356void tipc_log_stop(void) 346int tipc_log_resize(int log_size)
357{ 347{
348 int res = 0;
349
358 spin_lock_bh(&print_lock); 350 spin_lock_bh(&print_lock);
359 if (TIPC_LOG->buf) { 351 if (TIPC_LOG->buf) {
360 kfree(TIPC_LOG->buf); 352 kfree(TIPC_LOG->buf);
361 TIPC_LOG->buf = NULL; 353 TIPC_LOG->buf = NULL;
362 } 354 }
363 spin_unlock_bh(&print_lock);
364}
365
366/**
367 * tipc_log_reinit - (re)initialize TIPC log print buffer
368 * @log_size: print buffer size to use
369 */
370
371void tipc_log_reinit(int log_size)
372{
373 tipc_log_stop();
374
375 if (log_size) { 355 if (log_size) {
376 if (log_size < TIPC_PB_MIN_SIZE) 356 if (log_size < TIPC_PB_MIN_SIZE)
377 log_size = TIPC_PB_MIN_SIZE; 357 log_size = TIPC_PB_MIN_SIZE;
378 spin_lock_bh(&print_lock); 358 res = TIPC_LOG->echo;
379 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), 359 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
380 log_size); 360 log_size);
381 spin_unlock_bh(&print_lock); 361 TIPC_LOG->echo = res;
362 res = !TIPC_LOG->buf;
382 } 363 }
364 spin_unlock_bh(&print_lock);
365
366 return res;
383} 367}
384 368
385/** 369/**
386 * tipc_log_resize - reconfigure size of TIPC log buffer 370 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
387 */ 371 */
388 372
389struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space) 373struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
390{ 374{
391 u32 value; 375 u32 value;
392 376
@@ -397,7 +381,9 @@ struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
397 if (value != delimit(value, 0, 32768)) 381 if (value != delimit(value, 0, 32768))
398 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 382 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
399 " (log size must be 0-32768)"); 383 " (log size must be 0-32768)");
400 tipc_log_reinit(value); 384 if (tipc_log_resize(value))
385 return tipc_cfg_reply_error_string(
386 "unable to create specified log (log size is now 0)");
401 return tipc_cfg_reply_none(); 387 return tipc_cfg_reply_none();
402} 388}
403 389
@@ -410,27 +396,32 @@ struct sk_buff *tipc_log_dump(void)
410 struct sk_buff *reply; 396 struct sk_buff *reply;
411 397
412 spin_lock_bh(&print_lock); 398 spin_lock_bh(&print_lock);
413 if (!TIPC_LOG->buf) 399 if (!TIPC_LOG->buf) {
400 spin_unlock_bh(&print_lock);
414 reply = tipc_cfg_reply_ultra_string("log not activated\n"); 401 reply = tipc_cfg_reply_ultra_string("log not activated\n");
415 else if (tipc_printbuf_empty(TIPC_LOG)) 402 } else if (tipc_printbuf_empty(TIPC_LOG)) {
403 spin_unlock_bh(&print_lock);
416 reply = tipc_cfg_reply_ultra_string("log is empty\n"); 404 reply = tipc_cfg_reply_ultra_string("log is empty\n");
405 }
417 else { 406 else {
418 struct tlv_desc *rep_tlv; 407 struct tlv_desc *rep_tlv;
419 struct print_buf pb; 408 struct print_buf pb;
420 int str_len; 409 int str_len;
421 410
422 str_len = min(TIPC_LOG->size, 32768u); 411 str_len = min(TIPC_LOG->size, 32768u);
412 spin_unlock_bh(&print_lock);
423 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len)); 413 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
424 if (reply) { 414 if (reply) {
425 rep_tlv = (struct tlv_desc *)reply->data; 415 rep_tlv = (struct tlv_desc *)reply->data;
426 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); 416 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
417 spin_lock_bh(&print_lock);
427 tipc_printbuf_move(&pb, TIPC_LOG); 418 tipc_printbuf_move(&pb, TIPC_LOG);
419 spin_unlock_bh(&print_lock);
428 str_len = strlen(TLV_DATA(rep_tlv)) + 1; 420 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
429 skb_put(reply, TLV_SPACE(str_len)); 421 skb_put(reply, TLV_SPACE(str_len));
430 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 422 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
431 } 423 }
432 } 424 }
433 spin_unlock_bh(&print_lock);
434 return reply; 425 return reply;
435} 426}
436 427
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index c01b085000e0..5ef1bc8f64ef 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -2,7 +2,7 @@
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines 2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 * 3 *
4 * Copyright (c) 1997-2006, Ericsson AB 4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -42,14 +42,14 @@
42 * @buf: pointer to character array containing print buffer contents 42 * @buf: pointer to character array containing print buffer contents
43 * @size: size of character array 43 * @size: size of character array
44 * @crs: pointer to first unused space in character array (i.e. final NUL) 44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @next: used to link print buffers when printing to more than one at a time 45 * @echo: echo output to system console if non-zero
46 */ 46 */
47 47
48struct print_buf { 48struct print_buf {
49 char *buf; 49 char *buf;
50 u32 size; 50 u32 size;
51 char *crs; 51 char *crs;
52 struct print_buf *next; 52 int echo;
53}; 53};
54 54
55#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */ 55#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
@@ -61,10 +61,10 @@ int tipc_printbuf_empty(struct print_buf *pb);
61int tipc_printbuf_validate(struct print_buf *pb); 61int tipc_printbuf_validate(struct print_buf *pb);
62void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); 62void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
63 63
64void tipc_log_reinit(int log_size); 64int tipc_log_resize(int log_size);
65void tipc_log_stop(void);
66 65
67struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space); 66struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
67 int req_tlv_space);
68struct sk_buff *tipc_log_dump(void); 68struct sk_buff *tipc_log_dump(void);
69 69
70#endif 70#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5d643e5721eb..1657f0e795ff 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -120,9 +120,8 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
120 120
121 if (buf) { 121 if (buf) {
122 msg = buf_msg(buf); 122 msg = buf_msg(buf);
123 msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE, 123 msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
124 dest_domain); 124 msg_set_non_seq(msg, 1);
125 msg_set_non_seq(msg);
126 msg_set_req_links(msg, req_links); 125 msg_set_req_links(msg, req_links);
127 msg_set_dest_domain(msg, dest_domain); 126 msg_set_dest_domain(msg, dest_domain);
128 msg_set_bc_netid(msg, tipc_net_id); 127 msg_set_bc_netid(msg, tipc_net_id);
@@ -156,11 +155,11 @@ static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
156/** 155/**
157 * tipc_disc_recv_msg - handle incoming link setup message (request or response) 156 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
158 * @buf: buffer containing message 157 * @buf: buffer containing message
158 * @b_ptr: bearer that message arrived on
159 */ 159 */
160 160
161void tipc_disc_recv_msg(struct sk_buff *buf) 161void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
162{ 162{
163 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
164 struct link *link; 163 struct link *link;
165 struct tipc_media_addr media_addr; 164 struct tipc_media_addr media_addr;
166 struct tipc_msg *msg = buf_msg(buf); 165 struct tipc_msg *msg = buf_msg(buf);
@@ -200,9 +199,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
200 dbg(" in own cluster\n"); 199 dbg(" in own cluster\n");
201 if (n_ptr == NULL) { 200 if (n_ptr == NULL) {
202 n_ptr = tipc_node_create(orig); 201 n_ptr = tipc_node_create(orig);
203 } 202 if (!n_ptr)
204 if (n_ptr == NULL) { 203 return;
205 return;
206 } 204 }
207 spin_lock_bh(&n_ptr->lock); 205 spin_lock_bh(&n_ptr->lock);
208 link = n_ptr->links[b_ptr->identity]; 206 link = n_ptr->links[b_ptr->identity];
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 9fd7587b143a..c36eaeb7d5d0 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -48,7 +48,7 @@ struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
48void tipc_disc_update_link_req(struct link_req *req); 48void tipc_disc_update_link_req(struct link_req *req);
49void tipc_disc_stop_link_req(struct link_req *req); 49void tipc_disc_stop_link_req(struct link_req *req);
50 50
51void tipc_disc_recv_msg(struct sk_buff *buf); 51void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
52 52
53void tipc_disc_link_event(u32 addr, char *name, int up); 53void tipc_disc_link_event(u32 addr, char *name, int up);
54#if 0 54#if 0
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 2a26a16e269f..9784a8e963b4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -51,6 +51,12 @@
51 51
52 52
53/* 53/*
54 * Out-of-range value for link session numbers
55 */
56
57#define INVALID_SESSION 0x10000
58
59/*
54 * Limit for deferred reception queue: 60 * Limit for deferred reception queue:
55 */ 61 */
56 62
@@ -147,9 +153,21 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
147 153
148#define LINK_LOG_BUF_SIZE 0 154#define LINK_LOG_BUF_SIZE 0
149 155
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0) 156#define dbg_link(fmt, arg...) \
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0) 157 do { \
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0) 158 if (LINK_LOG_BUF_SIZE) \
159 tipc_printf(&l_ptr->print_buf, fmt, ## arg); \
160 } while (0)
161#define dbg_link_msg(msg, txt) \
162 do { \
163 if (LINK_LOG_BUF_SIZE) \
164 tipc_msg_dbg(&l_ptr->print_buf, msg, txt); \
165 } while (0)
166#define dbg_link_state(txt) \
167 do { \
168 if (LINK_LOG_BUF_SIZE) \
169 link_print(l_ptr, &l_ptr->print_buf, txt); \
170 } while (0)
153#define dbg_link_dump() do { \ 171#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \ 172 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ 173 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
@@ -450,9 +468,9 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
450 468
451 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; 469 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
452 msg = l_ptr->pmsg; 470 msg = l_ptr->pmsg;
453 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 471 msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
454 msg_set_size(msg, sizeof(l_ptr->proto_msg)); 472 msg_set_size(msg, sizeof(l_ptr->proto_msg));
455 msg_set_session(msg, tipc_random); 473 msg_set_session(msg, (tipc_random & 0xffff));
456 msg_set_bearer_id(msg, b_ptr->identity); 474 msg_set_bearer_id(msg, b_ptr->identity);
457 strcpy((char *)msg_data(msg), if_name); 475 strcpy((char *)msg_data(msg), if_name);
458 476
@@ -693,10 +711,10 @@ void tipc_link_reset(struct link *l_ptr)
693 u32 checkpoint = l_ptr->next_in_no; 711 u32 checkpoint = l_ptr->next_in_no;
694 int was_active_link = tipc_link_is_active(l_ptr); 712 int was_active_link = tipc_link_is_active(l_ptr);
695 713
696 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1); 714 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
697 715
698 /* Link is down, accept any session: */ 716 /* Link is down, accept any session */
699 l_ptr->peer_session = 0; 717 l_ptr->peer_session = INVALID_SESSION;
700 718
701 /* Prepare for max packet size negotiation */ 719 /* Prepare for max packet size negotiation */
702 link_init_max_pkt(l_ptr); 720 link_init_max_pkt(l_ptr);
@@ -1110,7 +1128,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1110 1128
1111 if (bundler) { 1129 if (bundler) {
1112 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, 1130 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1113 TIPC_OK, INT_H_SIZE, l_ptr->addr); 1131 INT_H_SIZE, l_ptr->addr);
1114 skb_copy_to_linear_data(bundler, &bundler_hdr, 1132 skb_copy_to_linear_data(bundler, &bundler_hdr,
1115 INT_H_SIZE); 1133 INT_H_SIZE);
1116 skb_trim(bundler, INT_H_SIZE); 1134 skb_trim(bundler, INT_H_SIZE);
@@ -1374,7 +1392,7 @@ again:
1374 1392
1375 msg_dbg(hdr, ">FRAGMENTING>"); 1393 msg_dbg(hdr, ">FRAGMENTING>");
1376 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1394 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1377 TIPC_OK, INT_H_SIZE, msg_destnode(hdr)); 1395 INT_H_SIZE, msg_destnode(hdr));
1378 msg_set_link_selector(&fragm_hdr, sender->publ.ref); 1396 msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1379 msg_set_size(&fragm_hdr, max_pkt); 1397 msg_set_size(&fragm_hdr, max_pkt);
1380 msg_set_fragm_no(&fragm_hdr, 1); 1398 msg_set_fragm_no(&fragm_hdr, 1);
@@ -1651,7 +1669,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1651 struct tipc_msg *msg = buf_msg(buf); 1669 struct tipc_msg *msg = buf_msg(buf);
1652 1670
1653 warn("Retransmission failure on link <%s>\n", l_ptr->name); 1671 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1654 tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>"); 1672 tipc_msg_dbg(TIPC_OUTPUT, msg, ">RETR-FAIL>");
1655 1673
1656 if (l_ptr->addr) { 1674 if (l_ptr->addr) {
1657 1675
@@ -1748,21 +1766,6 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1748 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1766 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1749} 1767}
1750 1768
1751/*
1752 * link_recv_non_seq: Receive packets which are outside
1753 * the link sequence flow
1754 */
1755
1756static void link_recv_non_seq(struct sk_buff *buf)
1757{
1758 struct tipc_msg *msg = buf_msg(buf);
1759
1760 if (msg_user(msg) == LINK_CONFIG)
1761 tipc_disc_recv_msg(buf);
1762 else
1763 tipc_bclink_recv_pkt(buf);
1764}
1765
1766/** 1769/**
1767 * link_insert_deferred_queue - insert deferred messages back into receive chain 1770 * link_insert_deferred_queue - insert deferred messages back into receive chain
1768 */ 1771 */
@@ -1839,7 +1842,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1839{ 1842{
1840 read_lock_bh(&tipc_net_lock); 1843 read_lock_bh(&tipc_net_lock);
1841 while (head) { 1844 while (head) {
1842 struct bearer *b_ptr; 1845 struct bearer *b_ptr = (struct bearer *)tb_ptr;
1843 struct node *n_ptr; 1846 struct node *n_ptr;
1844 struct link *l_ptr; 1847 struct link *l_ptr;
1845 struct sk_buff *crs; 1848 struct sk_buff *crs;
@@ -1850,9 +1853,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1850 u32 released = 0; 1853 u32 released = 0;
1851 int type; 1854 int type;
1852 1855
1853 b_ptr = (struct bearer *)tb_ptr;
1854 TIPC_SKB_CB(buf)->handle = b_ptr;
1855
1856 head = head->next; 1856 head = head->next;
1857 1857
1858 /* Ensure message is well-formed */ 1858 /* Ensure message is well-formed */
@@ -1871,7 +1871,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1871 msg = buf_msg(buf); 1871 msg = buf_msg(buf);
1872 1872
1873 if (unlikely(msg_non_seq(msg))) { 1873 if (unlikely(msg_non_seq(msg))) {
1874 link_recv_non_seq(buf); 1874 if (msg_user(msg) == LINK_CONFIG)
1875 tipc_disc_recv_msg(buf, b_ptr);
1876 else
1877 tipc_bclink_recv_pkt(buf);
1875 continue; 1878 continue;
1876 } 1879 }
1877 1880
@@ -1978,8 +1981,6 @@ deliver:
1978 if (link_recv_changeover_msg(&l_ptr, &buf)) { 1981 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1979 msg = buf_msg(buf); 1982 msg = buf_msg(buf);
1980 seq_no = msg_seqno(msg); 1983 seq_no = msg_seqno(msg);
1981 TIPC_SKB_CB(buf)->handle
1982 = b_ptr;
1983 if (type == ORIGINAL_MSG) 1984 if (type == ORIGINAL_MSG)
1984 goto deliver; 1985 goto deliver;
1985 goto protocol_check; 1986 goto protocol_check;
@@ -2263,7 +2264,8 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2263 switch (msg_type(msg)) { 2264 switch (msg_type(msg)) {
2264 2265
2265 case RESET_MSG: 2266 case RESET_MSG:
2266 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) { 2267 if (!link_working_unknown(l_ptr) &&
2268 (l_ptr->peer_session != INVALID_SESSION)) {
2267 if (msg_session(msg) == l_ptr->peer_session) { 2269 if (msg_session(msg) == l_ptr->peer_session) {
2268 dbg("Duplicate RESET: %u<->%u\n", 2270 dbg("Duplicate RESET: %u<->%u\n",
2269 msg_session(msg), l_ptr->peer_session); 2271 msg_session(msg), l_ptr->peer_session);
@@ -2424,7 +2426,7 @@ void tipc_link_changeover(struct link *l_ptr)
2424 } 2426 }
2425 2427
2426 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2428 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2427 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2429 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2428 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2430 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2429 msg_set_msgcnt(&tunnel_hdr, msgcount); 2431 msg_set_msgcnt(&tunnel_hdr, msgcount);
2430 dbg("Link changeover requires %u tunnel messages\n", msgcount); 2432 dbg("Link changeover requires %u tunnel messages\n", msgcount);
@@ -2479,7 +2481,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2479 struct tipc_msg tunnel_hdr; 2481 struct tipc_msg tunnel_hdr;
2480 2482
2481 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2483 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2482 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr); 2484 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2483 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2485 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2484 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2486 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2485 iter = l_ptr->first_out; 2487 iter = l_ptr->first_out;
@@ -2672,10 +2674,12 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2672 u32 pack_sz = link_max_pkt(l_ptr); 2674 u32 pack_sz = link_max_pkt(l_ptr);
2673 u32 fragm_sz = pack_sz - INT_H_SIZE; 2675 u32 fragm_sz = pack_sz - INT_H_SIZE;
2674 u32 fragm_no = 1; 2676 u32 fragm_no = 1;
2675 u32 destaddr = msg_destnode(inmsg); 2677 u32 destaddr;
2676 2678
2677 if (msg_short(inmsg)) 2679 if (msg_short(inmsg))
2678 destaddr = l_ptr->addr; 2680 destaddr = l_ptr->addr;
2681 else
2682 destaddr = msg_destnode(inmsg);
2679 2683
2680 if (msg_routed(inmsg)) 2684 if (msg_routed(inmsg))
2681 msg_set_prevnode(inmsg, tipc_own_addr); 2685 msg_set_prevnode(inmsg, tipc_own_addr);
@@ -2683,7 +2687,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2683 /* Prepare reusable fragment header: */ 2687 /* Prepare reusable fragment header: */
2684 2688
2685 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2689 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2686 TIPC_OK, INT_H_SIZE, destaddr); 2690 INT_H_SIZE, destaddr);
2687 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg)); 2691 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2688 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++)); 2692 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2689 msg_set_fragm_no(&fragm_hdr, fragm_no); 2693 msg_set_fragm_no(&fragm_hdr, fragm_no);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 696a8633df75..73dcd00d674e 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,7 +41,9 @@
41#include "bearer.h" 41#include "bearer.h"
42 42
43 43
44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str) 44#ifdef CONFIG_TIPC_DEBUG
45
46void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{ 47{
46 u32 usr = msg_user(msg); 48 u32 usr = msg_user(msg);
47 tipc_printf(buf, str); 49 tipc_printf(buf, str);
@@ -228,13 +230,10 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
228 230
229 switch (usr) { 231 switch (usr) {
230 case CONN_MANAGER: 232 case CONN_MANAGER:
231 case NAME_DISTRIBUTOR:
232 case TIPC_LOW_IMPORTANCE: 233 case TIPC_LOW_IMPORTANCE:
233 case TIPC_MEDIUM_IMPORTANCE: 234 case TIPC_MEDIUM_IMPORTANCE:
234 case TIPC_HIGH_IMPORTANCE: 235 case TIPC_HIGH_IMPORTANCE:
235 case TIPC_CRITICAL_IMPORTANCE: 236 case TIPC_CRITICAL_IMPORTANCE:
236 if (msg_short(msg))
237 break; /* No error */
238 switch (msg_errcode(msg)) { 237 switch (msg_errcode(msg)) {
239 case TIPC_OK: 238 case TIPC_OK:
240 break; 239 break;
@@ -315,9 +314,11 @@ void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str
315 } 314 }
316 tipc_printf(buf, "\n"); 315 tipc_printf(buf, "\n");
317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { 316 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
318 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 317 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
319 } 318 }
320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { 319 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
321 tipc_msg_print(buf,msg_get_wrapped(msg)," /"); 320 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
322 } 321 }
323} 322}
323
324#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index ad487e8abcc2..7ee6ae238147 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -2,7 +2,7 @@
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2008, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,14 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
75 m->hdr[w] |= htonl(val); 75 m->hdr[w] |= htonl(val);
76} 76}
77 77
78static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
79{
80 u32 temp = msg->hdr[a];
81
82 msg->hdr[a] = msg->hdr[b];
83 msg->hdr[b] = temp;
84}
85
78/* 86/*
79 * Word 0 87 * Word 0
80 */ 88 */
@@ -119,9 +127,9 @@ static inline int msg_non_seq(struct tipc_msg *m)
119 return msg_bits(m, 0, 20, 1); 127 return msg_bits(m, 0, 20, 1);
120} 128}
121 129
122static inline void msg_set_non_seq(struct tipc_msg *m) 130static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
123{ 131{
124 msg_set_bits(m, 0, 20, 1, 1); 132 msg_set_bits(m, 0, 20, 1, n);
125} 133}
126 134
127static inline int msg_dest_droppable(struct tipc_msg *m) 135static inline int msg_dest_droppable(struct tipc_msg *m)
@@ -224,6 +232,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
224 msg_set_bits(m, 2, 0, 0xffff, n); 232 msg_set_bits(m, 2, 0, 0xffff, n);
225} 233}
226 234
235/*
236 * TIPC may utilize the "link ack #" and "link seq #" fields of a short
237 * message header to hold the destination node for the message, since the
238 * normal "dest node" field isn't present. This cache is only referenced
239 * when required, so populating the cache of a longer message header is
240 * harmless (as long as the header has the two link sequence fields present).
241 *
242 * Note: Host byte order is OK here, since the info never goes off-card.
243 */
244
245static inline u32 msg_destnode_cache(struct tipc_msg *m)
246{
247 return m->hdr[2];
248}
249
250static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
251{
252 m->hdr[2] = dnode;
253}
227 254
228/* 255/*
229 * Words 3-10 256 * Words 3-10
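Illustrative sketch (not part of the diff): a hypothetical use of the destination-node cache accessors added above, following the comment's description; the helper name is invented.

static u32 example_stash_and_fetch_dest(struct tipc_msg *m, u32 dnode)
{
	/* A short header has no "dest node" word, so remember the target in
	 * word 2 (host byte order; the value never leaves this node). */
	msg_set_destnode_cache(m, dnode);

	/* ...and read it back when the destination is actually needed. */
	return msg_destnode_cache(m);
}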
@@ -325,7 +352,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
325 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
326 w0:|vers |msg usr|hdr sz |n|resrv| packet size | 353 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
327 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 354 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
328 w1:|m typ|rsv=0| sequence gap | broadcast ack no | 355 w1:|m typ| sequence gap | broadcast ack no |
329 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
330 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to | 357 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
331 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -388,12 +415,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
388 415
389static inline u32 msg_seq_gap(struct tipc_msg *m) 416static inline u32 msg_seq_gap(struct tipc_msg *m)
390{ 417{
391 return msg_bits(m, 1, 16, 0xff); 418 return msg_bits(m, 1, 16, 0x1fff);
392} 419}
393 420
394static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) 421static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
395{ 422{
396 msg_set_bits(m, 1, 16, 0xff, n); 423 msg_set_bits(m, 1, 16, 0x1fff, n);
397} 424}
398 425
399static inline u32 msg_req_links(struct tipc_msg *m) 426static inline u32 msg_req_links(struct tipc_msg *m)
@@ -696,7 +723,7 @@ static inline u32 msg_tot_importance(struct tipc_msg *m)
696 723
697 724
698static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 725static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
699 u32 err, u32 hsize, u32 destnode) 726 u32 hsize, u32 destnode)
700{ 727{
701 memset(m, 0, hsize); 728 memset(m, 0, hsize);
702 msg_set_version(m); 729 msg_set_version(m);
@@ -705,7 +732,6 @@ static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
705 msg_set_size(m, hsize); 732 msg_set_size(m, hsize);
706 msg_set_prevnode(m, tipc_own_addr); 733 msg_set_prevnode(m, tipc_own_addr);
707 msg_set_type(m, type); 734 msg_set_type(m, type);
708 msg_set_errcode(m, err);
709 if (!msg_short(m)) { 735 if (!msg_short(m)) {
710 msg_set_orignode(m, tipc_own_addr); 736 msg_set_orignode(m, tipc_own_addr);
711 msg_set_destnode(m, destnode); 737 msg_set_destnode(m, destnode);
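
[Editorial sketch] The msg.h changes above add a word-swap helper, make msg_set_non_seq() take an explicit value, widen the sequence-gap field from 8 to 13 bits (0xff to 0x1fff), and move error-code setting out of msg_init(). Below is a minimal userspace sketch of the underlying idiom: a header kept in network byte order whose fields are read and written through mask/shift helpers, plus a word swap. The struct and helper names are illustrative only, and the set helper here folds the clear and the shift into one routine, which TIPC's own msg_set_bits() splits up slightly differently.

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl()/ntohl() */

    /* Hypothetical 11-word header kept in network byte order, as TIPC does. */
    struct hdr { uint32_t w[11]; };

    /* Read a field 'mask' wide at bit 'pos' of word 'w'. */
    static uint32_t hdr_bits(struct hdr *h, int w, int pos, uint32_t mask)
    {
            return (ntohl(h->w[w]) >> pos) & mask;
    }

    /* Clear the field, then OR in the new value. */
    static void hdr_set_bits(struct hdr *h, int w, int pos, uint32_t mask, uint32_t val)
    {
            uint32_t v = ntohl(h->w[w]);

            v &= ~(mask << pos);
            v |= (val & mask) << pos;
            h->w[w] = htonl(v);
    }

    /* Swap two header words in place, as msg_swap_words() does. */
    static void hdr_swap_words(struct hdr *h, int a, int b)
    {
            uint32_t tmp = h->w[a];

            h->w[a] = h->w[b];
            h->w[b] = tmp;
    }

    int main(void)
    {
            struct hdr h = { { 0 } };

            /* 13-bit "sequence gap" field at bit 16 of word 1 (mask 0x1fff). */
            hdr_set_bits(&h, 1, 16, 0x1fff, 4500);
            printf("seq gap = %u\n", (unsigned)hdr_bits(&h, 1, 16, 0x1fff));

            hdr_swap_words(&h, 1, 2);
            printf("after swap, word 2 gap = %u\n",
                   (unsigned)hdr_bits(&h, 2, 16, 0x1fff));
            return 0;
    }
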
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 39fd1619febf..10a69894e2fd 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -41,9 +41,6 @@
41#include "msg.h" 41#include "msg.h"
42#include "name_distr.h" 42#include "name_distr.h"
43 43
44#undef DBG_OUTPUT
45#define DBG_OUTPUT NULL
46
47#define ITEM_SIZE sizeof(struct distr_item) 44#define ITEM_SIZE sizeof(struct distr_item)
48 45
49/** 46/**
@@ -106,8 +103,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
106 103
107 if (buf != NULL) { 104 if (buf != NULL) {
108 msg = buf_msg(buf); 105 msg = buf_msg(buf);
109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 106 msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest);
110 LONG_H_SIZE, dest);
111 msg_set_size(msg, LONG_H_SIZE + size); 107 msg_set_size(msg, LONG_H_SIZE + size);
112 } 108 }
113 return buf; 109 return buf;
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index ac7dfdda7973..096f7bd240a0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -74,7 +74,7 @@ struct sub_seq {
74 * @first_free: array index of first unused sub-sequence entry 74 * @first_free: array index of first unused sub-sequence entry
75 * @ns_list: links to adjacent name sequences in hash chain 75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type' 76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure 77 * @lock: spinlock controlling access to publication lists of all sub-sequences
78 */ 78 */
79 79
80struct name_seq { 80struct name_seq {
@@ -905,6 +905,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
905 struct sub_seq *sseq; 905 struct sub_seq *sseq;
906 char typearea[11]; 906 char typearea[11];
907 907
908 if (seq->first_free == 0)
909 return;
910
908 sprintf(typearea, "%-10u", seq->type); 911 sprintf(typearea, "%-10u", seq->type);
909 912
910 if (depth == 1) { 913 if (depth == 1) {
@@ -915,7 +918,9 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
915 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { 918 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
916 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { 919 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
917 tipc_printf(buf, "%s ", typearea); 920 tipc_printf(buf, "%s ", typearea);
921 spin_lock_bh(&seq->lock);
918 subseq_list(sseq, buf, depth, index); 922 subseq_list(sseq, buf, depth, index);
923 spin_unlock_bh(&seq->lock);
919 sprintf(typearea, "%10s", " "); 924 sprintf(typearea, "%10s", " ");
920 } 925 }
921 } 926 }
@@ -1050,15 +1055,12 @@ void tipc_nametbl_dump(void)
1050 1055
1051int tipc_nametbl_init(void) 1056int tipc_nametbl_init(void)
1052{ 1057{
1053 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1058 table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
1054 1059 GFP_ATOMIC);
1055 table.types = kzalloc(array_size, GFP_ATOMIC);
1056 if (!table.types) 1060 if (!table.types)
1057 return -ENOMEM; 1061 return -ENOMEM;
1058 1062
1059 write_lock_bh(&tipc_nametbl_lock);
1060 table.local_publ_count = 0; 1063 table.local_publ_count = 0;
1061 write_unlock_bh(&tipc_nametbl_lock);
1062 return 0; 1064 return 0;
1063} 1065}
1064 1066
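
[Editorial sketch] The name-table init above switches from kzalloc() of a hand-multiplied size to kcalloc(), and drops a lock that only guarded a counter reset during single-threaded startup. The userspace sketch below shows the analogous calloc() pattern: the allocator performs the element-count multiplication (with overflow checking in kcalloc() and in modern userspace allocators) and returns zeroed memory.

    #include <stdlib.h>
    #include <stdio.h>

    struct bucket { struct bucket *next; };

    /*
     * calloc(n, size) zeroes the array and refuses a request whose
     * n * size would overflow, which an open-coded malloc(n * size)
     * does not; kcalloc() gives the kernel the same guarantees over
     * kzalloc(n * size, ...).
     */
    static struct bucket **table_init(size_t nbuckets)
    {
            return calloc(nbuckets, sizeof(struct bucket *));
    }

    int main(void)
    {
            struct bucket **tbl = table_init(1024);

            if (!tbl) {
                    fprintf(stderr, "table allocation failed\n");
                    return 1;
            }
            printf("bucket 0 is %p\n", (void *)tbl[0]);   /* NULL: zeroed */
            free(tbl);
            return 0;
    }
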
diff --git a/net/tipc/net.c b/net/tipc/net.c
index c39c76201e8e..cc51fa483672 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -266,7 +266,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
266 tipc_link_send(buf, dnode, msg_link_selector(msg)); 266 tipc_link_send(buf, dnode, msg_link_selector(msg));
267} 267}
268 268
269int tipc_net_start(void) 269int tipc_net_start(u32 addr)
270{ 270{
271 char addr_string[16]; 271 char addr_string[16];
272 int res; 272 int res;
@@ -274,6 +274,10 @@ int tipc_net_start(void)
274 if (tipc_mode != TIPC_NODE_MODE) 274 if (tipc_mode != TIPC_NODE_MODE)
275 return -ENOPROTOOPT; 275 return -ENOPROTOOPT;
276 276
277 tipc_subscr_stop();
278 tipc_cfg_stop();
279
280 tipc_own_addr = addr;
277 tipc_mode = TIPC_NET_MODE; 281 tipc_mode = TIPC_NET_MODE;
278 tipc_named_reinit(); 282 tipc_named_reinit();
279 tipc_port_reinit(); 283 tipc_port_reinit();
@@ -284,10 +288,10 @@ int tipc_net_start(void)
284 (res = tipc_bclink_init())) { 288 (res = tipc_bclink_init())) {
285 return res; 289 return res;
286 } 290 }
287 tipc_subscr_stop(); 291
288 tipc_cfg_stop();
289 tipc_k_signal((Handler)tipc_subscr_start, 0); 292 tipc_k_signal((Handler)tipc_subscr_start, 0);
290 tipc_k_signal((Handler)tipc_cfg_init, 0); 293 tipc_k_signal((Handler)tipc_cfg_init, 0);
294
291 info("Started in network mode\n"); 295 info("Started in network mode\n");
292 info("Own node address %s, network identity %u\n", 296 info("Own node address %s, network identity %u\n",
293 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 297 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
diff --git a/net/tipc/net.h b/net/tipc/net.h
index a6a0e9976ac9..d154ac2bda9a 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -58,7 +58,7 @@ void tipc_net_route_msg(struct sk_buff *buf);
58struct node *tipc_net_select_remote_node(u32 addr, u32 ref); 58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59u32 tipc_net_select_router(u32 addr, u32 ref); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60 60
61int tipc_net_start(void); 61int tipc_net_start(u32 addr);
62void tipc_net_stop(void); 62void tipc_net_stop(void);
63 63
64#endif 64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6a7f7b4c2595..c387217bb230 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -2,7 +2,7 @@
2 * net/tipc/netlink.c: TIPC configuration handling 2 * net/tipc/netlink.c: TIPC configuration handling
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -45,15 +45,17 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
45 struct nlmsghdr *req_nlh = info->nlhdr; 45 struct nlmsghdr *req_nlh = info->nlhdr;
46 struct tipc_genlmsghdr *req_userhdr = info->userhdr; 46 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 u16 cmd;
48 49
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 50 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); 51 cmd = TIPC_CMD_NOT_NET_ADMIN;
51 else 52 else
52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, 53 cmd = req_userhdr->cmd;
53 req_userhdr->cmd, 54
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 55 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 56 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
56 hdr_space); 57 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
58 hdr_space);
57 59
58 if (rep_buf) { 60 if (rep_buf) {
59 skb_push(rep_buf, hdr_space); 61 skb_push(rep_buf, hdr_space);
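
[Editorial sketch] The netlink handler above no longer builds its permission-error reply inline; a request from a caller without CAP_NET_ADMIN is rewritten into a "not net admin" pseudo-command, so every request flows through the single tipc_cfg_do_cmd() reply path. A small userspace sketch of that shape, with made-up command values:

    #include <stdio.h>
    #include <stdbool.h>

    enum cmd { CMD_GET_STATS = 1, CMD_RESET = 0xC001, CMD_NOT_ADMIN = 0xFFFF };

    static const char *do_cmd(int cmd)
    {
            switch (cmd) {
            case CMD_GET_STATS: return "stats reply";
            case CMD_RESET:     return "reset done";
            case CMD_NOT_ADMIN: return "error: operation requires admin rights";
            default:            return "error: unknown command";
            }
    }

    /*
     * Instead of branching into a separate error path, an unauthorized
     * privileged request (high command bits set here) is rewritten into
     * an error pseudo-command, so every request goes through the one
     * dispatcher and reply-building path.
     */
    static const char *handle(int cmd, bool is_admin)
    {
            if ((cmd & 0xC000) && !is_admin)
                    cmd = CMD_NOT_ADMIN;
            return do_cmd(cmd);
    }

    int main(void)
    {
            printf("%s\n", handle(CMD_GET_STATS, false));
            printf("%s\n", handle(CMD_RESET, false));
            printf("%s\n", handle(CMD_RESET, true));
            return 0;
    }
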
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 598f4d3a0098..34e9a2bb7c19 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -52,16 +52,40 @@ static void node_established_contact(struct node *n_ptr);
52 52
53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ 53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
54 54
55static DEFINE_SPINLOCK(node_create_lock);
56
55u32 tipc_own_tag = 0; 57u32 tipc_own_tag = 0;
56 58
59/**
60 * tipc_node_create - create neighboring node
61 *
62 * Currently, this routine is called by neighbor discovery code, which holds
63 * net_lock for reading only. We must take node_create_lock to ensure a node
64 * isn't created twice if two different bearers discover the node at the same
65 * time. (It would be preferable to switch to holding net_lock in write mode,
66 * but this is a non-trivial change.)
67 */
68
57struct node *tipc_node_create(u32 addr) 69struct node *tipc_node_create(u32 addr)
58{ 70{
59 struct cluster *c_ptr; 71 struct cluster *c_ptr;
60 struct node *n_ptr; 72 struct node *n_ptr;
61 struct node **curr_node; 73 struct node **curr_node;
62 74
75 spin_lock_bh(&node_create_lock);
76
77 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
78 if (addr < n_ptr->addr)
79 break;
80 if (addr == n_ptr->addr) {
81 spin_unlock_bh(&node_create_lock);
82 return n_ptr;
83 }
84 }
85
63 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); 86 n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (!n_ptr) { 87 if (!n_ptr) {
88 spin_unlock_bh(&node_create_lock);
65 warn("Node creation failed, no memory\n"); 89 warn("Node creation failed, no memory\n");
66 return NULL; 90 return NULL;
67 } 91 }
@@ -71,6 +95,7 @@ struct node *tipc_node_create(u32 addr)
71 c_ptr = tipc_cltr_create(addr); 95 c_ptr = tipc_cltr_create(addr);
72 } 96 }
73 if (!c_ptr) { 97 if (!c_ptr) {
98 spin_unlock_bh(&node_create_lock);
74 kfree(n_ptr); 99 kfree(n_ptr);
75 return NULL; 100 return NULL;
76 } 101 }
@@ -91,6 +116,7 @@ struct node *tipc_node_create(u32 addr)
91 } 116 }
92 } 117 }
93 (*curr_node) = n_ptr; 118 (*curr_node) = n_ptr;
119 spin_unlock_bh(&node_create_lock);
94 return n_ptr; 120 return n_ptr;
95} 121}
96 122
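
[Editorial sketch] tipc_node_create() above gains node_create_lock so that two bearers discovering the same peer at the same time cannot create the node twice. A minimal pthread-based sketch of that check-then-insert-under-one-lock pattern on a sorted singly linked list; all names here are illustrative:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { uint32_t addr; struct node *next; };

    static struct node *nodes;                       /* sorted by address */
    static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * The search and the insertion must happen under one lock; if the
     * address is already present the existing node is returned.
     */
    static struct node *node_create(uint32_t addr)
    {
            struct node *n, **link;

            pthread_mutex_lock(&create_lock);

            for (link = &nodes; (n = *link) != NULL; link = &n->next) {
                    if (addr < n->addr)
                            break;
                    if (addr == n->addr) {           /* already created */
                            pthread_mutex_unlock(&create_lock);
                            return n;
                    }
            }

            n = calloc(1, sizeof(*n));
            if (n) {
                    n->addr = addr;
                    n->next = *link;                 /* keep list sorted */
                    *link = n;
            }

            pthread_mutex_unlock(&create_lock);
            return n;
    }

    int main(void)
    {
            node_create(0x1001002);
            node_create(0x1001001);
            node_create(0x1001002);                  /* duplicate: reuses node */

            for (struct node *n = nodes; n; n = n->next)
                    printf("node %#x\n", (unsigned)n->addr);
            return 0;
    }
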
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2f5806410c64..2e0cff408ff9 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -211,15 +211,18 @@ exit:
211} 211}
212 212
213/** 213/**
214 * tipc_createport_raw - create a native TIPC port 214 * tipc_createport_raw - create a generic TIPC port
215 * 215 *
216 * Returns local port reference 216 * Returns port reference, or 0 if unable to create it
217 *
218 * Note: The newly created port is returned in the locked state.
217 */ 219 */
218 220
219u32 tipc_createport_raw(void *usr_handle, 221u32 tipc_createport_raw(void *usr_handle,
220 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 222 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
221 void (*wakeup)(struct tipc_port *), 223 void (*wakeup)(struct tipc_port *),
222 const u32 importance) 224 const u32 importance,
225 struct tipc_port **tp_ptr)
223{ 226{
224 struct port *p_ptr; 227 struct port *p_ptr;
225 struct tipc_msg *msg; 228 struct tipc_msg *msg;
@@ -237,17 +240,12 @@ u32 tipc_createport_raw(void *usr_handle,
237 return 0; 240 return 0;
238 } 241 }
239 242
240 tipc_port_lock(ref);
241 p_ptr->publ.usr_handle = usr_handle; 243 p_ptr->publ.usr_handle = usr_handle;
242 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT; 244 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
243 p_ptr->publ.ref = ref; 245 p_ptr->publ.ref = ref;
244 msg = &p_ptr->publ.phdr; 246 msg = &p_ptr->publ.phdr;
245 msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 247 msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0);
246 0);
247 msg_set_orignode(msg, tipc_own_addr);
248 msg_set_prevnode(msg, tipc_own_addr);
249 msg_set_origport(msg, ref); 248 msg_set_origport(msg, ref);
250 msg_set_importance(msg,importance);
251 p_ptr->last_in_seqno = 41; 249 p_ptr->last_in_seqno = 41;
252 p_ptr->sent = 1; 250 p_ptr->sent = 1;
253 INIT_LIST_HEAD(&p_ptr->wait_list); 251 INIT_LIST_HEAD(&p_ptr->wait_list);
@@ -262,7 +260,7 @@ u32 tipc_createport_raw(void *usr_handle,
262 INIT_LIST_HEAD(&p_ptr->port_list); 260 INIT_LIST_HEAD(&p_ptr->port_list);
263 list_add_tail(&p_ptr->port_list, &ports); 261 list_add_tail(&p_ptr->port_list, &ports);
264 spin_unlock_bh(&tipc_port_list_lock); 262 spin_unlock_bh(&tipc_port_list_lock);
265 tipc_port_unlock(p_ptr); 263 *tp_ptr = &p_ptr->publ;
266 return ref; 264 return ref;
267} 265}
268 266
@@ -402,10 +400,10 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
402 buf = buf_acquire(LONG_H_SIZE); 400 buf = buf_acquire(LONG_H_SIZE);
403 if (buf) { 401 if (buf) {
404 msg = buf_msg(buf); 402 msg = buf_msg(buf);
405 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode); 403 msg_init(msg, usr, type, LONG_H_SIZE, destnode);
404 msg_set_errcode(msg, err);
406 msg_set_destport(msg, destport); 405 msg_set_destport(msg, destport);
407 msg_set_origport(msg, origport); 406 msg_set_origport(msg, origport);
408 msg_set_destnode(msg, destnode);
409 msg_set_orignode(msg, orignode); 407 msg_set_orignode(msg, orignode);
410 msg_set_transp_seqno(msg, seqno); 408 msg_set_transp_seqno(msg, seqno);
411 msg_set_msgcnt(msg, ack); 409 msg_set_msgcnt(msg, ack);
@@ -446,17 +444,19 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
446 return data_sz; 444 return data_sz;
447 } 445 }
448 rmsg = buf_msg(rbuf); 446 rmsg = buf_msg(rbuf);
449 msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg)); 447 msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg));
448 msg_set_errcode(rmsg, err);
450 msg_set_destport(rmsg, msg_origport(msg)); 449 msg_set_destport(rmsg, msg_origport(msg));
451 msg_set_prevnode(rmsg, tipc_own_addr);
452 msg_set_origport(rmsg, msg_destport(msg)); 450 msg_set_origport(rmsg, msg_destport(msg));
453 if (msg_short(msg)) 451 if (msg_short(msg)) {
454 msg_set_orignode(rmsg, tipc_own_addr); 452 msg_set_orignode(rmsg, tipc_own_addr);
455 else 453 /* leave name type & instance as zeroes */
454 } else {
456 msg_set_orignode(rmsg, msg_destnode(msg)); 455 msg_set_orignode(rmsg, msg_destnode(msg));
456 msg_set_nametype(rmsg, msg_nametype(msg));
457 msg_set_nameinst(rmsg, msg_nameinst(msg));
458 }
457 msg_set_size(rmsg, data_sz + hdr_sz); 459 msg_set_size(rmsg, data_sz + hdr_sz);
458 msg_set_nametype(rmsg, msg_nametype(msg));
459 msg_set_nameinst(rmsg, msg_nameinst(msg));
460 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); 460 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
461 461
462 /* send self-abort message when rejecting on a connected port */ 462 /* send self-abort message when rejecting on a connected port */
@@ -778,6 +778,7 @@ void tipc_port_reinit(void)
778 msg = &p_ptr->publ.phdr; 778 msg = &p_ptr->publ.phdr;
779 if (msg_orignode(msg) == tipc_own_addr) 779 if (msg_orignode(msg) == tipc_own_addr)
780 break; 780 break;
781 msg_set_prevnode(msg, tipc_own_addr);
781 msg_set_orignode(msg, tipc_own_addr); 782 msg_set_orignode(msg, tipc_own_addr);
782 } 783 }
783 spin_unlock_bh(&tipc_port_list_lock); 784 spin_unlock_bh(&tipc_port_list_lock);
@@ -838,16 +839,13 @@ static void port_dispatcher_sigh(void *dummy)
838 u32 peer_node = port_peernode(p_ptr); 839 u32 peer_node = port_peernode(p_ptr);
839 840
840 tipc_port_unlock(p_ptr); 841 tipc_port_unlock(p_ptr);
842 if (unlikely(!cb))
843 goto reject;
841 if (unlikely(!connected)) { 844 if (unlikely(!connected)) {
842 if (unlikely(published)) 845 if (tipc_connect2port(dref, &orig))
843 goto reject; 846 goto reject;
844 tipc_connect2port(dref,&orig); 847 } else if ((msg_origport(msg) != peer_port) ||
845 } 848 (msg_orignode(msg) != peer_node))
846 if (unlikely(msg_origport(msg) != peer_port))
847 goto reject;
848 if (unlikely(msg_orignode(msg) != peer_node))
849 goto reject;
850 if (unlikely(!cb))
851 goto reject; 849 goto reject;
852 if (unlikely(++p_ptr->publ.conn_unacked >= 850 if (unlikely(++p_ptr->publ.conn_unacked >=
853 TIPC_FLOW_CONTROL_WIN)) 851 TIPC_FLOW_CONTROL_WIN))
@@ -862,9 +860,7 @@ static void port_dispatcher_sigh(void *dummy)
862 tipc_msg_event cb = up_ptr->msg_cb; 860 tipc_msg_event cb = up_ptr->msg_cb;
863 861
864 tipc_port_unlock(p_ptr); 862 tipc_port_unlock(p_ptr);
865 if (unlikely(connected)) 863 if (unlikely(!cb || connected))
866 goto reject;
867 if (unlikely(!cb))
868 goto reject; 864 goto reject;
869 skb_pull(buf, msg_hdr_sz(msg)); 865 skb_pull(buf, msg_hdr_sz(msg));
870 cb(usr_handle, dref, &buf, msg_data(msg), 866 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -877,11 +873,7 @@ static void port_dispatcher_sigh(void *dummy)
877 tipc_named_msg_event cb = up_ptr->named_msg_cb; 873 tipc_named_msg_event cb = up_ptr->named_msg_cb;
878 874
879 tipc_port_unlock(p_ptr); 875 tipc_port_unlock(p_ptr);
880 if (unlikely(connected)) 876 if (unlikely(!cb || connected || !published))
881 goto reject;
882 if (unlikely(!cb))
883 goto reject;
884 if (unlikely(!published))
885 goto reject; 877 goto reject;
886 dseq.type = msg_nametype(msg); 878 dseq.type = msg_nametype(msg);
887 dseq.lower = msg_nameinst(msg); 879 dseq.lower = msg_nameinst(msg);
@@ -908,11 +900,10 @@ err:
908 u32 peer_node = port_peernode(p_ptr); 900 u32 peer_node = port_peernode(p_ptr);
909 901
910 tipc_port_unlock(p_ptr); 902 tipc_port_unlock(p_ptr);
911 if (!connected || !cb) 903 if (!cb || !connected)
912 break;
913 if (msg_origport(msg) != peer_port)
914 break; 904 break;
915 if (msg_orignode(msg) != peer_node) 905 if ((msg_origport(msg) != peer_port) ||
906 (msg_orignode(msg) != peer_node))
916 break; 907 break;
917 tipc_disconnect(dref); 908 tipc_disconnect(dref);
918 skb_pull(buf, msg_hdr_sz(msg)); 909 skb_pull(buf, msg_hdr_sz(msg));
@@ -924,7 +915,7 @@ err:
924 tipc_msg_err_event cb = up_ptr->err_cb; 915 tipc_msg_err_event cb = up_ptr->err_cb;
925 916
926 tipc_port_unlock(p_ptr); 917 tipc_port_unlock(p_ptr);
927 if (connected || !cb) 918 if (!cb || connected)
928 break; 919 break;
929 skb_pull(buf, msg_hdr_sz(msg)); 920 skb_pull(buf, msg_hdr_sz(msg));
930 cb(usr_handle, dref, &buf, msg_data(msg), 921 cb(usr_handle, dref, &buf, msg_data(msg),
@@ -937,7 +928,7 @@ err:
937 up_ptr->named_err_cb; 928 up_ptr->named_err_cb;
938 929
939 tipc_port_unlock(p_ptr); 930 tipc_port_unlock(p_ptr);
940 if (connected || !cb) 931 if (!cb || connected)
941 break; 932 break;
942 dseq.type = msg_nametype(msg); 933 dseq.type = msg_nametype(msg);
943 dseq.lower = msg_nameinst(msg); 934 dseq.lower = msg_nameinst(msg);
@@ -1053,6 +1044,7 @@ int tipc_createport(u32 user_ref,
1053{ 1044{
1054 struct user_port *up_ptr; 1045 struct user_port *up_ptr;
1055 struct port *p_ptr; 1046 struct port *p_ptr;
1047 struct tipc_port *tp_ptr;
1056 u32 ref; 1048 u32 ref;
1057 1049
1058 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1050 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
@@ -1060,12 +1052,13 @@ int tipc_createport(u32 user_ref,
1060 warn("Port creation failed, no memory\n"); 1052 warn("Port creation failed, no memory\n");
1061 return -ENOMEM; 1053 return -ENOMEM;
1062 } 1054 }
1063 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1055 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
1064 p_ptr = tipc_port_lock(ref); 1056 importance, &tp_ptr);
1065 if (!p_ptr) { 1057 if (ref == 0) {
1066 kfree(up_ptr); 1058 kfree(up_ptr);
1067 return -ENOMEM; 1059 return -ENOMEM;
1068 } 1060 }
1061 p_ptr = (struct port *)tp_ptr;
1069 1062
1070 p_ptr->user_port = up_ptr; 1063 p_ptr->user_port = up_ptr;
1071 up_ptr->user_ref = user_ref; 1064 up_ptr->user_ref = user_ref;
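
[Editorial sketch] tipc_createport_raw() now hands the new port back still locked through an out parameter, so the caller can finish initialization before the port becomes reachable; the caller drops the lock once setup is complete (the same idea appears in the ref.c and socket.c hunks that follow). A small userspace sketch of that idiom, with hypothetical names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct port {
            pthread_mutex_t lock;
            unsigned int ref;
            void *usr_handle;                /* caller-specific state */
    };

    /*
     * Create the object and return it *still locked* through an out
     * parameter.  Anyone who finds it through a reference table would
     * block on the lock until the caller has finished initializing it,
     * so a half-built object is never visible.
     */
    static unsigned int port_create_raw(struct port **out)
    {
            static unsigned int next_ref = 1;  /* a real table would lock this too */
            struct port *p = calloc(1, sizeof(*p));

            if (!p)
                    return 0;                /* 0 means "could not create" */

            pthread_mutex_init(&p->lock, NULL);
            pthread_mutex_lock(&p->lock);
            p->ref = next_ref++;
            *out = p;
            return p->ref;
    }

    int main(void)
    {
            struct port *p;
            unsigned int ref = port_create_raw(&p);

            if (ref == 0)
                    return 1;

            p->usr_handle = &ref;            /* finish caller-side setup... */
            pthread_mutex_unlock(&p->lock);  /* ...then publish the port */

            printf("created port %u\n", ref);
            return 0;
    }
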
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 89cbab24d08f..a101de86824d 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -142,9 +142,13 @@ void tipc_ref_table_stop(void)
142/** 142/**
143 * tipc_ref_acquire - create reference to an object 143 * tipc_ref_acquire - create reference to an object
144 * 144 *
145 * Return a unique reference value which can be translated back to the pointer 145 * Register an object pointer in reference table and lock the object.
146 * 'object' at a later time. Also, pass back a pointer to the lock protecting 146 * Returns a unique reference value that is used from then on to retrieve the
147 * the object, but without locking it. 147 * object pointer, or to determine that the object has been deregistered.
148 *
149 * Note: The object is returned in the locked state so that the caller can
150 * register a partially initialized object, without running the risk that
151 * the object will be accessed before initialization is complete.
148 */ 152 */
149 153
150u32 tipc_ref_acquire(void *object, spinlock_t **lock) 154u32 tipc_ref_acquire(void *object, spinlock_t **lock)
@@ -178,13 +182,13 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
178 ref = (next_plus_upper & ~index_mask) + index; 182 ref = (next_plus_upper & ~index_mask) + index;
179 entry->ref = ref; 183 entry->ref = ref;
180 entry->object = object; 184 entry->object = object;
181 spin_unlock_bh(&entry->lock);
182 *lock = &entry->lock; 185 *lock = &entry->lock;
183 } 186 }
184 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 187 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
185 index = tipc_ref_table.init_point++; 188 index = tipc_ref_table.init_point++;
186 entry = &(tipc_ref_table.entries[index]); 189 entry = &(tipc_ref_table.entries[index]);
187 spin_lock_init(&entry->lock); 190 spin_lock_init(&entry->lock);
191 spin_lock_bh(&entry->lock);
188 ref = tipc_ref_table.start_mask + index; 192 ref = tipc_ref_table.start_mask + index;
189 entry->ref = ref; 193 entry->ref = ref;
190 entry->object = object; 194 entry->object = object;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 230f9ca2ad6b..38f48795b40e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -188,6 +188,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
188 const struct proto_ops *ops; 188 const struct proto_ops *ops;
189 socket_state state; 189 socket_state state;
190 struct sock *sk; 190 struct sock *sk;
191 struct tipc_port *tp_ptr;
191 u32 portref; 192 u32 portref;
192 193
193 /* Validate arguments */ 194 /* Validate arguments */
@@ -225,7 +226,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
225 /* Allocate TIPC port for socket to use */ 226 /* Allocate TIPC port for socket to use */
226 227
227 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, 228 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
228 TIPC_LOW_IMPORTANCE); 229 TIPC_LOW_IMPORTANCE, &tp_ptr);
229 if (unlikely(portref == 0)) { 230 if (unlikely(portref == 0)) {
230 sk_free(sk); 231 sk_free(sk);
231 return -ENOMEM; 232 return -ENOMEM;
@@ -241,6 +242,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
241 sk->sk_backlog_rcv = backlog_rcv; 242 sk->sk_backlog_rcv = backlog_rcv;
242 tipc_sk(sk)->p = tipc_get_port(portref); 243 tipc_sk(sk)->p = tipc_get_port(portref);
243 244
245 spin_unlock_bh(tp_ptr->lock);
246
244 if (sock->state == SS_READY) { 247 if (sock->state == SS_READY) {
245 tipc_set_portunreturnable(portref, 1); 248 tipc_set_portunreturnable(portref, 1);
246 if (sock->type == SOCK_DGRAM) 249 if (sock->type == SOCK_DGRAM)
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 8c01ccd3626c..0326d3060bc7 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.c: TIPC subscription service 2 * net/tipc/subscr.c: TIPC network topology service
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -36,27 +36,24 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "dbg.h" 38#include "dbg.h"
39#include "subscr.h"
40#include "name_table.h" 39#include "name_table.h"
40#include "port.h"
41#include "ref.h" 41#include "ref.h"
42#include "subscr.h"
42 43
43/** 44/**
44 * struct subscriber - TIPC network topology subscriber 45 * struct subscriber - TIPC network topology subscriber
45 * @ref: object reference to subscriber object itself 46 * @port_ref: object reference to server port connecting to subscriber
46 * @lock: pointer to spinlock controlling access to subscriber object 47 * @lock: pointer to spinlock controlling access to subscriber's server port
47 * @subscriber_list: adjacent subscribers in top. server's list of subscribers 48 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
48 * @subscription_list: list of subscription objects for this subscriber 49 * @subscription_list: list of subscription objects for this subscriber
49 * @port_ref: object reference to port used to communicate with subscriber
50 * @swap: indicates if subscriber uses opposite endianness in its messages
51 */ 50 */
52 51
53struct subscriber { 52struct subscriber {
54 u32 ref; 53 u32 port_ref;
55 spinlock_t *lock; 54 spinlock_t *lock;
56 struct list_head subscriber_list; 55 struct list_head subscriber_list;
57 struct list_head subscription_list; 56 struct list_head subscription_list;
58 u32 port_ref;
59 int swap;
60}; 57};
61 58
62/** 59/**
@@ -88,13 +85,14 @@ static struct top_srv topsrv = { 0 };
88 85
89static u32 htohl(u32 in, int swap) 86static u32 htohl(u32 in, int swap)
90{ 87{
91 char *c = (char *)&in; 88 return swap ? (u32)___constant_swab32(in) : in;
92
93 return swap ? ((c[3] << 3) + (c[2] << 2) + (c[1] << 1) + c[0]) : in;
94} 89}
95 90
96/** 91/**
97 * subscr_send_event - send a message containing a tipc_event to the subscriber 92 * subscr_send_event - send a message containing a tipc_event to the subscriber
93 *
94 * Note: Must not hold subscriber's server port lock, since tipc_send() will
95 * try to take the lock if the message is rejected and returned!
98 */ 96 */
99 97
100static void subscr_send_event(struct subscription *sub, 98static void subscr_send_event(struct subscription *sub,
@@ -109,12 +107,12 @@ static void subscr_send_event(struct subscription *sub,
109 msg_sect.iov_base = (void *)&sub->evt; 107 msg_sect.iov_base = (void *)&sub->evt;
110 msg_sect.iov_len = sizeof(struct tipc_event); 108 msg_sect.iov_len = sizeof(struct tipc_event);
111 109
112 sub->evt.event = htohl(event, sub->owner->swap); 110 sub->evt.event = htohl(event, sub->swap);
113 sub->evt.found_lower = htohl(found_lower, sub->owner->swap); 111 sub->evt.found_lower = htohl(found_lower, sub->swap);
114 sub->evt.found_upper = htohl(found_upper, sub->owner->swap); 112 sub->evt.found_upper = htohl(found_upper, sub->swap);
115 sub->evt.port.ref = htohl(port_ref, sub->owner->swap); 113 sub->evt.port.ref = htohl(port_ref, sub->swap);
116 sub->evt.port.node = htohl(node, sub->owner->swap); 114 sub->evt.port.node = htohl(node, sub->swap);
117 tipc_send(sub->owner->port_ref, 1, &msg_sect); 115 tipc_send(sub->server_ref, 1, &msg_sect);
118} 116}
119 117
120/** 118/**
@@ -151,13 +149,12 @@ void tipc_subscr_report_overlap(struct subscription *sub,
151 u32 node, 149 u32 node,
152 int must) 150 int must)
153{ 151{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper);
156 if (!tipc_subscr_overlap(sub, found_lower, found_upper)) 152 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 153 return;
158 if (!must && !(sub->filter & TIPC_SUB_PORTS)) 154 if (!must && !(sub->filter & TIPC_SUB_PORTS))
159 return; 155 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); 156
157 sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
161} 158}
162 159
163/** 160/**
@@ -166,20 +163,18 @@ void tipc_subscr_report_overlap(struct subscription *sub,
166 163
167static void subscr_timeout(struct subscription *sub) 164static void subscr_timeout(struct subscription *sub)
168{ 165{
169 struct subscriber *subscriber; 166 struct port *server_port;
170 u32 subscriber_ref;
171 167
172 /* Validate subscriber reference (in case subscriber is terminating) */ 168 /* Validate server port reference (in case subscriber is terminating) */
173 169
174 subscriber_ref = sub->owner->ref; 170 server_port = tipc_port_lock(sub->server_ref);
175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref); 171 if (server_port == NULL)
176 if (subscriber == NULL)
177 return; 172 return;
178 173
179 /* Validate timeout (in case subscription is being cancelled) */ 174 /* Validate timeout (in case subscription is being cancelled) */
180 175
181 if (sub->timeout == TIPC_WAIT_FOREVER) { 176 if (sub->timeout == TIPC_WAIT_FOREVER) {
182 tipc_ref_unlock(subscriber_ref); 177 tipc_port_unlock(server_port);
183 return; 178 return;
184 } 179 }
185 180
@@ -187,19 +182,21 @@ static void subscr_timeout(struct subscription *sub)
187 182
188 tipc_nametbl_unsubscribe(sub); 183 tipc_nametbl_unsubscribe(sub);
189 184
190 /* Notify subscriber of timeout, then unlink subscription */ 185 /* Unlink subscription from subscriber */
191 186
192 subscr_send_event(sub,
193 sub->evt.s.seq.lower,
194 sub->evt.s.seq.upper,
195 TIPC_SUBSCR_TIMEOUT,
196 0,
197 0);
198 list_del(&sub->subscription_list); 187 list_del(&sub->subscription_list);
199 188
189 /* Release subscriber's server port */
190
191 tipc_port_unlock(server_port);
192
193 /* Notify subscriber of timeout */
194
195 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
196 TIPC_SUBSCR_TIMEOUT, 0, 0);
197
200 /* Now destroy subscription */ 198 /* Now destroy subscription */
201 199
202 tipc_ref_unlock(subscriber_ref);
203 k_term_timer(&sub->timer); 200 k_term_timer(&sub->timer);
204 kfree(sub); 201 kfree(sub);
205 atomic_dec(&topsrv.subscription_count); 202 atomic_dec(&topsrv.subscription_count);
@@ -208,7 +205,7 @@ static void subscr_timeout(struct subscription *sub)
208/** 205/**
209 * subscr_del - delete a subscription within a subscription list 206 * subscr_del - delete a subscription within a subscription list
210 * 207 *
211 * Called with subscriber locked. 208 * Called with subscriber port locked.
212 */ 209 */
213 210
214static void subscr_del(struct subscription *sub) 211static void subscr_del(struct subscription *sub)
@@ -222,7 +219,7 @@ static void subscr_del(struct subscription *sub)
222/** 219/**
223 * subscr_terminate - terminate communication with a subscriber 220 * subscr_terminate - terminate communication with a subscriber
224 * 221 *
225 * Called with subscriber locked. Routine must temporarily release this lock 222 * Called with subscriber port locked. Routine must temporarily release lock
226 * to enable subscription timeout routine(s) to finish without deadlocking; 223 * to enable subscription timeout routine(s) to finish without deadlocking;
227 * the lock is then reclaimed to allow caller to release it upon return. 224 * the lock is then reclaimed to allow caller to release it upon return.
228 * (This should work even in the unlikely event some other thread creates 225 * (This should work even in the unlikely event some other thread creates
@@ -232,14 +229,21 @@ static void subscr_del(struct subscription *sub)
232 229
233static void subscr_terminate(struct subscriber *subscriber) 230static void subscr_terminate(struct subscriber *subscriber)
234{ 231{
232 u32 port_ref;
235 struct subscription *sub; 233 struct subscription *sub;
236 struct subscription *sub_temp; 234 struct subscription *sub_temp;
237 235
238 /* Invalidate subscriber reference */ 236 /* Invalidate subscriber reference */
239 237
240 tipc_ref_discard(subscriber->ref); 238 port_ref = subscriber->port_ref;
239 subscriber->port_ref = 0;
241 spin_unlock_bh(subscriber->lock); 240 spin_unlock_bh(subscriber->lock);
242 241
242 /* Sever connection to subscriber */
243
244 tipc_shutdown(port_ref);
245 tipc_deleteport(port_ref);
246
243 /* Destroy any existing subscriptions for subscriber */ 247 /* Destroy any existing subscriptions for subscriber */
244 248
245 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 249 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
@@ -253,27 +257,25 @@ static void subscr_terminate(struct subscriber *subscriber)
253 subscr_del(sub); 257 subscr_del(sub);
254 } 258 }
255 259
256 /* Sever connection to subscriber */
257
258 tipc_shutdown(subscriber->port_ref);
259 tipc_deleteport(subscriber->port_ref);
260
261 /* Remove subscriber from topology server's subscriber list */ 260 /* Remove subscriber from topology server's subscriber list */
262 261
263 spin_lock_bh(&topsrv.lock); 262 spin_lock_bh(&topsrv.lock);
264 list_del(&subscriber->subscriber_list); 263 list_del(&subscriber->subscriber_list);
265 spin_unlock_bh(&topsrv.lock); 264 spin_unlock_bh(&topsrv.lock);
266 265
267 /* Now destroy subscriber */ 266 /* Reclaim subscriber lock */
268 267
269 spin_lock_bh(subscriber->lock); 268 spin_lock_bh(subscriber->lock);
269
270 /* Now destroy subscriber */
271
270 kfree(subscriber); 272 kfree(subscriber);
271} 273}
272 274
273/** 275/**
274 * subscr_cancel - handle subscription cancellation request 276 * subscr_cancel - handle subscription cancellation request
275 * 277 *
276 * Called with subscriber locked. Routine must temporarily release this lock 278 * Called with subscriber port locked. Routine must temporarily release lock
277 * to enable the subscription timeout routine to finish without deadlocking; 279 * to enable the subscription timeout routine to finish without deadlocking;
278 * the lock is then reclaimed to allow caller to release it upon return. 280 * the lock is then reclaimed to allow caller to release it upon return.
279 * 281 *
@@ -316,27 +318,25 @@ static void subscr_cancel(struct tipc_subscr *s,
316/** 318/**
317 * subscr_subscribe - create subscription for subscriber 319 * subscr_subscribe - create subscription for subscriber
318 * 320 *
319 * Called with subscriber locked 321 * Called with subscriber port locked.
320 */ 322 */
321 323
322static void subscr_subscribe(struct tipc_subscr *s, 324static struct subscription *subscr_subscribe(struct tipc_subscr *s,
323 struct subscriber *subscriber) 325 struct subscriber *subscriber)
324{ 326{
325 struct subscription *sub; 327 struct subscription *sub;
328 int swap;
326 329
327 /* Determine/update subscriber's endianness */ 330 /* Determine subscriber's endianness */
328 331
329 if (s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)) 332 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
330 subscriber->swap = 0;
331 else
332 subscriber->swap = 1;
333 333
334 /* Detect & process a subscription cancellation request */ 334 /* Detect & process a subscription cancellation request */
335 335
336 if (s->filter & htohl(TIPC_SUB_CANCEL, subscriber->swap)) { 336 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, subscriber->swap); 337 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
338 subscr_cancel(s, subscriber); 338 subscr_cancel(s, subscriber);
339 return; 339 return NULL;
340 } 340 }
341 341
342 /* Refuse subscription if global limit exceeded */ 342 /* Refuse subscription if global limit exceeded */
@@ -345,63 +345,66 @@ static void subscr_subscribe(struct tipc_subscr *s,
345 warn("Subscription rejected, subscription limit reached (%u)\n", 345 warn("Subscription rejected, subscription limit reached (%u)\n",
346 tipc_max_subscriptions); 346 tipc_max_subscriptions);
347 subscr_terminate(subscriber); 347 subscr_terminate(subscriber);
348 return; 348 return NULL;
349 } 349 }
350 350
351 /* Allocate subscription object */ 351 /* Allocate subscription object */
352 352
353 sub = kzalloc(sizeof(*sub), GFP_ATOMIC); 353 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
354 if (!sub) { 354 if (!sub) {
355 warn("Subscription rejected, no memory\n"); 355 warn("Subscription rejected, no memory\n");
356 subscr_terminate(subscriber); 356 subscr_terminate(subscriber);
357 return; 357 return NULL;
358 } 358 }
359 359
360 /* Initialize subscription object */ 360 /* Initialize subscription object */
361 361
362 sub->seq.type = htohl(s->seq.type, subscriber->swap); 362 sub->seq.type = htohl(s->seq.type, swap);
363 sub->seq.lower = htohl(s->seq.lower, subscriber->swap); 363 sub->seq.lower = htohl(s->seq.lower, swap);
364 sub->seq.upper = htohl(s->seq.upper, subscriber->swap); 364 sub->seq.upper = htohl(s->seq.upper, swap);
365 sub->timeout = htohl(s->timeout, subscriber->swap); 365 sub->timeout = htohl(s->timeout, swap);
366 sub->filter = htohl(s->filter, subscriber->swap); 366 sub->filter = htohl(s->filter, swap);
367 if ((!(sub->filter & TIPC_SUB_PORTS) 367 if ((!(sub->filter & TIPC_SUB_PORTS)
368 == !(sub->filter & TIPC_SUB_SERVICE)) 368 == !(sub->filter & TIPC_SUB_SERVICE))
369 || (sub->seq.lower > sub->seq.upper)) { 369 || (sub->seq.lower > sub->seq.upper)) {
370 warn("Subscription rejected, illegal request\n"); 370 warn("Subscription rejected, illegal request\n");
371 kfree(sub); 371 kfree(sub);
372 subscr_terminate(subscriber); 372 subscr_terminate(subscriber);
373 return; 373 return NULL;
374 } 374 }
375 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 375 sub->event_cb = subscr_send_event;
376 INIT_LIST_HEAD(&sub->subscription_list);
377 INIT_LIST_HEAD(&sub->nameseq_list); 376 INIT_LIST_HEAD(&sub->nameseq_list);
378 list_add(&sub->subscription_list, &subscriber->subscription_list); 377 list_add(&sub->subscription_list, &subscriber->subscription_list);
378 sub->server_ref = subscriber->port_ref;
379 sub->swap = swap;
380 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
379 atomic_inc(&topsrv.subscription_count); 381 atomic_inc(&topsrv.subscription_count);
380 if (sub->timeout != TIPC_WAIT_FOREVER) { 382 if (sub->timeout != TIPC_WAIT_FOREVER) {
381 k_init_timer(&sub->timer, 383 k_init_timer(&sub->timer,
382 (Handler)subscr_timeout, (unsigned long)sub); 384 (Handler)subscr_timeout, (unsigned long)sub);
383 k_start_timer(&sub->timer, sub->timeout); 385 k_start_timer(&sub->timer, sub->timeout);
384 } 386 }
385 sub->owner = subscriber; 387
386 tipc_nametbl_subscribe(sub); 388 return sub;
387} 389}
388 390
389/** 391/**
390 * subscr_conn_shutdown_event - handle termination request from subscriber 392 * subscr_conn_shutdown_event - handle termination request from subscriber
393 *
394 * Called with subscriber's server port unlocked.
391 */ 395 */
392 396
393static void subscr_conn_shutdown_event(void *usr_handle, 397static void subscr_conn_shutdown_event(void *usr_handle,
394 u32 portref, 398 u32 port_ref,
395 struct sk_buff **buf, 399 struct sk_buff **buf,
396 unsigned char const *data, 400 unsigned char const *data,
397 unsigned int size, 401 unsigned int size,
398 int reason) 402 int reason)
399{ 403{
400 struct subscriber *subscriber; 404 struct subscriber *subscriber = usr_handle;
401 spinlock_t *subscriber_lock; 405 spinlock_t *subscriber_lock;
402 406
403 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 407 if (tipc_port_lock(port_ref) == NULL)
404 if (subscriber == NULL)
405 return; 408 return;
406 409
407 subscriber_lock = subscriber->lock; 410 subscriber_lock = subscriber->lock;
@@ -411,6 +414,8 @@ static void subscr_conn_shutdown_event(void *usr_handle,
411 414
412/** 415/**
413 * subscr_conn_msg_event - handle new subscription request from subscriber 416 * subscr_conn_msg_event - handle new subscription request from subscriber
417 *
418 * Called with subscriber's server port unlocked.
414 */ 419 */
415 420
416static void subscr_conn_msg_event(void *usr_handle, 421static void subscr_conn_msg_event(void *usr_handle,
@@ -419,20 +424,46 @@ static void subscr_conn_msg_event(void *usr_handle,
419 const unchar *data, 424 const unchar *data,
420 u32 size) 425 u32 size)
421{ 426{
422 struct subscriber *subscriber; 427 struct subscriber *subscriber = usr_handle;
423 spinlock_t *subscriber_lock; 428 spinlock_t *subscriber_lock;
429 struct subscription *sub;
430
431 /*
432 * Lock subscriber's server port (& make a local copy of lock pointer,
433 * in case subscriber is deleted while processing subscription request)
434 */
424 435
425 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle); 436 if (tipc_port_lock(port_ref) == NULL)
426 if (subscriber == NULL)
427 return; 437 return;
428 438
429 subscriber_lock = subscriber->lock; 439 subscriber_lock = subscriber->lock;
430 if (size != sizeof(struct tipc_subscr))
431 subscr_terminate(subscriber);
432 else
433 subscr_subscribe((struct tipc_subscr *)data, subscriber);
434 440
435 spin_unlock_bh(subscriber_lock); 441 if (size != sizeof(struct tipc_subscr)) {
442 subscr_terminate(subscriber);
443 spin_unlock_bh(subscriber_lock);
444 } else {
445 sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
446 spin_unlock_bh(subscriber_lock);
447 if (sub != NULL) {
448
449 /*
450 * We must release the server port lock before adding a
451 * subscription to the name table since TIPC needs to be
452 * able to (re)acquire the port lock if an event message
453 * issued by the subscription process is rejected and
454 * returned. The subscription cannot be deleted while
455 * it is being added to the name table because:
456 * a) the single-threading of the native API port code
457 * ensures the subscription cannot be cancelled and
458 * the subscriber connection cannot be broken, and
459 * b) the name table lock ensures the subscription
460 * timeout code cannot delete the subscription,
461 * so the subscription object is still protected.
462 */
463
464 tipc_nametbl_subscribe(sub);
465 }
466 }
436} 467}
437 468
438/** 469/**
@@ -448,16 +479,10 @@ static void subscr_named_msg_event(void *usr_handle,
448 struct tipc_portid const *orig, 479 struct tipc_portid const *orig,
449 struct tipc_name_seq const *dest) 480 struct tipc_name_seq const *dest)
450{ 481{
451 struct subscriber *subscriber; 482 static struct iovec msg_sect = {NULL, 0};
452 struct iovec msg_sect = {NULL, 0};
453 spinlock_t *subscriber_lock;
454 483
455 dbg("subscr_named_msg_event: orig = %x own = %x,\n", 484 struct subscriber *subscriber;
456 orig->node, tipc_own_addr); 485 u32 server_port_ref;
457 if (size && (size != sizeof(struct tipc_subscr))) {
458 warn("Subscriber rejected, invalid subscription size\n");
459 return;
460 }
461 486
462 /* Create subscriber object */ 487 /* Create subscriber object */
463 488
@@ -468,17 +493,11 @@ static void subscr_named_msg_event(void *usr_handle,
468 } 493 }
469 INIT_LIST_HEAD(&subscriber->subscription_list); 494 INIT_LIST_HEAD(&subscriber->subscription_list);
470 INIT_LIST_HEAD(&subscriber->subscriber_list); 495 INIT_LIST_HEAD(&subscriber->subscriber_list);
471 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
472 if (subscriber->ref == 0) {
473 warn("Subscriber rejected, reference table exhausted\n");
474 kfree(subscriber);
475 return;
476 }
477 496
478 /* Establish a connection to subscriber */ 497 /* Create server port & establish connection to subscriber */
479 498
480 tipc_createport(topsrv.user_ref, 499 tipc_createport(topsrv.user_ref,
481 (void *)(unsigned long)subscriber->ref, 500 subscriber,
482 importance, 501 importance,
483 NULL, 502 NULL,
484 NULL, 503 NULL,
@@ -490,32 +509,36 @@ static void subscr_named_msg_event(void *usr_handle,
490 &subscriber->port_ref); 509 &subscriber->port_ref);
491 if (subscriber->port_ref == 0) { 510 if (subscriber->port_ref == 0) {
492 warn("Subscriber rejected, unable to create port\n"); 511 warn("Subscriber rejected, unable to create port\n");
493 tipc_ref_discard(subscriber->ref);
494 kfree(subscriber); 512 kfree(subscriber);
495 return; 513 return;
496 } 514 }
497 tipc_connect2port(subscriber->port_ref, orig); 515 tipc_connect2port(subscriber->port_ref, orig);
498 516
517 /* Lock server port (& save lock address for future use) */
518
519 subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
499 520
500 /* Add subscriber to topology server's subscriber list */ 521 /* Add subscriber to topology server's subscriber list */
501 522
502 tipc_ref_lock(subscriber->ref);
503 spin_lock_bh(&topsrv.lock); 523 spin_lock_bh(&topsrv.lock);
504 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 524 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
505 spin_unlock_bh(&topsrv.lock); 525 spin_unlock_bh(&topsrv.lock);
506 526
507 /* 527 /* Unlock server port */
508 * Subscribe now if message contains a subscription,
509 * otherwise send an empty response to complete connection handshaking
510 */
511 528
512 subscriber_lock = subscriber->lock; 529 server_port_ref = subscriber->port_ref;
513 if (size) 530 spin_unlock_bh(subscriber->lock);
514 subscr_subscribe((struct tipc_subscr *)data, subscriber);
515 else
516 tipc_send(subscriber->port_ref, 1, &msg_sect);
517 531
518 spin_unlock_bh(subscriber_lock); 532 /* Send an ACK- to complete connection handshaking */
533
534 tipc_send(server_port_ref, 1, &msg_sect);
535
536 /* Handle optional subscription request */
537
538 if (size != 0) {
539 subscr_conn_msg_event(subscriber, server_port_ref,
540 buf, data, size);
541 }
519} 542}
520 543
521int tipc_subscr_start(void) 544int tipc_subscr_start(void)
@@ -574,8 +597,8 @@ void tipc_subscr_stop(void)
574 list_for_each_entry_safe(subscriber, subscriber_temp, 597 list_for_each_entry_safe(subscriber, subscriber_temp,
575 &topsrv.subscriber_list, 598 &topsrv.subscriber_list,
576 subscriber_list) { 599 subscriber_list) {
577 tipc_ref_lock(subscriber->ref);
578 subscriber_lock = subscriber->lock; 600 subscriber_lock = subscriber->lock;
601 spin_lock_bh(subscriber_lock);
579 subscr_terminate(subscriber); 602 subscr_terminate(subscriber);
580 spin_unlock_bh(subscriber_lock); 603 spin_unlock_bh(subscriber_lock);
581 } 604 }
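
[Editorial sketch] The headline fix in subscr.c is htohl(): the old version tried to byte-swap by shifting bytes 1, 2 and 3 bit positions instead of 8, 16 and 24, so it never produced a correct swap; the patch delegates to the kernel's 32-bit swab. A standalone sketch of the corrected conditional swap (local helper names, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Correct 32-bit byte swap: each byte moves by a multiple of 8 bits. */
    static uint32_t swab32(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    /*
     * Convert a field from the subscriber's endianness to the host's:
     * swap only if the peer is known to use the opposite byte order.
     */
    static uint32_t htohl(uint32_t in, int swap)
    {
            return swap ? swab32(in) : in;
    }

    int main(void)
    {
            uint32_t v = 0x12345678u;

            printf("%#x -> %#x (swapped)\n", (unsigned)v, (unsigned)htohl(v, 1));
            printf("%#x -> %#x (native)\n",  (unsigned)v, (unsigned)htohl(v, 0));
            return 0;
    }
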
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 93a8e674fac1..45d89bf4d202 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service 2 * net/tipc/subscr.h: Include file for TIPC network topology service
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -37,34 +37,44 @@
37#ifndef _TIPC_SUBSCR_H 37#ifndef _TIPC_SUBSCR_H
38#define _TIPC_SUBSCR_H 38#define _TIPC_SUBSCR_H
39 39
40struct subscription;
41
42typedef void (*tipc_subscr_event) (struct subscription *sub,
43 u32 found_lower, u32 found_upper,
44 u32 event, u32 port_ref, u32 node);
45
40/** 46/**
41 * struct subscription - TIPC network topology subscription object 47 * struct subscription - TIPC network topology subscription object
42 * @seq: name sequence associated with subscription 48 * @seq: name sequence associated with subscription
43 * @timeout: duration of subscription (in ms) 49 * @timeout: duration of subscription (in ms)
44 * @filter: event filtering to be done for subscription 50 * @filter: event filtering to be done for subscription
45 * @evt: template for events generated by subscription 51 * @event_cb: routine invoked when a subscription event is detected
46 * @subscription_list: adjacent subscriptions in subscriber's subscription list 52 * @timer: timer governing subscription duration (optional)
47 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
48 * @timer_ref: reference to timer governing subscription duration (may be NULL) 54 * @subscription_list: adjacent subscriptions in subscriber's subscription list
49 * @owner: pointer to subscriber object associated with this subscription 55 * @server_ref: object reference of server port associated with subscription
56 * @swap: indicates if subscriber uses opposite endianness in its messages
57 * @evt: template for events generated by subscription
50 */ 58 */
51 59
52struct subscription { 60struct subscription {
53 struct tipc_name_seq seq; 61 struct tipc_name_seq seq;
54 u32 timeout; 62 u32 timeout;
55 u32 filter; 63 u32 filter;
56 struct tipc_event evt; 64 tipc_subscr_event event_cb;
57 struct list_head subscription_list;
58 struct list_head nameseq_list;
59 struct timer_list timer; 65 struct timer_list timer;
60 struct subscriber *owner; 66 struct list_head nameseq_list;
67 struct list_head subscription_list;
68 u32 server_ref;
69 int swap;
70 struct tipc_event evt;
61}; 71};
62 72
63int tipc_subscr_overlap(struct subscription * sub, 73int tipc_subscr_overlap(struct subscription *sub,
64 u32 found_lower, 74 u32 found_lower,
65 u32 found_upper); 75 u32 found_upper);
66 76
67void tipc_subscr_report_overlap(struct subscription * sub, 77void tipc_subscr_report_overlap(struct subscription *sub,
68 u32 found_lower, 78 u32 found_lower,
69 u32 found_upper, 79 u32 found_upper,
70 u32 event, 80 u32 event,
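
[Editorial sketch] subscr.h now stores a tipc_subscr_event function pointer in each subscription, so overlap reporting just invokes the callback and no longer needs to know how the event reaches the subscriber. A minimal sketch of that indirection with illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    struct subscription;

    /* Signature of the event sink; mirrors the tipc_subscr_event idea. */
    typedef void (*event_fn)(struct subscription *sub,
                             uint32_t lower, uint32_t upper, uint32_t event);

    struct subscription {
            uint32_t lower, upper;
            event_fn event_cb;               /* how events reach the owner */
    };

    static void send_event(struct subscription *sub,
                           uint32_t lower, uint32_t upper, uint32_t event)
    {
            (void)sub;
            printf("event %u for range %u-%u\n", event, lower, upper);
    }

    /* The producer only knows the callback, not the transport behind it. */
    static void report_overlap(struct subscription *sub,
                               uint32_t lower, uint32_t upper, uint32_t event)
    {
            if (upper < sub->lower || lower > sub->upper)
                    return;                  /* no overlap, nothing to report */
            sub->event_cb(sub, lower, upper, event);
    }

    int main(void)
    {
            struct subscription sub = { 100, 200, send_event };

            report_overlap(&sub, 150, 180, 1);   /* delivered */
            report_overlap(&sub, 300, 400, 1);   /* filtered out */
            return 0;
    }
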
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 783317dacd30..70ceb1604ad8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,8 +8,6 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12 *
13 * Fixes: 11 * Fixes:
14 * Linus Torvalds : Assorted bug cures. 12 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support. 13 * Niibe Yutaka : async I/O support.
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 9ab31a3ce3ad..b210a88d0960 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -350,9 +350,9 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
350 * o execute requested action or pass command to the device driver 350 * o execute requested action or pass command to the device driver
351 */ 351 */
352 352
353int wanrouter_ioctl(struct inode *inode, struct file *file, 353long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
354 unsigned int cmd, unsigned long arg)
355{ 354{
355 struct inode *inode = file->f_path.dentry->d_inode;
356 int err = 0; 356 int err = 0;
357 struct proc_dir_entry *dent; 357 struct proc_dir_entry *dent;
358 struct wan_device *wandev; 358 struct wan_device *wandev;
@@ -372,6 +372,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
372 if (wandev->magic != ROUTER_MAGIC) 372 if (wandev->magic != ROUTER_MAGIC)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 lock_kernel();
375 switch (cmd) { 376 switch (cmd) {
376 case ROUTER_SETUP: 377 case ROUTER_SETUP:
377 err = wanrouter_device_setup(wandev, data); 378 err = wanrouter_device_setup(wandev, data);
@@ -403,6 +404,7 @@ int wanrouter_ioctl(struct inode *inode, struct file *file,
403 err = wandev->ioctl(wandev, cmd, arg); 404 err = wandev->ioctl(wandev, cmd, arg);
404 else err = -EINVAL; 405 else err = -EINVAL;
405 } 406 }
407 unlock_kernel();
406 return err; 408 return err;
407} 409}
408 410
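
[Editorial sketch] wanrouter_ioctl() above moves from the old inode-taking .ioctl hook to .unlocked_ioctl, so the VFS no longer wraps the call in the Big Kernel Lock; the handler now takes and releases the BKL itself around only the work that still needs it. A kernel-style sketch of that shape, assuming the 2.6.26-era API (driver names are placeholders, module boilerplate omitted):

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/smp_lock.h>       /* lock_kernel()/unlock_kernel() */

    /* Sketch only: a driver-private ioctl moved to the unlocked hook. */
    static long sample_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
    {
            long err;

            lock_kernel();            /* BKL now taken by the driver itself */
            switch (cmd) {
            default:
                    err = -ENOTTY;
                    break;
            }
            unlock_kernel();
            return err;
    }

    static const struct file_operations sample_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = sample_ioctl,
    };
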
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 5bebe40bf4e6..267f7ff49827 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -278,7 +278,7 @@ static const struct file_operations wandev_fops = {
278 .read = seq_read, 278 .read = seq_read,
279 .llseek = seq_lseek, 279 .llseek = seq_lseek,
280 .release = single_release, 280 .release = single_release,
281 .ioctl = wanrouter_ioctl, 281 .unlocked_ioctl = wanrouter_ioctl,
282}; 282};
283 283
284/* 284/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 80afacdae46c..f1da0b93bc56 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -143,8 +143,11 @@ void cfg80211_put_dev(struct cfg80211_registered_device *drv)
143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, 143int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
144 char *newname) 144 char *newname)
145{ 145{
146 struct cfg80211_registered_device *drv;
146 int idx, taken = -1, result, digits; 147 int idx, taken = -1, result, digits;
147 148
149 mutex_lock(&cfg80211_drv_mutex);
150
148 /* prohibit calling the thing phy%d when %d is not its number */ 151 /* prohibit calling the thing phy%d when %d is not its number */
149 sscanf(newname, PHY_NAME "%d%n", &idx, &taken); 152 sscanf(newname, PHY_NAME "%d%n", &idx, &taken);
150 if (taken == strlen(newname) && idx != rdev->idx) { 153 if (taken == strlen(newname) && idx != rdev->idx) {
@@ -156,14 +159,30 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
156 * deny the name if it is phy<idx> where <idx> is printed 159 * deny the name if it is phy<idx> where <idx> is printed
157 * without leading zeroes. taken == strlen(newname) here 160 * without leading zeroes. taken == strlen(newname) here
158 */ 161 */
162 result = -EINVAL;
159 if (taken == strlen(PHY_NAME) + digits) 163 if (taken == strlen(PHY_NAME) + digits)
160 return -EINVAL; 164 goto out_unlock;
165 }
166
167
168 /* Ignore nop renames */
169 result = 0;
170 if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0)
171 goto out_unlock;
172
173 /* Ensure another device does not already have this name. */
174 list_for_each_entry(drv, &cfg80211_drv_list, list) {
175 result = -EINVAL;
176 if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
177 goto out_unlock;
161 } 178 }
162 179
163 /* this will check for collisions */ 180 /* this will only check for collisions in sysfs
181 * which is not even always compiled in.
182 */
164 result = device_rename(&rdev->wiphy.dev, newname); 183 result = device_rename(&rdev->wiphy.dev, newname);
165 if (result) 184 if (result)
166 return result; 185 goto out_unlock;
167 186
168 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 187 if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
169 rdev->wiphy.debugfsdir, 188 rdev->wiphy.debugfsdir,
@@ -172,9 +191,13 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
172 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", 191 printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
173 newname); 192 newname);
174 193
175 nl80211_notify_dev_rename(rdev); 194 result = 0;
195out_unlock:
196 mutex_unlock(&cfg80211_drv_mutex);
197 if (result == 0)
198 nl80211_notify_dev_rename(rdev);
176 199
177 return 0; 200 return result;
178} 201}
179 202
180/* exported functions */ 203/* exported functions */
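
[Editorial sketch] cfg80211_dev_rename() above now holds cfg80211_drv_mutex for the whole operation, treats a rename to the current name as a no-op, and rejects a name already used by another registered device instead of relying on sysfs to notice the collision. A userspace sketch of that single-exit, check-under-lock structure; the device list and names are made up:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct device { char name[16]; struct device *next; };

    static struct device phy0 = { "phy0", NULL };
    static struct device phy1 = { "phy1", &phy0 };
    static struct device *devices = &phy1;
    static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Rename with a single unlock/exit path: the whole operation runs
     * under the device-list mutex, a rename to the current name is a
     * no-op, and a name owned by another device is rejected before
     * anything is changed.
     */
    static int dev_rename(struct device *dev, const char *newname)
    {
            struct device *d;
            int result = 0;

            pthread_mutex_lock(&dev_mutex);

            if (strcmp(newname, dev->name) == 0)
                    goto out_unlock;                 /* nothing to do */

            for (d = devices; d; d = d->next) {
                    if (d != dev && strcmp(newname, d->name) == 0) {
                            result = -1;             /* name already taken */
                            goto out_unlock;
                    }
            }

            snprintf(dev->name, sizeof(dev->name), "%s", newname);

    out_unlock:
            pthread_mutex_unlock(&dev_mutex);
            return result;
    }

    int main(void)
    {
            printf("rename to radio0: %d\n", dev_rename(&phy1, "radio0"));
            printf("rename to phy0:   %d\n", dev_rename(&phy1, "phy0"));
            return 0;
    }
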
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 28fbd0b0b568..f591871a7b4f 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -59,23 +59,21 @@ int ieee80211_radiotap_iterator_init(
 		return -EINVAL;
 
 	/* sanity check for allowed length and radiotap length field */
-	if (max_length < le16_to_cpu(get_unaligned(&radiotap_header->it_len)))
+	if (max_length < get_unaligned_le16(&radiotap_header->it_len))
 		return -EINVAL;
 
 	iterator->rtheader = radiotap_header;
-	iterator->max_length = le16_to_cpu(get_unaligned(
-						&radiotap_header->it_len));
+	iterator->max_length = get_unaligned_le16(&radiotap_header->it_len);
 	iterator->arg_index = 0;
-	iterator->bitmap_shifter = le32_to_cpu(get_unaligned(
-						&radiotap_header->it_present));
+	iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
 	iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
 	iterator->this_arg = NULL;
 
 	/* find payload start allowing for extended bitmap(s) */
 
 	if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
-		while (le32_to_cpu(get_unaligned((__le32 *)iterator->arg)) &
-		       (1<<IEEE80211_RADIOTAP_EXT)) {
+		while (get_unaligned_le32(iterator->arg) &
+		       (1 << IEEE80211_RADIOTAP_EXT)) {
 			iterator->arg += sizeof(u32);
 
 			/*
@@ -241,8 +239,8 @@ int ieee80211_radiotap_iterator_next(
 	if (iterator->bitmap_shifter & 1) {
 		/* b31 was set, there is more */
 		/* move to next u32 bitmap */
-		iterator->bitmap_shifter = le32_to_cpu(
-			get_unaligned(iterator->next_bitmap));
+		iterator->bitmap_shifter =
+			get_unaligned_le32(iterator->next_bitmap);
 		iterator->next_bitmap++;
 	} else
 		/* no more bitmaps: end */
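
The radiotap changes above are a mechanical conversion: le16_to_cpu(get_unaligned(p)) becomes get_unaligned_le16(p), and likewise for the 32-bit fields. A small illustrative reader, with a made-up header struct (assuming the <asm/unaligned.h> helpers of kernels from this era), might look like this:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical little-endian, possibly unaligned header. */
struct my_le_hdr {
	__le16 len;
	__le32 present;
} __attribute__((packed));

static u16 my_hdr_len(const struct my_le_hdr *hdr)
{
	/* old spelling: le16_to_cpu(get_unaligned(&hdr->len)) */
	return get_unaligned_le16(&hdr->len);
}

static u32 my_hdr_present(const struct my_le_hdr *hdr)
{
	/* old spelling: le32_to_cpu(get_unaligned(&hdr->present)) */
	return get_unaligned_le32(&hdr->present);
}
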
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 947188a5b937..273a84359998 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -500,7 +500,7 @@ static int call_commit_handler(struct net_device *dev)
 /*
  * Calculate size of private arguments
  */
-static inline int get_priv_size(__u16 args)
+static int get_priv_size(__u16 args)
 {
 	int num = args & IW_PRIV_SIZE_MASK;
 	int type = (args & IW_PRIV_TYPE_MASK) >> 12;
@@ -512,10 +512,9 @@ static inline int get_priv_size(__u16 args)
 /*
  * Re-calculate the size of private arguments
  */
-static inline int adjust_priv_size(__u16 args,
-				   union iwreq_data *wrqu)
+static int adjust_priv_size(__u16 args, struct iw_point *iwp)
 {
-	int num = wrqu->data.length;
+	int num = iwp->length;
 	int max = args & IW_PRIV_SIZE_MASK;
 	int type = (args & IW_PRIV_TYPE_MASK) >> 12;
 
@@ -695,19 +694,150 @@ void wext_proc_exit(struct net *net)
  */
 
 /* ---------------------------------------------------------------- */
+static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+				   const struct iw_ioctl_description *descr,
+				   iw_handler handler, struct net_device *dev,
+				   struct iw_request_info *info)
+{
+	int err, extra_size, user_length = 0, essid_compat = 0;
+	char *extra;
+
+	/* Calculate space needed by arguments. Always allocate
+	 * for max space.
+	 */
+	extra_size = descr->max_tokens * descr->token_size;
+
+	/* Check need for ESSID compatibility for WE < 21 */
+	switch (cmd) {
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		if (iwp->length == descr->max_tokens + 1)
+			essid_compat = 1;
+		else if (IW_IS_SET(cmd) && (iwp->length != 0)) {
+			char essid[IW_ESSID_MAX_SIZE + 1];
+
+			err = copy_from_user(essid, iwp->pointer,
+					     iwp->length *
+					     descr->token_size);
+			if (err)
+				return -EFAULT;
+
+			if (essid[iwp->length - 1] == '\0')
+				essid_compat = 1;
+		}
+		break;
+	default:
+		break;
+	}
+
+	iwp->length -= essid_compat;
+
+	/* Check what user space is giving us */
+	if (IW_IS_SET(cmd)) {
+		/* Check NULL pointer */
+		if (!iwp->pointer && iwp->length != 0)
+			return -EFAULT;
+		/* Check if number of token fits within bounds */
+		if (iwp->length > descr->max_tokens)
+			return -E2BIG;
+		if (iwp->length < descr->min_tokens)
+			return -EINVAL;
+	} else {
+		/* Check NULL pointer */
+		if (!iwp->pointer)
+			return -EFAULT;
+		/* Save user space buffer size for checking */
+		user_length = iwp->length;
+
+		/* Don't check if user_length > max to allow forward
+		 * compatibility. The test user_length < min is
+		 * implied by the test at the end.
+		 */
+
+		/* Support for very large requests */
+		if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+		    (user_length > descr->max_tokens)) {
+			/* Allow userspace to GET more than max so
+			 * we can support any size GET requests.
+			 * There is still a limit : -ENOMEM.
+			 */
+			extra_size = user_length * descr->token_size;
+
+			/* Note : user_length is originally a __u16,
+			 * and token_size is controlled by us,
+			 * so extra_size won't get negative and
+			 * won't overflow...
+			 */
+		}
+	}
+
+	/* kzalloc() ensures NULL-termination for essid_compat. */
+	extra = kzalloc(extra_size, GFP_KERNEL);
+	if (!extra)
+		return -ENOMEM;
+
+	/* If it is a SET, get all the extra data in here */
+	if (IW_IS_SET(cmd) && (iwp->length != 0)) {
+		if (copy_from_user(extra, iwp->pointer,
+				   iwp->length *
+				   descr->token_size)) {
+			err = -EFAULT;
+			goto out;
+		}
+	}
+
+	err = handler(dev, info, (union iwreq_data *) iwp, extra);
+
+	iwp->length += essid_compat;
+
+	/* If we have something to return to the user */
+	if (!err && IW_IS_GET(cmd)) {
+		/* Check if there is enough buffer up there */
+		if (user_length < iwp->length) {
+			err = -E2BIG;
+			goto out;
+		}
+
+		if (copy_to_user(iwp->pointer, extra,
+				 iwp->length *
+				 descr->token_size)) {
+			err = -EFAULT;
+			goto out;
+		}
+	}
+
+	/* Generate an event to notify listeners of the change */
+	if ((descr->flags & IW_DESCR_FLAG_EVENT) && err == -EIWCOMMIT) {
+		union iwreq_data *data = (union iwreq_data *) iwp;
+
+		if (descr->flags & IW_DESCR_FLAG_RESTRICT)
+			/* If the event is restricted, don't
+			 * export the payload.
+			 */
+			wireless_send_event(dev, cmd, data, NULL);
+		else
+			wireless_send_event(dev, cmd, data, extra);
+	}
+
+out:
+	kfree(extra);
+	return err;
+}
+
 /*
  * Wrapper to call a standard Wireless Extension handler.
  * We do various checks and also take care of moving data between
  * user space and kernel space.
  */
 static int ioctl_standard_call(struct net_device *dev,
-			       struct ifreq *ifr,
+			       struct iwreq *iwr,
 			       unsigned int cmd,
+			       struct iw_request_info *info,
 			       iw_handler handler)
 {
-	struct iwreq *iwr = (struct iwreq *) ifr;
 	const struct iw_ioctl_description *descr;
-	struct iw_request_info info;
 	int ret = -EINVAL;
 
 	/* Get the description of the IOCTL */
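
The new ioctl_standard_iw_point() above centralises the user-buffer handling: validate the length against the descriptor bounds, allocate a kernel buffer, copy_from_user() on SET, run the handler, copy_to_user() on GET, and free on every path. A compressed sketch of that flow, with hypothetical bounds and a stubbed-out handler (illustrative only, not the wext code itself):

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical limits; the real code reads them from the ioctl description. */
#define MY_MAX_TOKENS	32
#define MY_TOKEN_SIZE	1

static int my_iw_point_like(void __user *uptr, unsigned int length, bool is_set)
{
	char *extra;
	int err = 0;

	if (is_set) {
		if (!uptr && length != 0)
			return -EFAULT;
		if (length > MY_MAX_TOKENS)
			return -E2BIG;
	} else if (!uptr)
		return -EFAULT;

	extra = kzalloc(MY_MAX_TOKENS * MY_TOKEN_SIZE, GFP_KERNEL);
	if (!extra)
		return -ENOMEM;

	if (is_set && length != 0 &&
	    copy_from_user(extra, uptr, length * MY_TOKEN_SIZE)) {
		err = -EFAULT;
		goto out;
	}

	/* ... a real handler would run here and may adjust 'length' ... */

	if (!is_set && copy_to_user(uptr, extra, length * MY_TOKEN_SIZE))
		err = -EFAULT;
out:
	kfree(extra);
	return err;
}
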
@@ -715,145 +845,19 @@ static int ioctl_standard_call(struct net_device * dev,
 		return -EOPNOTSUPP;
 	descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
 
-	/* Prepare the call */
-	info.cmd = cmd;
-	info.flags = 0;
-
 	/* Check if we have a pointer to user space data or not */
 	if (descr->header_type != IW_HEADER_TYPE_POINT) {
 
 		/* No extra arguments. Trivial to handle */
-		ret = handler(dev, &info, &(iwr->u), NULL);
+		ret = handler(dev, info, &(iwr->u), NULL);
 
 		/* Generate an event to notify listeners of the change */
 		if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
 		    ((ret == 0) || (ret == -EIWCOMMIT)))
 			wireless_send_event(dev, cmd, &(iwr->u), NULL);
 	} else {
-		char *extra;
-		int extra_size;
-		int user_length = 0;
-		int err;
-		int essid_compat = 0;
-
-		/* Calculate space needed by arguments. Always allocate
-		 * for max space. Easier, and won't last long... */
-		extra_size = descr->max_tokens * descr->token_size;
-
-		/* Check need for ESSID compatibility for WE < 21 */
-		switch (cmd) {
-		case SIOCSIWESSID:
-		case SIOCGIWESSID:
-		case SIOCSIWNICKN:
-		case SIOCGIWNICKN:
-			if (iwr->u.data.length == descr->max_tokens + 1)
-				essid_compat = 1;
-			else if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
-				char essid[IW_ESSID_MAX_SIZE + 1];
-
-				err = copy_from_user(essid, iwr->u.data.pointer,
-						     iwr->u.data.length *
-						     descr->token_size);
-				if (err)
-					return -EFAULT;
-
-				if (essid[iwr->u.data.length - 1] == '\0')
-					essid_compat = 1;
-			}
-			break;
-		default:
-			break;
-		}
-
-		iwr->u.data.length -= essid_compat;
-
-		/* Check what user space is giving us */
-		if (IW_IS_SET(cmd)) {
-			/* Check NULL pointer */
-			if ((iwr->u.data.pointer == NULL) &&
-			    (iwr->u.data.length != 0))
-				return -EFAULT;
-			/* Check if number of token fits within bounds */
-			if (iwr->u.data.length > descr->max_tokens)
-				return -E2BIG;
-			if (iwr->u.data.length < descr->min_tokens)
-				return -EINVAL;
-		} else {
-			/* Check NULL pointer */
-			if (iwr->u.data.pointer == NULL)
-				return -EFAULT;
-			/* Save user space buffer size for checking */
-			user_length = iwr->u.data.length;
-
-			/* Don't check if user_length > max to allow forward
-			 * compatibility. The test user_length < min is
-			 * implied by the test at the end. */
-
-			/* Support for very large requests */
-			if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
-			    (user_length > descr->max_tokens)) {
-				/* Allow userspace to GET more than max so
-				 * we can support any size GET requests.
-				 * There is still a limit : -ENOMEM. */
-				extra_size = user_length * descr->token_size;
-				/* Note : user_length is originally a __u16,
-				 * and token_size is controlled by us,
-				 * so extra_size won't get negative and
-				 * won't overflow... */
-			}
-		}
-
-		/* Create the kernel buffer */
-		/* kzalloc ensures NULL-termination for essid_compat */
-		extra = kzalloc(extra_size, GFP_KERNEL);
-		if (extra == NULL)
-			return -ENOMEM;
-
-		/* If it is a SET, get all the extra data in here */
-		if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
-			err = copy_from_user(extra, iwr->u.data.pointer,
-					     iwr->u.data.length *
-					     descr->token_size);
-			if (err) {
-				kfree(extra);
-				return -EFAULT;
-			}
-		}
-
-		/* Call the handler */
-		ret = handler(dev, &info, &(iwr->u), extra);
-
-		iwr->u.data.length += essid_compat;
-
-		/* If we have something to return to the user */
-		if (!ret && IW_IS_GET(cmd)) {
-			/* Check if there is enough buffer up there */
-			if (user_length < iwr->u.data.length) {
-				kfree(extra);
-				return -E2BIG;
-			}
-
-			err = copy_to_user(iwr->u.data.pointer, extra,
-					   iwr->u.data.length *
-					   descr->token_size);
-			if (err)
-				ret = -EFAULT;
-		}
-
-		/* Generate an event to notify listeners of the change */
-		if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
-		    ((ret == 0) || (ret == -EIWCOMMIT))) {
-			if (descr->flags & IW_DESCR_FLAG_RESTRICT)
-				/* If the event is restricted, don't
-				 * export the payload */
-				wireless_send_event(dev, cmd, &(iwr->u), NULL);
-			else
-				wireless_send_event(dev, cmd, &(iwr->u),
-						    extra);
-		}
-
-		/* Cleanup - I told you it wasn't that long ;-) */
-		kfree(extra);
+		ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr,
+					      handler, dev, info);
 	}
 
 	/* Call commit handler if needed and defined */
@@ -881,25 +885,22 @@ static int ioctl_standard_call(struct net_device * dev,
  * a iw_handler but process it in your ioctl handler (i.e. use the
  * old driver API).
  */
-static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
-			      unsigned int cmd, iw_handler handler)
+static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd,
+				   const struct iw_priv_args **descrp)
 {
-	struct iwreq *iwr = (struct iwreq *) ifr;
-	const struct iw_priv_args *descr = NULL;
-	struct iw_request_info info;
-	int extra_size = 0;
-	int i;
-	int ret = -EINVAL;
+	const struct iw_priv_args *descr;
+	int i, extra_size;
 
-	/* Get the description of the IOCTL */
-	for (i = 0; i < dev->wireless_handlers->num_private_args; i++)
+	descr = NULL;
+	for (i = 0; i < dev->wireless_handlers->num_private_args; i++) {
 		if (cmd == dev->wireless_handlers->private_args[i].cmd) {
-			descr = &(dev->wireless_handlers->private_args[i]);
+			descr = &dev->wireless_handlers->private_args[i];
 			break;
 		}
+	}
 
-	/* Compute the size of the set/get arguments */
-	if (descr != NULL) {
+	extra_size = 0;
+	if (descr) {
 		if (IW_IS_SET(cmd)) {
 			int offset = 0;	/* For sub-ioctls */
 			/* Check for sub-ioctl handler */
@@ -924,72 +925,77 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
 				extra_size = 0;
 		}
 	}
+	*descrp = descr;
+	return extra_size;
+}
 
-	/* Prepare the call */
-	info.cmd = cmd;
-	info.flags = 0;
+static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
+				  const struct iw_priv_args *descr,
+				  iw_handler handler, struct net_device *dev,
+				  struct iw_request_info *info, int extra_size)
+{
+	char *extra;
+	int err;
 
-	/* Check if we have a pointer to user space data or not. */
-	if (extra_size == 0) {
-		/* No extra arguments. Trivial to handle */
-		ret = handler(dev, &info, &(iwr->u), (char *) &(iwr->u));
-	} else {
-		char *extra;
-		int err;
+	/* Check what user space is giving us */
+	if (IW_IS_SET(cmd)) {
+		if (!iwp->pointer && iwp->length != 0)
+			return -EFAULT;
 
-		/* Check what user space is giving us */
-		if (IW_IS_SET(cmd)) {
-			/* Check NULL pointer */
-			if ((iwr->u.data.pointer == NULL) &&
-			    (iwr->u.data.length != 0))
-				return -EFAULT;
+		if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK))
+			return -E2BIG;
+	} else if (!iwp->pointer)
+		return -EFAULT;
 
-			/* Does it fits within bounds ? */
-			if (iwr->u.data.length > (descr->set_args &
-						  IW_PRIV_SIZE_MASK))
-				return -E2BIG;
-		} else if (iwr->u.data.pointer == NULL)
-			return -EFAULT;
+	extra = kmalloc(extra_size, GFP_KERNEL);
+	if (!extra)
+		return -ENOMEM;
 
-		/* Always allocate for max space. Easier, and won't last
-		 * long... */
-		extra = kmalloc(extra_size, GFP_KERNEL);
-		if (extra == NULL)
-			return -ENOMEM;
-
-		/* If it is a SET, get all the extra data in here */
-		if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
-			err = copy_from_user(extra, iwr->u.data.pointer,
-					     extra_size);
-			if (err) {
-				kfree(extra);
-				return -EFAULT;
-			}
+	/* If it is a SET, get all the extra data in here */
+	if (IW_IS_SET(cmd) && (iwp->length != 0)) {
+		if (copy_from_user(extra, iwp->pointer, extra_size)) {
+			err = -EFAULT;
+			goto out;
 		}
+	}
 
-		/* Call the handler */
-		ret = handler(dev, &info, &(iwr->u), extra);
+	/* Call the handler */
+	err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
-		/* If we have something to return to the user */
-		if (!ret && IW_IS_GET(cmd)) {
+	/* If we have something to return to the user */
+	if (!err && IW_IS_GET(cmd)) {
+		/* Adjust for the actual length if it's variable,
+		 * avoid leaking kernel bits outside.
+		 */
+		if (!(descr->get_args & IW_PRIV_SIZE_FIXED))
+			extra_size = adjust_priv_size(descr->get_args, iwp);
 
-			/* Adjust for the actual length if it's variable,
-			 * avoid leaking kernel bits outside. */
-			if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) {
-				extra_size = adjust_priv_size(descr->get_args,
-							      &(iwr->u));
-			}
+		if (copy_to_user(iwp->pointer, extra, extra_size))
+			err = -EFAULT;
+	}
 
-			err = copy_to_user(iwr->u.data.pointer, extra,
-					   extra_size);
-			if (err)
-				ret = -EFAULT;
-		}
+out:
+	kfree(extra);
+	return err;
+}
 
-		/* Cleanup - I told you it wasn't that long ;-) */
-		kfree(extra);
-	}
+static int ioctl_private_call(struct net_device *dev, struct iwreq *iwr,
+			      unsigned int cmd, struct iw_request_info *info,
+			      iw_handler handler)
+{
+	int extra_size = 0, ret = -EINVAL;
+	const struct iw_priv_args *descr;
 
+	extra_size = get_priv_descr_and_size(dev, cmd, &descr);
+
+	/* Check if we have a pointer to user space data or not. */
+	if (extra_size == 0) {
+		/* No extra arguments. Trivial to handle */
+		ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u));
+	} else {
+		ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr,
+					     handler, dev, info, extra_size);
+	}
 
 	/* Call commit handler if needed and defined */
 	if (ret == -EIWCOMMIT)
@@ -999,12 +1005,21 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr,
 }
 
 /* ---------------------------------------------------------------- */
+typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
+			       unsigned int, struct iw_request_info *,
+			       iw_handler);
+
 /*
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd)
+static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+				  unsigned int cmd,
+				  struct iw_request_info *info,
+				  wext_ioctl_func standard,
+				  wext_ioctl_func private)
 {
+	struct iwreq *iwr = (struct iwreq *) ifr;
 	struct net_device *dev;
 	iw_handler handler;
 
@@ -1019,12 +1034,12 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
 	 * Note that 'cmd' is already filtered in dev_ioctl() with
 	 * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */
 	if (cmd == SIOCGIWSTATS)
-		return ioctl_standard_call(dev, ifr, cmd,
-					   &iw_handler_get_iwstats);
+		return standard(dev, iwr, cmd, info,
+				&iw_handler_get_iwstats);
 
 	if (cmd == SIOCGIWPRIV && dev->wireless_handlers)
-		return ioctl_standard_call(dev, ifr, cmd,
-					   &iw_handler_get_private);
+		return standard(dev, iwr, cmd, info,
+				&iw_handler_get_private);
 
 	/* Basic check */
 	if (!netif_device_present(dev))
@@ -1035,9 +1050,9 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
 	if (handler) {
 		/* Standard and private are not the same */
 		if (cmd < SIOCIWFIRSTPRIV)
-			return ioctl_standard_call(dev, ifr, cmd, handler);
+			return standard(dev, iwr, cmd, info, handler);
 		else
-			return ioctl_private_call(dev, ifr, cmd, handler);
+			return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
 	if (dev->do_ioctl)
@@ -1045,27 +1060,154 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned i
 	return -EOPNOTSUPP;
 }
 
-/* entry point from dev ioctl */
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
-		      void __user *arg)
+/* If command is `set a parameter', or `get the encoding parameters',
+ * check if the user has the right to do it.
+ */
+static int wext_permission_check(unsigned int cmd)
 {
-	int ret;
-
-	/* If command is `set a parameter', or
-	 * `get the encoding parameters', check if
-	 * the user has the right to do it */
 	if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT)
 	    && !capable(CAP_NET_ADMIN))
 		return -EPERM;
 
+	return 0;
+}
+
+/* entry point from dev ioctl */
+static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+			       unsigned int cmd, struct iw_request_info *info,
+			       wext_ioctl_func standard,
+			       wext_ioctl_func private)
+{
+	int ret = wext_permission_check(cmd);
+
+	if (ret)
+		return ret;
+
 	dev_load(net, ifr->ifr_name);
 	rtnl_lock();
-	ret = wireless_process_ioctl(net, ifr, cmd);
+	ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
 	rtnl_unlock();
-	if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct iwreq)))
+
+	return ret;
+}
+
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+		      void __user *arg)
+{
+	struct iw_request_info info = { .cmd = cmd, .flags = 0 };
+	int ret;
+
+	ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+				  ioctl_standard_call,
+				  ioctl_private_call);
+	if (ret >= 0 &&
+	    IW_IS_GET(cmd) &&
+	    copy_to_user(arg, ifr, sizeof(struct iwreq)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int compat_standard_call(struct net_device *dev,
+				struct iwreq *iwr,
+				unsigned int cmd,
+				struct iw_request_info *info,
+				iw_handler handler)
+{
+	const struct iw_ioctl_description *descr;
+	struct compat_iw_point *iwp_compat;
+	struct iw_point iwp;
+	int err;
+
+	descr = standard_ioctl + (cmd - SIOCIWFIRST);
+
+	if (descr->header_type != IW_HEADER_TYPE_POINT)
+		return ioctl_standard_call(dev, iwr, cmd, info, handler);
+
+	iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+	iwp.pointer = compat_ptr(iwp_compat->pointer);
+	iwp.length = iwp_compat->length;
+	iwp.flags = iwp_compat->flags;
+
+	err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info);
+
+	iwp_compat->pointer = ptr_to_compat(iwp.pointer);
+	iwp_compat->length = iwp.length;
+	iwp_compat->flags = iwp.flags;
+
+	return err;
+}
+
+static int compat_private_call(struct net_device *dev, struct iwreq *iwr,
+			       unsigned int cmd, struct iw_request_info *info,
+			       iw_handler handler)
+{
+	const struct iw_priv_args *descr;
+	int ret, extra_size;
+
+	extra_size = get_priv_descr_and_size(dev, cmd, &descr);
+
+	/* Check if we have a pointer to user space data or not. */
+	if (extra_size == 0) {
+		/* No extra arguments. Trivial to handle */
+		ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u));
+	} else {
+		struct compat_iw_point *iwp_compat;
+		struct iw_point iwp;
+
+		iwp_compat = (struct compat_iw_point *) &iwr->u.data;
+		iwp.pointer = compat_ptr(iwp_compat->pointer);
+		iwp.length = iwp_compat->length;
+		iwp.flags = iwp_compat->flags;
+
+		ret = ioctl_private_iw_point(&iwp, cmd, descr,
+					     handler, dev, info, extra_size);
+
+		iwp_compat->pointer = ptr_to_compat(iwp.pointer);
+		iwp_compat->length = iwp.length;
+		iwp_compat->flags = iwp.flags;
+	}
+
+	/* Call commit handler if needed and defined */
+	if (ret == -EIWCOMMIT)
+		ret = call_commit_handler(dev);
+
+	return ret;
+}
+
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+			     unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct iw_request_info info;
+	struct iwreq iwr;
+	char *colon;
+	int ret;
+
+	if (copy_from_user(&iwr, argp, sizeof(struct iwreq)))
+		return -EFAULT;
+
+	iwr.ifr_name[IFNAMSIZ-1] = 0;
+	colon = strchr(iwr.ifr_name, ':');
+	if (colon)
+		*colon = 0;
+
+	info.cmd = cmd;
+	info.flags = IW_REQUEST_FLAG_COMPAT;
+
+	ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+				  compat_standard_call,
+				  compat_private_call);
+
+	if (ret >= 0 &&
+	    IW_IS_GET(cmd) &&
+	    copy_to_user(argp, &iwr, sizeof(struct iwreq)))
 		return -EFAULT;
+
 	return ret;
 }
+#endif
 
 /************************* EVENT PROCESSING *************************/
 /*
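
The compat path added above converts the 32-bit iw_point layout into the native one with compat_ptr() before calling the shared helpers, then narrows the result back with ptr_to_compat(). A standalone sketch of that round-trip follows; struct my_compat_point and the caller-supplied native handler are hypothetical stand-ins, not the structures this patch defines.

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#include <linux/wireless.h>

/* Hypothetical mirror of a 32-bit user layout of struct iw_point. */
struct my_compat_point {
	compat_caddr_t pointer;
	__u16 length;
	__u16 flags;
};

static int my_compat_round_trip(struct my_compat_point *cp,
				int (*native)(struct iw_point *iwp))
{
	struct iw_point iwp;
	int err;

	iwp.pointer = compat_ptr(cp->pointer);	/* widen the user pointer */
	iwp.length = cp->length;
	iwp.flags = cp->flags;

	err = native(&iwp);	/* shared, word-size-agnostic code */

	cp->pointer = ptr_to_compat(iwp.pointer);
	cp->length = iwp.length;
	cp->flags = iwp.flags;
	return err;
}
#endif
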
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 6ba67c523c16..7b1c6ef04553 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -555,13 +555,11 @@ static struct sock *x25_make_new(struct sock *osk)
 	x25 = x25_sk(sk);
 
 	sk->sk_type        = osk->sk_type;
-	sk->sk_socket      = osk->sk_socket;
 	sk->sk_priority    = osk->sk_priority;
 	sk->sk_protocol    = osk->sk_protocol;
 	sk->sk_rcvbuf      = osk->sk_rcvbuf;
 	sk->sk_sndbuf      = osk->sk_sndbuf;
 	sk->sk_state       = TCP_ESTABLISHED;
-	sk->sk_sleep       = osk->sk_sleep;
 	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
 	sock_copy_flags(sk, osk);
 
@@ -614,8 +612,7 @@ static int x25_release(struct socket *sock)
 		break;
 	}
 
-	sock->sk = NULL;
-	sk->sk_socket = NULL;	/* Not used, but we should do this */
+	sock_orphan(sk);
 out:
 	return 0;
 }
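
The x25 hunks replace open-coded sk_socket/sk_sleep bookkeeping with the sock_orphan() and sock_graft() helpers from net/sock.h, which keep both directions of the socket<->sock linkage (and the wait queue) consistent; the hunk above covers the release side, and the accept hunk below uses sock_graft(). A brief sketch of both call sites, with hypothetical function names and error handling elided:

#include <net/sock.h>

static void my_release(struct sock *sk)
{
	/* was: sock->sk = NULL; sk->sk_socket = NULL; */
	sock_orphan(sk);
}

static void my_accept(struct sock *newsk, struct socket *newsock)
{
	/* was: newsk->sk_socket = newsock; newsk->sk_sleep = &newsock->wait;
	 *      newsock->sk = newsk; */
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
}
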
@@ -808,14 +805,12 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (!skb->sk)
 		goto out2;
 	newsk = skb->sk;
-	newsk->sk_socket = newsock;
-	newsk->sk_sleep  = &newsock->wait;
+	sock_graft(newsk, newsock);
 
 	/* Now attach up the new socket */
 	skb->sk = NULL;
 	kfree_skb(skb);
 	sk->sk_ack_backlog--;
-	newsock->sk = newsk;
 	newsock->state = SS_CONNECTED;
 	rc = 0;
 out2: